ia64/xen-unstable

changeset 9736:91da9a1b7196

Merged.
author emellor@leeni.uk.xensource.com
date Sat Apr 15 19:25:21 2006 +0100 (2006-04-15)
parents 83eb8d81c96f b39365343de0
children 3c1cd09801c0
files xen/include/asm-x86/mach-default/irq_vectors_limits.h
line diff
     1.1 --- a/.hgignore	Sat Apr 15 19:25:09 2006 +0100
     1.2 +++ b/.hgignore	Sat Apr 15 19:25:21 2006 +0100
     1.3 @@ -184,6 +184,7 @@
     1.4  ^tools/xm-test/ramdisk/buildroot
     1.5  ^xen/BLOG$
     1.6  ^xen/TAGS$
     1.7 +^xen/cscope\.*$
     1.8  ^xen/arch/x86/asm-offsets\.s$
     1.9  ^xen/arch/x86/boot/mkelf32$
    1.10  ^xen/arch/x86/xen\.lds$
     2.1 --- a/buildconfigs/Rules.mk	Sat Apr 15 19:25:09 2006 +0100
     2.2 +++ b/buildconfigs/Rules.mk	Sat Apr 15 19:25:21 2006 +0100
     2.3 @@ -99,14 +99,14 @@ endif
     2.4  linux-2.6-xen.patch: ref-linux-$(LINUX_VER)/.valid-ref
     2.5  	rm -rf tmp-$@
     2.6  	cp -al $(<D) tmp-$@
     2.7 -	( cd linux-2.6-xen-sparse && ./mkbuildtree ../tmp-$@ )	
     2.8 +	( cd linux-2.6-xen-sparse && bash ./mkbuildtree ../tmp-$@ )	
     2.9  	diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
    2.10  	rm -rf tmp-$@
    2.11  
    2.12  %-xen.patch: ref-%/.valid-ref
    2.13  	rm -rf tmp-$@
    2.14  	cp -al $(<D) tmp-$@
    2.15 -	( cd $*-xen-sparse && ./mkbuildtree ../tmp-$@ )	
    2.16 +	( cd $*-xen-sparse && bash ./mkbuildtree ../tmp-$@ )	
    2.17  	diff -Nurp $(patsubst ref%,pristine%,$(<D)) tmp-$@ > $@ || true
    2.18  	rm -rf tmp-$@
    2.19  
     3.1 --- a/buildconfigs/linux-defconfig_xen0_x86_64	Sat Apr 15 19:25:09 2006 +0100
     3.2 +++ b/buildconfigs/linux-defconfig_xen0_x86_64	Sat Apr 15 19:25:21 2006 +0100
     3.3 @@ -1,7 +1,7 @@
     3.4  #
     3.5  # Automatically generated make config: don't edit
     3.6 -# Linux kernel version: 2.6.16-rc3-xen0
     3.7 -# Mon Feb 20 11:37:43 2006
     3.8 +# Linux kernel version: 2.6.16-xen0
     3.9 +# Thu Apr 13 14:58:29 2006
    3.10  #
    3.11  CONFIG_X86_64=y
    3.12  CONFIG_64BIT=y
    3.13 @@ -99,6 +99,8 @@ CONFIG_X86_PC=y
    3.14  # CONFIG_MPSC is not set
    3.15  CONFIG_GENERIC_CPU=y
    3.16  CONFIG_X86_64_XEN=y
    3.17 +CONFIG_X86_NO_TSS=y
    3.18 +CONFIG_X86_NO_IDT=y
    3.19  CONFIG_X86_L1_CACHE_BYTES=128
    3.20  CONFIG_X86_L1_CACHE_SHIFT=7
    3.21  CONFIG_X86_GOOD_APIC=y
    3.22 @@ -176,6 +178,19 @@ CONFIG_PCI_DIRECT=y
    3.23  CONFIG_XEN_PCIDEV_FRONTEND=y
    3.24  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
    3.25  # CONFIG_UNORDERED_IO is not set
    3.26 +# CONFIG_PCIEPORTBUS is not set
    3.27 +CONFIG_PCI_LEGACY_PROC=y
    3.28 +# CONFIG_PCI_DEBUG is not set
    3.29 +
    3.30 +#
    3.31 +# PCCARD (PCMCIA/CardBus) support
    3.32 +#
    3.33 +# CONFIG_PCCARD is not set
    3.34 +
    3.35 +#
    3.36 +# PCI Hotplug Support
    3.37 +#
    3.38 +# CONFIG_HOTPLUG_PCI is not set
    3.39  
    3.40  #
    3.41  # Executable file formats / Emulations
    3.42 @@ -1001,11 +1016,7 @@ CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
    3.43  CONFIG_INFINIBAND_SRP=y
    3.44  
    3.45  #
    3.46 -# SN Devices
    3.47 -#
    3.48 -
    3.49 -#
    3.50 -# EDAC - error detection and reporting (RAS)
    3.51 +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
    3.52  #
    3.53  # CONFIG_EDAC is not set
    3.54  
    3.55 @@ -1239,7 +1250,7 @@ CONFIG_CRYPTO_CRC32C=m
    3.56  # Hardware crypto devices
    3.57  #
    3.58  CONFIG_XEN=y
    3.59 -CONFIG_NO_IDLE_HZ=y
    3.60 +CONFIG_XEN_INTERFACE_VERSION=0x00030101
    3.61  
    3.62  #
    3.63  # XEN
    3.64 @@ -1266,6 +1277,7 @@ CONFIG_XEN_DISABLE_SERIAL=y
    3.65  CONFIG_XEN_SYSFS=y
    3.66  CONFIG_HAVE_ARCH_ALLOC_SKB=y
    3.67  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
    3.68 +CONFIG_NO_IDLE_HZ=y
    3.69  
    3.70  #
    3.71  # Library routines
     4.1 --- a/buildconfigs/linux-defconfig_xenU_x86_64	Sat Apr 15 19:25:09 2006 +0100
     4.2 +++ b/buildconfigs/linux-defconfig_xenU_x86_64	Sat Apr 15 19:25:21 2006 +0100
     4.3 @@ -1,7 +1,7 @@
     4.4  #
     4.5  # Automatically generated make config: don't edit
     4.6 -# Linux kernel version: 2.6.16-rc3-xen0
     4.7 -# Thu Feb 16 22:56:02 2006
     4.8 +# Linux kernel version: 2.6.16-xenU
     4.9 +# Thu Apr 13 14:59:16 2006
    4.10  #
    4.11  CONFIG_X86_64=y
    4.12  CONFIG_64BIT=y
    4.13 @@ -103,6 +103,8 @@ CONFIG_X86_PC=y
    4.14  CONFIG_MPSC=y
    4.15  # CONFIG_GENERIC_CPU is not set
    4.16  CONFIG_X86_64_XEN=y
    4.17 +CONFIG_X86_NO_TSS=y
    4.18 +CONFIG_X86_NO_IDT=y
    4.19  CONFIG_X86_L1_CACHE_BYTES=128
    4.20  CONFIG_X86_L1_CACHE_SHIFT=7
    4.21  CONFIG_X86_GOOD_APIC=y
    4.22 @@ -147,6 +149,15 @@ CONFIG_GENERIC_PENDING_IRQ=y
    4.23  # CONFIG_UNORDERED_IO is not set
    4.24  
    4.25  #
    4.26 +# PCCARD (PCMCIA/CardBus) support
    4.27 +#
    4.28 +# CONFIG_PCCARD is not set
    4.29 +
    4.30 +#
    4.31 +# PCI Hotplug Support
    4.32 +#
    4.33 +
    4.34 +#
    4.35  # Executable file formats / Emulations
    4.36  #
    4.37  CONFIG_BINFMT_ELF=y
    4.38 @@ -844,11 +855,7 @@ CONFIG_DUMMY_CONSOLE=y
    4.39  #
    4.40  
    4.41  #
    4.42 -# SN Devices
    4.43 -#
    4.44 -
    4.45 -#
    4.46 -# EDAC - error detection and reporting (RAS)
    4.47 +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
    4.48  #
    4.49  # CONFIG_EDAC is not set
    4.50  
    4.51 @@ -1128,7 +1135,7 @@ CONFIG_CRYPTO_CRC32C=m
    4.52  # Hardware crypto devices
    4.53  #
    4.54  CONFIG_XEN=y
    4.55 -CONFIG_NO_IDLE_HZ=y
    4.56 +CONFIG_XEN_INTERFACE_VERSION=0x00030101
    4.57  
    4.58  #
    4.59  # XEN
    4.60 @@ -1144,6 +1151,7 @@ CONFIG_XEN_DISABLE_SERIAL=y
    4.61  CONFIG_XEN_SYSFS=y
    4.62  CONFIG_HAVE_ARCH_ALLOC_SKB=y
    4.63  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
    4.64 +CONFIG_NO_IDLE_HZ=y
    4.65  
    4.66  #
    4.67  # Library routines
     5.1 --- a/buildconfigs/linux-defconfig_xen_x86_64	Sat Apr 15 19:25:09 2006 +0100
     5.2 +++ b/buildconfigs/linux-defconfig_xen_x86_64	Sat Apr 15 19:25:21 2006 +0100
     5.3 @@ -1,7 +1,7 @@
     5.4  #
     5.5  # Automatically generated make config: don't edit
     5.6  # Linux kernel version: 2.6.16-xen
     5.7 -# Mon Mar 27 09:43:44 2006
     5.8 +# Thu Apr 13 15:01:04 2006
     5.9  #
    5.10  CONFIG_X86_64=y
    5.11  CONFIG_64BIT=y
    5.12 @@ -186,6 +186,41 @@ CONFIG_PCI_DIRECT=y
    5.13  CONFIG_XEN_PCIDEV_FRONTEND=y
    5.14  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
    5.15  # CONFIG_UNORDERED_IO is not set
    5.16 +# CONFIG_PCIEPORTBUS is not set
    5.17 +# CONFIG_PCI_LEGACY_PROC is not set
    5.18 +# CONFIG_PCI_DEBUG is not set
    5.19 +
    5.20 +#
    5.21 +# PCCARD (PCMCIA/CardBus) support
    5.22 +#
    5.23 +CONFIG_PCCARD=m
    5.24 +# CONFIG_PCMCIA_DEBUG is not set
    5.25 +CONFIG_PCMCIA=m
    5.26 +CONFIG_PCMCIA_LOAD_CIS=y
    5.27 +CONFIG_PCMCIA_IOCTL=y
    5.28 +CONFIG_CARDBUS=y
    5.29 +
    5.30 +#
    5.31 +# PC-card bridges
    5.32 +#
    5.33 +CONFIG_YENTA=m
    5.34 +CONFIG_YENTA_O2=y
    5.35 +CONFIG_YENTA_RICOH=y
    5.36 +CONFIG_YENTA_TI=y
    5.37 +CONFIG_YENTA_ENE_TUNE=y
    5.38 +CONFIG_YENTA_TOSHIBA=y
    5.39 +CONFIG_PD6729=m
    5.40 +CONFIG_I82092=m
    5.41 +CONFIG_PCCARD_NONSTATIC=m
    5.42 +
    5.43 +#
    5.44 +# PCI Hotplug Support
    5.45 +#
    5.46 +CONFIG_HOTPLUG_PCI=m
    5.47 +# CONFIG_HOTPLUG_PCI_FAKE is not set
    5.48 +# CONFIG_HOTPLUG_PCI_ACPI is not set
    5.49 +# CONFIG_HOTPLUG_PCI_CPCI is not set
    5.50 +# CONFIG_HOTPLUG_PCI_SHPC is not set
    5.51  
    5.52  #
    5.53  # Executable file formats / Emulations
    5.54 @@ -625,6 +660,10 @@ CONFIG_BT_HCIUART_BCSP=y
    5.55  CONFIG_BT_HCIBCM203X=m
    5.56  CONFIG_BT_HCIBPA10X=m
    5.57  CONFIG_BT_HCIBFUSB=m
    5.58 +# CONFIG_BT_HCIDTL1 is not set
    5.59 +# CONFIG_BT_HCIBT3C is not set
    5.60 +# CONFIG_BT_HCIBLUECARD is not set
    5.61 +# CONFIG_BT_HCIBTUART is not set
    5.62  CONFIG_BT_HCIVHCI=m
    5.63  CONFIG_IEEE80211=m
    5.64  # CONFIG_IEEE80211_DEBUG is not set
    5.65 @@ -769,6 +808,7 @@ CONFIG_PARPORT=m
    5.66  CONFIG_PARPORT_PC=m
    5.67  # CONFIG_PARPORT_PC_FIFO is not set
    5.68  # CONFIG_PARPORT_PC_SUPERIO is not set
    5.69 +# CONFIG_PARPORT_PC_PCMCIA is not set
    5.70  CONFIG_PARPORT_NOT_PC=y
    5.71  # CONFIG_PARPORT_GSC is not set
    5.72  CONFIG_PARPORT_1284=y
    5.73 @@ -851,6 +891,7 @@ CONFIG_BLK_DEV_IDE=y
    5.74  # CONFIG_BLK_DEV_HD_IDE is not set
    5.75  CONFIG_BLK_DEV_IDEDISK=y
    5.76  CONFIG_IDEDISK_MULTI_MODE=y
    5.77 +# CONFIG_BLK_DEV_IDECS is not set
    5.78  CONFIG_BLK_DEV_IDECD=y
    5.79  # CONFIG_BLK_DEV_IDETAPE is not set
    5.80  CONFIG_BLK_DEV_IDEFLOPPY=y
    5.81 @@ -1012,6 +1053,13 @@ CONFIG_SCSI_DC390T=m
    5.82  # CONFIG_SCSI_DEBUG is not set
    5.83  
    5.84  #
    5.85 +# PCMCIA SCSI adapter support
    5.86 +#
    5.87 +# CONFIG_PCMCIA_FDOMAIN is not set
    5.88 +# CONFIG_PCMCIA_QLOGIC is not set
    5.89 +# CONFIG_PCMCIA_SYM53C500 is not set
    5.90 +
    5.91 +#
    5.92  # Multi-device support (RAID and LVM)
    5.93  #
    5.94  CONFIG_MD=y
    5.95 @@ -1141,6 +1189,7 @@ CONFIG_DE4X5=m
    5.96  CONFIG_WINBOND_840=m
    5.97  CONFIG_DM9102=m
    5.98  CONFIG_ULI526X=m
    5.99 +# CONFIG_PCMCIA_XIRCOM is not set
   5.100  # CONFIG_HP100 is not set
   5.101  CONFIG_NET_PCI=y
   5.102  CONFIG_PCNET32=m
   5.103 @@ -1224,6 +1273,13 @@ CONFIG_NET_RADIO=y
   5.104  # Obsolete Wireless cards support (pre-802.11)
   5.105  #
   5.106  # CONFIG_STRIP is not set
   5.107 +# CONFIG_PCMCIA_WAVELAN is not set
   5.108 +# CONFIG_PCMCIA_NETWAVE is not set
   5.109 +
   5.110 +#
   5.111 +# Wireless 802.11 Frequency Hopping cards support
   5.112 +#
   5.113 +# CONFIG_PCMCIA_RAYCS is not set
   5.114  
   5.115  #
   5.116  # Wireless 802.11b ISA/PCI cards support
   5.117 @@ -1243,6 +1299,15 @@ CONFIG_ATMEL=m
   5.118  CONFIG_PCI_ATMEL=m
   5.119  
   5.120  #
   5.121 +# Wireless 802.11b Pcmcia/Cardbus cards support
   5.122 +#
   5.123 +# CONFIG_PCMCIA_HERMES is not set
   5.124 +# CONFIG_PCMCIA_SPECTRUM is not set
   5.125 +# CONFIG_AIRO_CS is not set
   5.126 +# CONFIG_PCMCIA_ATMEL is not set
   5.127 +# CONFIG_PCMCIA_WL3501 is not set
   5.128 +
   5.129 +#
   5.130  # Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
   5.131  #
   5.132  CONFIG_PRISM54=m
   5.133 @@ -1250,9 +1315,15 @@ CONFIG_HOSTAP=m
   5.134  # CONFIG_HOSTAP_FIRMWARE is not set
   5.135  CONFIG_HOSTAP_PLX=m
   5.136  CONFIG_HOSTAP_PCI=m
   5.137 +# CONFIG_HOSTAP_CS is not set
   5.138  CONFIG_NET_WIRELESS=y
   5.139  
   5.140  #
   5.141 +# PCMCIA network device support
   5.142 +#
   5.143 +# CONFIG_NET_PCMCIA is not set
   5.144 +
   5.145 +#
   5.146  # Wan interfaces
   5.147  #
   5.148  # CONFIG_WAN is not set
   5.149 @@ -1376,6 +1447,10 @@ CONFIG_HISAX_ENTERNOW_PCI=y
   5.150  #
   5.151  # HiSax PCMCIA card service modules
   5.152  #
   5.153 +# CONFIG_HISAX_SEDLBAUER_CS is not set
   5.154 +# CONFIG_HISAX_ELSA_CS is not set
   5.155 +# CONFIG_HISAX_AVM_A1_CS is not set
   5.156 +# CONFIG_HISAX_TELES_CS is not set
   5.157  
   5.158  #
   5.159  # HiSax sub driver modules
   5.160 @@ -1412,6 +1487,7 @@ CONFIG_CAPI_AVM=y
   5.161  CONFIG_ISDN_DRV_AVMB1_B1PCI=m
   5.162  CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
   5.163  CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
   5.164 +# CONFIG_ISDN_DRV_AVMB1_AVM_CS is not set
   5.165  CONFIG_ISDN_DRV_AVMB1_T1PCI=m
   5.166  CONFIG_ISDN_DRV_AVMB1_C4=m
   5.167  
   5.168 @@ -1600,6 +1676,13 @@ CONFIG_DRM_RADEON=m
   5.169  CONFIG_DRM_MGA=m
   5.170  CONFIG_DRM_VIA=m
   5.171  CONFIG_DRM_SAVAGE=m
   5.172 +
   5.173 +#
   5.174 +# PCMCIA character devices
   5.175 +#
   5.176 +# CONFIG_SYNCLINK_CS is not set
   5.177 +# CONFIG_CARDMAN_4000 is not set
   5.178 +# CONFIG_CARDMAN_4040 is not set
   5.179  # CONFIG_MWAVE is not set
   5.180  # CONFIG_RAW_DRIVER is not set
   5.181  # CONFIG_HPET is not set
   5.182 @@ -2101,6 +2184,10 @@ CONFIG_SND_USB_AUDIO=m
   5.183  CONFIG_SND_USB_USX2Y=m
   5.184  
   5.185  #
   5.186 +# PCMCIA devices
   5.187 +#
   5.188 +
   5.189 +#
   5.190  # Open Sound System
   5.191  #
   5.192  # CONFIG_SOUND_PRIME is not set
   5.193 @@ -2134,6 +2221,7 @@ CONFIG_USB_OHCI_HCD=m
   5.194  CONFIG_USB_OHCI_LITTLE_ENDIAN=y
   5.195  CONFIG_USB_UHCI_HCD=m
   5.196  CONFIG_USB_SL811_HCD=m
   5.197 +# CONFIG_USB_SL811_CS is not set
   5.198  
   5.199  #
   5.200  # USB Device Class drivers
   5.201 @@ -2284,6 +2372,7 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
   5.202  CONFIG_USB_SERIAL_TI=m
   5.203  CONFIG_USB_SERIAL_CYBERJACK=m
   5.204  CONFIG_USB_SERIAL_XIRCOM=m
   5.205 +# CONFIG_USB_SERIAL_OPTION is not set
   5.206  CONFIG_USB_SERIAL_OMNINET=m
   5.207  CONFIG_USB_EZUSB=y
   5.208  
   5.209 @@ -2649,7 +2738,7 @@ CONFIG_CRYPTO_CRC32C=m
   5.210  # Hardware crypto devices
   5.211  #
   5.212  CONFIG_XEN=y
   5.213 -CONFIG_NO_IDLE_HZ=y
   5.214 +CONFIG_XEN_INTERFACE_VERSION=0x00030101
   5.215  
   5.216  #
   5.217  # XEN
   5.218 @@ -2676,6 +2765,7 @@ CONFIG_XEN_DISABLE_SERIAL=y
   5.219  CONFIG_XEN_SYSFS=m
   5.220  CONFIG_HAVE_ARCH_ALLOC_SKB=y
   5.221  CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
   5.222 +CONFIG_NO_IDLE_HZ=y
   5.223  
   5.224  #
   5.225  # Library routines
     6.1 --- a/buildconfigs/mk.linux-2.6-xen	Sat Apr 15 19:25:09 2006 +0100
     6.2 +++ b/buildconfigs/mk.linux-2.6-xen	Sat Apr 15 19:25:21 2006 +0100
     6.3 @@ -22,8 +22,8 @@ build: $(LINUX_DIR)/include/linux/autoco
     6.4  	rm -rf $(LINUX_DIR)
     6.5  	cp -al $(<D) $(LINUX_DIR)
     6.6  	# Apply arch-xen patches
     6.7 -	( cd linux-$(LINUX_SERIES)-xen-sparse ; \
     6.8 -          LINUX_ARCH=$(LINUX_ARCH) ./mkbuildtree ../$(LINUX_DIR) )
     6.9 +	( cd linux-$(LINUX_SERIES)-xen-sparse && \
    6.10 +          LINUX_ARCH=$(LINUX_ARCH) bash ./mkbuildtree ../$(LINUX_DIR) )
    6.11  	# Re-use config from install dir if one exits else use default config
    6.12  	CONFIG_VERSION=$$(sed -ne 's/^EXTRAVERSION = //p' $(LINUX_DIR)/Makefile); \
    6.13  	[ -r $(DESTDIR)/boot/config-$(LINUX_VER)$$CONFIG_VERSION-$(EXTRAVERSION) ] && \
     7.1 --- a/docs/src/user.tex	Sat Apr 15 19:25:09 2006 +0100
     7.2 +++ b/docs/src/user.tex	Sat Apr 15 19:25:21 2006 +0100
     7.3 @@ -1232,8 +1232,15 @@ customized variants for your site's pref
     7.4  \subsection{PCI}
     7.5  \label{ss:pcidd}
     7.6  
     7.7 -Individual PCI devices can be assigned to a given domain to allow that
     7.8 -domain direct access to the PCI hardware. To use this functionality, ensure
     7.9 +Individual PCI devices can be assigned to a given domain (a PCI driver domain)
    7.10 +to allow that domain direct access to the PCI hardware.
    7.11 +
    7.12 +While PCI Driver Domains can increase the stability and security of a system
    7.13 +by addressing a number of security concerns, there are some security issues
    7.14 +that remain that you can read about in Section~\ref{s:ddsecurity}.
    7.15 +
    7.16 +\subsubsection{Compile-Time Setup}
    7.17 +To use this functionality, ensure
    7.18  that the PCI Backend is compiled in to a privileged domain (e.g. domain 0)
    7.19  and that the domains which will be assigned PCI devices have the PCI Frontend
    7.20  compiled in. In XenLinux, the PCI Backend is available under the Xen
    7.21 @@ -1241,21 +1248,73 @@ configuration section while the PCI Fron
    7.22  architecture-specific "Bus Options" section. You may compile both the backend
    7.23  and the frontend into the same kernel; they will not affect each other.
    7.24  
    7.25 +\subsubsection{PCI Backend Configuration - Binding at Boot}
    7.26  The PCI devices you wish to assign to unprivileged domains must be "hidden"
    7.27  from your backend domain (usually domain 0) so that it does not load a driver
    7.28  for them. Use the \path{pciback.hide} kernel parameter which is specified on
    7.29  the kernel command-line and is configurable through GRUB (see
    7.30  Section~\ref{s:configure}). Note that devices are not really hidden from the
    7.31 -backend domain. The PCI Backend ensures that no other device driver loads
    7.32 -for those devices. PCI devices are identified by hexadecimal
    7.33 -slot/funciton numbers (on Linux, use \path{lspci} to determine slot/funciton
    7.34 -numbers of your devices) and can be specified with or without the PCI domain: \\
    7.35 +backend domain. The PCI Backend appears to the Linux kernel as a regular PCI
    7.36 +device driver. The PCI Backend ensures that no other device driver loads
    7.37 +for the devices by binding itself as the device driver for those devices.
    7.38 +PCI devices are identified by hexadecimal slot/function numbers (on Linux,
    7.39 +use \path{lspci} to determine slot/function numbers of your devices) and
    7.40 +can be specified with or without the PCI domain: \\
    7.41  \centerline{  {\tt ({\em bus}:{\em slot}.{\em func})} example {\tt (02:1d.3)}} \\
    7.42  \centerline{  {\tt ({\em domain}:{\em bus}:{\em slot}.{\em func})} example {\tt (0000:02:1d.3)}} \\
    7.43  
    7.44  An example kernel command-line which hides two PCI devices might be: \\
    7.45  \centerline{ {\tt root=/dev/sda4 ro console=tty0 pciback.hide=(02:01.f)(0000:04:1d.0) } } \\
    7.46  
    7.47 +\subsubsection{PCI Backend Configuration - Late Binding}
    7.48 +PCI devices can also be bound to the PCI Backend after boot through the manual
    7.49 +binding/unbinding facilities provided by the Linux kernel in sysfs (allowing
    7.50 +for a Xen user to give PCI devices to driver domains that were not specified
    7.51 +on the kernel command-line). There are several attributes within the PCI
    7.52 +Backend's sysfs directory (\path{/sys/bus/pci/drivers/pciback}) that can be
    7.53 +used to bind/unbind devices:
    7.54 +
    7.55 +\begin{description}
    7.56 +\item[slots] lists all of the PCI slots that the PCI Backend will try to seize
    7.57 +  (or "hide" from Domain 0). A PCI slot must appear in this list before it can
    7.58 +  be bound to the PCI Backend through the \path{bind} attribute.
    7.59 +\item[new\_slot] write the name of a slot here (in 0000:00:00.0 format) to
    7.60 +  have the PCI Backend seize the device in this slot.
    7.61 +\item[remove\_slot] write the name of a slot here (same format as
    7.62 +  \path{new\_slot}) to have the PCI Backend no longer try to seize devices in
    7.63 +  this slot. Note that this does not unbind the driver from a device it has
    7.64 +  already seized.
    7.65 +\item[bind] write the name of a slot here (in 0000:00:00.0 format) to have
    7.66 +  the Linux kernel attempt to bind the device in that slot to the PCI Backend
    7.67 +  driver.
    7.68 +\item[unbind] write the name of a slot here (same format as \path{bind}) to have
    7.69 +  the Linux kernel unbind the device from the PCI Backend. DO NOT unbind a
    7.70 +  device while it is currently given to a PCI driver domain!
    7.71 +\end{description}
    7.72 +
    7.73 +Some examples:
    7.74 +
    7.75 +Bind a device to the PCI Backend which is not bound to any other driver.
    7.76 +\begin{verbatim}
    7.77 +# # Add a new slot to the PCI Backend's list
    7.78 +# echo -n 0000:01:04.d > /sys/bus/pci/drivers/pciback/new_slot
    7.79 +# # Now that the backend is watching for the slot, bind to it
    7.80 +# echo -n 0000:01:04.d > /sys/bus/pci/drivers/pciback/bind
    7.81 +\end{verbatim}
    7.82 +
    7.83 +Unbind a device from its driver and bind to the PCI Backend.
    7.84 +\begin{verbatim}
    7.85 +# # Unbind a PCI network card from its network driver
    7.86 +# echo -n 0000:05:02.0 > /sys/bus/pci/drivers/3c905/unbind
    7.87 +# # And now bind it to the PCI Backend
    7.88 +# echo -n 0000:05:02.0 > /sys/bus/pci/drivers/pciback/new_slot
    7.89 +# echo -n 0000:05:02.0 > /sys/bus/pci/drivers/pciback/bind
    7.90 +\end{verbatim}
    7.91 +
    7.92 +Note that the "-n" option in the example is important as it causes echo to not
    7.93 +output a new-line.
    7.94 +
    7.95 +\subsubsection{PCI Frontend Configuration}
    7.96  To configure a domU to receive a PCI device:
    7.97  
    7.98  \begin{description}
    7.99 @@ -1282,9 +1341,6 @@ To configure a domU to receive a PCI dev
   7.100  }
   7.101  \end{description}
   7.102  
   7.103 -There are a number of security concerns associated with PCI Driver Domains
   7.104 -that you can read about in Section~\ref{s:ddsecurity}.
   7.105 -
   7.106  %% There are two possible types of privileges: IO privileges and
   7.107  %% administration privileges.
   7.108  
     8.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/fixup.c	Sat Apr 15 19:25:09 2006 +0100
     8.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/fixup.c	Sat Apr 15 19:25:21 2006 +0100
     8.3 @@ -68,6 +68,7 @@ fastcall void do_fixup_4gb_segment(struc
     8.4  	DP("");
     8.5  
     8.6  	for (i = 5; i > 0; i--) {
     8.7 +		touch_softlockup_watchdog();
     8.8  		printk("Pausing... %d", i);
     8.9  		mdelay(1000);
    8.10  		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
     9.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Sat Apr 15 19:25:09 2006 +0100
     9.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/swiotlb.c	Sat Apr 15 19:25:21 2006 +0100
     9.3 @@ -206,8 +206,8 @@ swiotlb_init(void)
     9.4  }
     9.5  
     9.6  /*
     9.7 - * We use __copy_to_user to transfer to the host buffer because the buffer
     9.8 - * may be mapped read-only (e.g, in blkback driver) but lower-level
     9.9 + * We use __copy_to_user_inatomic to transfer to the host buffer because the
    9.10 + * buffer may be mapped read-only (e.g, in blkback driver) but lower-level
    9.11   * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
    9.12   * unnecessary copy from the aperture to the host buffer, and a page fault.
    9.13   */
    9.14 @@ -225,7 +225,7 @@ static void
    9.15  			dev  = dma_addr + size - len;
    9.16  			host = kmp + buffer.offset;
    9.17  			if (dir == DMA_FROM_DEVICE) {
    9.18 -				if (__copy_to_user(host, dev, bytes))
    9.19 +				if (__copy_to_user_inatomic(host, dev, bytes))
    9.20  					/* inaccessible */;
    9.21  			} else
    9.22  				memcpy(dev, host, bytes);
    9.23 @@ -238,7 +238,7 @@ static void
    9.24  		char *host = (char *)phys_to_virt(
    9.25  			page_to_pseudophys(buffer.page)) + buffer.offset;
    9.26  		if (dir == DMA_FROM_DEVICE) {
    9.27 -			if (__copy_to_user(host, dma_addr, size))
    9.28 +			if (__copy_to_user_inatomic(host, dma_addr, size))
    9.29  				/* inaccessible */;
    9.30  		} else if (dir == DMA_TO_DEVICE)
    9.31  			memcpy(dma_addr, host, size);
    10.1 --- a/linux-2.6-xen-sparse/arch/x86_64/Kconfig	Sat Apr 15 19:25:09 2006 +0100
    10.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/Kconfig	Sat Apr 15 19:25:21 2006 +0100
    10.3 @@ -568,7 +568,6 @@ config UNORDERED_IO
    10.4  	 from i386. Requires that the driver writer used memory barriers
    10.5  	 properly.
    10.6  
    10.7 -if !X86_64_XEN
    10.8  source "drivers/pci/pcie/Kconfig"
    10.9  
   10.10  source "drivers/pci/Kconfig"
   10.11 @@ -576,7 +575,6 @@ source "drivers/pci/Kconfig"
   10.12  source "drivers/pcmcia/Kconfig"
   10.13  
   10.14  source "drivers/pci/hotplug/Kconfig"
   10.15 -endif
   10.16  
   10.17  endmenu
   10.18  
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Sat Apr 15 19:25:09 2006 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Sat Apr 15 19:25:21 2006 +0100
    11.3 @@ -186,9 +186,8 @@ static void fast_flush_area(pending_req_
    11.4  		handle = pending_handle(req, i);
    11.5  		if (handle == BLKBACK_INVALID_HANDLE)
    11.6  			continue;
    11.7 -		unmap[invcount].host_addr    = vaddr(req, i);
    11.8 -		unmap[invcount].dev_bus_addr = 0;
    11.9 -		unmap[invcount].handle       = handle;
   11.10 +		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), GNTMAP_host_map,
   11.11 +				    handle);
   11.12  		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
   11.13  		invcount++;
   11.14  	}
   11.15 @@ -384,6 +383,8 @@ static void dispatch_rw_block_io(blkif_t
   11.16  	pending_req->nr_pages  = nseg;
   11.17  
   11.18  	for (i = 0; i < nseg; i++) {
   11.19 +		uint32_t flags;
   11.20 +
   11.21  		seg[i].nsec = req->seg[i].last_sect -
   11.22  			req->seg[i].first_sect + 1;
   11.23  
   11.24 @@ -392,12 +393,11 @@ static void dispatch_rw_block_io(blkif_t
   11.25  			goto fail_response;
   11.26  		preq.nr_sects += seg[i].nsec;
   11.27  
   11.28 -		map[i].host_addr = vaddr(pending_req, i);
   11.29 -		map[i].dom = blkif->domid;
   11.30 -		map[i].ref = req->seg[i].gref;
   11.31 -		map[i].flags = GNTMAP_host_map;
   11.32 +		flags = GNTMAP_host_map;
   11.33  		if ( operation == WRITE )
   11.34 -			map[i].flags |= GNTMAP_readonly;
   11.35 +			flags |= GNTMAP_readonly;
   11.36 +		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
   11.37 +				  req->seg[i].gref, blkif->domid);
   11.38  	}
   11.39  
   11.40  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Sat Apr 15 19:25:09 2006 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Sat Apr 15 19:25:21 2006 +0100
    12.3 @@ -58,10 +58,8 @@ static int map_frontend_page(blkif_t *bl
    12.4  	struct gnttab_map_grant_ref op;
    12.5  	int ret;
    12.6  
    12.7 -	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    12.8 -	op.flags     = GNTMAP_host_map;
    12.9 -	op.ref       = shared_page;
   12.10 -	op.dom       = blkif->domid;
   12.11 +	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
   12.12 +			  GNTMAP_host_map, shared_page, blkif->domid);
   12.13  
   12.14  	lock_vm_area(blkif->blk_ring_area);
   12.15  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
   12.16 @@ -90,9 +88,8 @@ static void unmap_frontend_page(blkif_t 
   12.17  	struct gnttab_unmap_grant_ref op;
   12.18  	int ret;
   12.19  
   12.20 -	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
   12.21 -	op.handle       = blkif->shmem_handle;
   12.22 -	op.dev_bus_addr = 0;
   12.23 +	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
   12.24 +			    GNTMAP_host_map, blkif->shmem_handle);
   12.25  
   12.26  	lock_vm_area(blkif->blk_ring_area);
   12.27  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Sat Apr 15 19:25:09 2006 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Sat Apr 15 19:25:21 2006 +0100
    13.3 @@ -418,9 +418,9 @@ static void fast_flush_area(int idx, int
    13.4  		if (BLKTAP_INVALID_HANDLE(handle))
    13.5  			continue;
    13.6  
    13.7 -		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
    13.8 -		unmap[op].dev_bus_addr = 0;
    13.9 -		unmap[op].handle = handle->kernel;
   13.10 +		gnttab_set_unmap_op(&unmap[op],
   13.11 +				    MMAP_VADDR(mmap_vstart, idx, i),
   13.12 +				    GNTMAP_host_map, handle->kernel);
   13.13  		op++;
   13.14  
   13.15  		if (create_lookup_pte_addr(
   13.16 @@ -430,9 +430,10 @@ static void fast_flush_area(int idx, int
   13.17  			DPRINTK("Couldn't get a pte addr!\n");
   13.18  			return;
   13.19  		}
   13.20 -		unmap[op].host_addr    = ptep;
   13.21 -		unmap[op].dev_bus_addr = 0;
   13.22 -		unmap[op].handle       = handle->user;
   13.23 +		gnttab_set_unmap_op(&unmap[op], ptep,
   13.24 +				    GNTMAP_host_map |
   13.25 +				    GNTMAP_application_map |
   13.26 +				    GNTMAP_contains_pte, handle->user);
   13.27  		op++;
   13.28              
   13.29  		BLKTAP_INVALIDATE_HANDLE(handle);
   13.30 @@ -703,21 +704,21 @@ static void dispatch_rw_block_io(blkif_t
   13.31  		unsigned long uvaddr;
   13.32  		unsigned long kvaddr;
   13.33  		uint64_t ptep;
   13.34 +		uint32_t flags;
   13.35  
   13.36  		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
   13.37  		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
   13.38  
   13.39 -		/* Map the remote page to kernel. */
   13.40 -		map[op].host_addr = kvaddr;
   13.41 -		map[op].dom   = blkif->domid;
   13.42 -		map[op].ref   = req->seg[i].gref;
   13.43 -		map[op].flags = GNTMAP_host_map;
   13.44 +		flags = GNTMAP_host_map;
   13.45  		/* This needs a bit more thought in terms of interposition: 
   13.46  		 * If we want to be able to modify pages during write using 
   13.47  		 * grant table mappings, the guest will either need to allow 
   13.48  		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
   13.49  		if (req->operation == BLKIF_OP_WRITE)
   13.50 -			map[op].flags |= GNTMAP_readonly;
   13.51 +			flags |= GNTMAP_readonly;
   13.52 +		/* Map the remote page to kernel. */
   13.53 +		gnttab_set_map_op(&map[op], kvaddr, flags, req->seg[i].gref,
   13.54 +				  blkif->domid);
   13.55  		op++;
   13.56  
   13.57  		/* Now map it to user. */
   13.58 @@ -728,14 +729,13 @@ static void dispatch_rw_block_io(blkif_t
   13.59  			goto bad_descriptor;
   13.60  		}
   13.61  
   13.62 -		map[op].host_addr = ptep;
   13.63 -		map[op].dom       = blkif->domid;
   13.64 -		map[op].ref       = req->seg[i].gref;
   13.65 -		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
   13.66 +		flags = GNTMAP_host_map | GNTMAP_application_map
   13.67  			| GNTMAP_contains_pte;
   13.68  		/* Above interposition comment applies here as well. */
   13.69  		if (req->operation == BLKIF_OP_WRITE)
   13.70 -			map[op].flags |= GNTMAP_readonly;
   13.71 +			flags |= GNTMAP_readonly;
   13.72 +		gnttab_set_map_op(&map[op], ptep, flags, req->seg[i].gref,
   13.73 +				  blkif->domid);
   13.74  		op++;
   13.75  	}
   13.76  
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Sat Apr 15 19:25:09 2006 +0100
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Sat Apr 15 19:25:21 2006 +0100
    14.3 @@ -33,10 +33,8 @@ static int map_frontend_page(blkif_t *bl
    14.4  	struct gnttab_map_grant_ref op;
    14.5  	int ret;
    14.6  
    14.7 -	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
    14.8 -	op.flags     = GNTMAP_host_map;
    14.9 -	op.ref       = shared_page;
   14.10 -	op.dom       = blkif->domid;
   14.11 +	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
   14.12 +			  GNTMAP_host_map, shared_page, blkif->domid);
   14.13  
   14.14  	lock_vm_area(blkif->blk_ring_area);
   14.15  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
   14.16 @@ -59,9 +57,8 @@ static void unmap_frontend_page(blkif_t 
   14.17  	struct gnttab_unmap_grant_ref op;
   14.18  	int ret;
   14.19  
   14.20 -	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
   14.21 -	op.handle       = blkif->shmem_handle;
   14.22 -	op.dev_bus_addr = 0;
   14.23 +	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
   14.24 +			    GNTMAP_host_map, blkif->shmem_handle);
   14.25  
   14.26  	lock_vm_area(blkif->blk_ring_area);
   14.27  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Sat Apr 15 19:25:09 2006 +0100
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Sat Apr 15 19:25:21 2006 +0100
    15.3 @@ -513,6 +513,8 @@ static void ack_dynirq(unsigned int irq)
    15.4  {
    15.5  	int evtchn = evtchn_from_irq(irq);
    15.6  
    15.7 +	move_native_irq(irq);
    15.8 +
    15.9  	if (VALID_EVTCHN(evtchn)) {
   15.10  		mask_evtchn(evtchn);
   15.11  		clear_evtchn(evtchn);
   15.12 @@ -636,6 +638,8 @@ static void ack_pirq(unsigned int irq)
   15.13  {
   15.14  	int evtchn = evtchn_from_irq(irq);
   15.15  
   15.16 +	move_native_irq(irq);
   15.17 +
   15.18  	if (VALID_EVTCHN(evtchn)) {
   15.19  		mask_evtchn(evtchn);
   15.20  		clear_evtchn(evtchn);
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Sat Apr 15 19:25:09 2006 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c	Sat Apr 15 19:25:21 2006 +0100
    16.3 @@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(gnttab_end_foreign_tra
    16.4  EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
    16.5  EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
    16.6  EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
    16.7 +EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
    16.8  EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
    16.9  EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
   16.10  EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
   16.11 @@ -325,6 +326,12 @@ gnttab_alloc_grant_references(u16 count,
   16.12  }
   16.13  
   16.14  int
   16.15 +gnttab_empty_grant_references(const grant_ref_t *private_head)
   16.16 +{
   16.17 +	return (*private_head == GNTTAB_LIST_END);
   16.18 +}
   16.19 +
   16.20 +int
   16.21  gnttab_claim_grant_reference(grant_ref_t *private_head)
   16.22  {
   16.23  	grant_ref_t g = *private_head;
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Sat Apr 15 19:25:09 2006 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Sat Apr 15 19:25:21 2006 +0100
    17.3 @@ -150,10 +150,8 @@ static int map_frontend_pages(
    17.4  	struct gnttab_map_grant_ref op;
    17.5  	int ret;
    17.6  
    17.7 -	op.host_addr = (unsigned long)netif->tx_comms_area->addr;
    17.8 -	op.flags     = GNTMAP_host_map;
    17.9 -	op.ref       = tx_ring_ref;
   17.10 -	op.dom       = netif->domid;
   17.11 +	gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
   17.12 +			  GNTMAP_host_map, tx_ring_ref, netif->domid);
   17.13      
   17.14  	lock_vm_area(netif->tx_comms_area);
   17.15  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
   17.16 @@ -168,10 +166,8 @@ static int map_frontend_pages(
   17.17  	netif->tx_shmem_ref    = tx_ring_ref;
   17.18  	netif->tx_shmem_handle = op.handle;
   17.19  
   17.20 -	op.host_addr = (unsigned long)netif->rx_comms_area->addr;
   17.21 -	op.flags     = GNTMAP_host_map;
   17.22 -	op.ref       = rx_ring_ref;
   17.23 -	op.dom       = netif->domid;
   17.24 +	gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
   17.25 +			  GNTMAP_host_map, rx_ring_ref, netif->domid);
   17.26  
   17.27  	lock_vm_area(netif->rx_comms_area);
   17.28  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
   17.29 @@ -194,18 +190,16 @@ static void unmap_frontend_pages(netif_t
   17.30  	struct gnttab_unmap_grant_ref op;
   17.31  	int ret;
   17.32  
   17.33 -	op.host_addr    = (unsigned long)netif->tx_comms_area->addr;
   17.34 -	op.handle       = netif->tx_shmem_handle;
   17.35 -	op.dev_bus_addr = 0;
   17.36 +	gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
   17.37 +			    GNTMAP_host_map, netif->tx_shmem_handle);
   17.38  
   17.39  	lock_vm_area(netif->tx_comms_area);
   17.40  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
   17.41  	unlock_vm_area(netif->tx_comms_area);
   17.42  	BUG_ON(ret);
   17.43  
   17.44 -	op.host_addr    = (unsigned long)netif->rx_comms_area->addr;
   17.45 -	op.handle       = netif->rx_shmem_handle;
   17.46 -	op.dev_bus_addr = 0;
   17.47 +	gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
   17.48 +			    GNTMAP_host_map, netif->rx_shmem_handle);
   17.49  
   17.50  	lock_vm_area(netif->rx_comms_area);
   17.51  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Sat Apr 15 19:25:09 2006 +0100
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Sat Apr 15 19:25:21 2006 +0100
    18.3 @@ -453,9 +453,9 @@ inline static void net_tx_action_dealloc
    18.4  	gop = tx_unmap_ops;
    18.5  	while (dc != dp) {
    18.6  		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
    18.7 -		gop->host_addr    = MMAP_VADDR(pending_idx);
    18.8 -		gop->dev_bus_addr = 0;
    18.9 -		gop->handle       = grant_tx_handle[pending_idx];
   18.10 +		gnttab_set_unmap_op(gop, MMAP_VADDR(pending_idx),
   18.11 +				    GNTMAP_host_map,
   18.12 +				    grant_tx_handle[pending_idx]);
   18.13  		gop++;
   18.14  	}
   18.15  	ret = HYPERVISOR_grant_table_op(
   18.16 @@ -579,10 +579,9 @@ static void net_tx_action(unsigned long 
   18.17  		/* Packets passed to netif_rx() must have some headroom. */
   18.18  		skb_reserve(skb, 16);
   18.19  
   18.20 -		mop->host_addr = MMAP_VADDR(pending_idx);
   18.21 -		mop->dom       = netif->domid;
   18.22 -		mop->ref       = txreq.gref;
   18.23 -		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
   18.24 +		gnttab_set_map_op(mop, MMAP_VADDR(pending_idx),
   18.25 +				  GNTMAP_host_map | GNTMAP_readonly,
   18.26 +				  txreq.gref, netif->domid);
   18.27  		mop++;
   18.28  
   18.29  		memcpy(&pending_tx_info[pending_idx].req,
    19.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Sat Apr 15 19:25:09 2006 +0100
    19.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Sat Apr 15 19:25:21 2006 +0100
    19.3 @@ -106,7 +106,7 @@ struct netfront_info
    19.4  	/* Receive-ring batched refills. */
    19.5  #define RX_MIN_TARGET 8
    19.6  #define RX_DFL_MIN_TARGET 64
    19.7 -#define RX_MAX_TARGET NET_RX_RING_SIZE
    19.8 +#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
    19.9  	int rx_min_target, rx_max_target, rx_target;
   19.10  	struct sk_buff_head rx_batch;
   19.11  
   19.12 @@ -119,6 +119,7 @@ struct netfront_info
   19.13  	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
   19.14  	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
   19.15  
   19.16 +#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
   19.17  	grant_ref_t gref_tx_head;
   19.18  	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
   19.19  	grant_ref_t gref_rx_head;
   19.20 @@ -505,8 +506,9 @@ static void network_tx_buf_gc(struct net
   19.21  	} while (prod != np->tx.sring->rsp_prod);
   19.22  
   19.23   out:
   19.24 -	if (np->tx_full &&
   19.25 -	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
   19.26 +	if ((np->tx_full) &&
   19.27 +	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE) &&
   19.28 +	    !gnttab_empty_grant_references(&np->gref_tx_head)) {
   19.29  		np->tx_full = 0;
   19.30  		if (np->user_state == UST_OPEN)
   19.31  			netif_wake_queue(dev);
   19.32 @@ -705,7 +707,8 @@ static int network_start_xmit(struct sk_
   19.33  
   19.34  	network_tx_buf_gc(dev);
   19.35  
   19.36 -	if (RING_FULL(&np->tx)) {
   19.37 +	if (RING_FULL(&np->tx) ||
   19.38 +	    gnttab_empty_grant_references(&np->gref_tx_head)) {
   19.39  		np->tx_full = 1;
   19.40  		netif_stop_queue(dev);
   19.41  	}
   19.42 @@ -1140,14 +1143,14 @@ static int create_netdev(int handle, str
   19.43  	}
   19.44  
   19.45  	/* A grant for every tx ring slot */
   19.46 -	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
   19.47 +	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
   19.48  					  &np->gref_tx_head) < 0) {
   19.49  		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
   19.50  		err = -ENOMEM;
   19.51  		goto exit;
   19.52  	}
   19.53  	/* A grant for every rx ring slot */
   19.54 -	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
   19.55 +	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
   19.56  					  &np->gref_rx_head) < 0) {
   19.57  		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
   19.58  		gnttab_free_grant_references(np->gref_tx_head);
    20.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Sat Apr 15 19:25:09 2006 +0100
    20.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Sat Apr 15 19:25:21 2006 +0100
    20.3 @@ -13,6 +13,7 @@
    20.4  
    20.5  #include "common.h"
    20.6  #include <xen/balloon.h>
    20.7 +#include <xen/gnttab.h>
    20.8  
    20.9  static kmem_cache_t *tpmif_cachep;
   20.10  int num_frontends = 0;
   20.11 @@ -72,12 +73,10 @@ tpmif_t *tpmif_find(domid_t domid, long 
   20.12  static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
   20.13  {
   20.14  	int ret;
   20.15 -	struct gnttab_map_grant_ref op = {
   20.16 -		.host_addr = (unsigned long)tpmif->tx_area->addr,
   20.17 -		.flags = GNTMAP_host_map,
   20.18 -		.ref = shared_page,
   20.19 -		.dom = tpmif->domid,
   20.20 -	};
   20.21 +	struct gnttab_map_grant_ref op;
   20.22 +
   20.23 +	gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
   20.24 +			  GNTMAP_host_map, shared_page, tpmif->domid);
   20.25  
   20.26  	lock_vm_area(tpmif->tx_area);
   20.27  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
   20.28 @@ -100,9 +99,8 @@ static void unmap_frontend_page(tpmif_t 
   20.29  	struct gnttab_unmap_grant_ref op;
   20.30  	int ret;
   20.31  
   20.32 -	op.host_addr    = (unsigned long)tpmif->tx_area->addr;
   20.33 -	op.handle       = tpmif->shmem_handle;
   20.34 -	op.dev_bus_addr = 0;
   20.35 +	gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
   20.36 +			    GNTMAP_host_map, tpmif->shmem_handle);
   20.37  
   20.38  	lock_vm_area(tpmif->tx_area);
   20.39  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
    21.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Sat Apr 15 19:25:09 2006 +0100
    21.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c	Sat Apr 15 19:25:21 2006 +0100
    21.3 @@ -21,6 +21,7 @@
    21.4  #include <asm/uaccess.h>
    21.5  #include <xen/xenbus.h>
    21.6  #include <xen/interface/grant_table.h>
    21.7 +#include <xen/gnttab.h>
    21.8  
    21.9  /* local data structures */
   21.10  struct data_exchange {
   21.11 @@ -278,10 +279,8 @@ int _packet_write(struct packet *pak,
   21.12  			return 0;
   21.13  		}
   21.14  
   21.15 -		map_op.host_addr = MMAP_VADDR(tpmif, i);
   21.16 -		map_op.flags = GNTMAP_host_map;
   21.17 -		map_op.ref = tx->ref;
   21.18 -		map_op.dom = tpmif->domid;
   21.19 +		gnttab_set_map_op(&map_op, MMAP_VADDR(tpmif, i),
   21.20 +				  GNTMAP_host_map, tx->ref, tpmif->domid);
   21.21  
   21.22  		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
   21.23  						       &map_op, 1))) {
   21.24 @@ -308,9 +307,8 @@ int _packet_write(struct packet *pak,
   21.25  		}
   21.26  		tx->size = tocopy;
   21.27  
   21.28 -		unmap_op.host_addr = MMAP_VADDR(tpmif, i);
   21.29 -		unmap_op.handle = handle;
   21.30 -		unmap_op.dev_bus_addr = 0;
   21.31 +		gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
   21.32 +				    GNTMAP_host_map, handle);
   21.33  
   21.34  		if (unlikely
   21.35  		    (HYPERVISOR_grant_table_op
   21.36 @@ -422,10 +420,8 @@ static int packet_read_shmem(struct pack
   21.37  
   21.38  		tx = &tpmif->tx->ring[i].req;
   21.39  
   21.40 -		map_op.host_addr = MMAP_VADDR(tpmif, i);
   21.41 -		map_op.flags = GNTMAP_host_map;
   21.42 -		map_op.ref = tx->ref;
   21.43 -		map_op.dom = tpmif->domid;
   21.44 +		gnttab_set_map_op(&map_op, MMAP_VADDR(tpmif, i),
   21.45 +				  GNTMAP_host_map, tx->ref, tpmif->domid);
   21.46  
   21.47  		if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
   21.48  						       &map_op, 1))) {
   21.49 @@ -461,9 +457,8 @@ static int packet_read_shmem(struct pack
   21.50  			tpmif->domid, buffer[offset], buffer[offset + 1],
   21.51  			buffer[offset + 2], buffer[offset + 3]);
   21.52  
   21.53 -		unmap_op.host_addr = MMAP_VADDR(tpmif, i);
   21.54 -		unmap_op.handle = handle;
   21.55 -		unmap_op.dev_bus_addr = 0;
   21.56 +		gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
   21.57 +				    GNTMAP_host_map, handle);
   21.58  
   21.59  		if (unlikely
   21.60  		    (HYPERVISOR_grant_table_op
    22.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Sat Apr 15 19:25:09 2006 +0100
    22.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/xenbus.c	Sat Apr 15 19:25:21 2006 +0100
    22.3 @@ -164,10 +164,10 @@ static void frontend_changed(struct xenb
    22.4  
    22.5  	switch (frontend_state) {
    22.6  	case XenbusStateInitialising:
    22.7 -	case XenbusStateConnected:
    22.8 +	case XenbusStateInitialised:
    22.9  		break;
   22.10  
   22.11 -	case XenbusStateInitialised:
   22.12 +	case XenbusStateConnected:
   22.13  		err = connect_ring(be);
   22.14  		if (err) {
   22.15  			return;
    23.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Sat Apr 15 19:25:09 2006 +0100
    23.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Sat Apr 15 19:25:21 2006 +0100
    23.3 @@ -334,12 +334,6 @@ again:
    23.4  		goto abort_transaction;
    23.5  	}
    23.6  
    23.7 -	err = xenbus_printf(xbt, dev->nodename,
    23.8 -	                    "state", "%d", XenbusStateInitialised);
    23.9 -	if (err) {
   23.10 -		goto abort_transaction;
   23.11 -	}
   23.12 -
   23.13  	err = xenbus_transaction_end(xbt, 0);
   23.14  	if (err == -EAGAIN)
   23.15  		goto again;
   23.16 @@ -347,6 +341,9 @@ again:
   23.17  		xenbus_dev_fatal(dev, err, "completing transaction");
   23.18  		goto destroy_tpmring;
   23.19  	}
   23.20 +
   23.21 +	xenbus_switch_state(dev, XenbusStateConnected);
   23.22 +
   23.23  	return 0;
   23.24  
   23.25  abort_transaction:
   23.26 @@ -387,6 +384,7 @@ static void backend_changed(struct xenbu
   23.27  		if (tp->is_suspended == 0) {
   23.28  			device_unregister(&dev->dev);
   23.29  		}
   23.30 +		xenbus_switch_state(dev, XenbusStateClosed);
   23.31  		break;
   23.32  	}
   23.33  }
   23.34 @@ -439,6 +437,7 @@ static int tpmfront_suspend(struct xenbu
   23.35  
   23.36  	/* lock, so no app can send */
   23.37  	mutex_lock(&suspend_lock);
   23.38 +	xenbus_switch_state(dev, XenbusStateClosed);
   23.39  	tp->is_suspended = 1;
   23.40  
   23.41  	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
    24.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_backend_client.c	Sat Apr 15 19:25:09 2006 +0100
    24.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_backend_client.c	Sat Apr 15 19:25:21 2006 +0100
    24.3 @@ -37,11 +37,7 @@
    24.4  /* Based on Rusty Russell's skeleton driver's map_page */
    24.5  int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
    24.6  {
    24.7 -	struct gnttab_map_grant_ref op = {
    24.8 -		.flags = GNTMAP_host_map,
    24.9 -		.ref   = gnt_ref,
   24.10 -		.dom   = dev->otherend_id,
   24.11 -	};
   24.12 +	struct gnttab_map_grant_ref op;
   24.13  	struct vm_struct *area;
   24.14  
   24.15  	*vaddr = NULL;
   24.16 @@ -50,8 +46,9 @@ int xenbus_map_ring_valloc(struct xenbus
   24.17  	if (!area)
   24.18  		return -ENOMEM;
   24.19  
   24.20 -	op.host_addr = (unsigned long)area->addr;
   24.21 -
   24.22 +	gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
   24.23 +			  gnt_ref, dev->otherend_id);
   24.24 +	
   24.25  	lock_vm_area(area);
   24.26  	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
   24.27  	unlock_vm_area(area);
   24.28 @@ -76,13 +73,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc
   24.29  int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
   24.30  		   grant_handle_t *handle, void *vaddr)
   24.31  {
   24.32 -	struct gnttab_map_grant_ref op = {
   24.33 -		.host_addr = (unsigned long)vaddr,
   24.34 -		.flags     = GNTMAP_host_map,
   24.35 -		.ref       = gnt_ref,
   24.36 -		.dom       = dev->otherend_id,
   24.37 -	};
   24.38 -
   24.39 +	struct gnttab_map_grant_ref op;
   24.40 +	
   24.41 +	gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
   24.42 +			  gnt_ref, dev->otherend_id);
   24.43  	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
   24.44  
   24.45  	if (op.status != GNTST_okay) {
   24.46 @@ -101,9 +95,7 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
   24.47  int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
   24.48  {
   24.49  	struct vm_struct *area;
   24.50 -	struct gnttab_unmap_grant_ref op = {
   24.51 -		.host_addr = (unsigned long)vaddr,
   24.52 -	};
   24.53 +	struct gnttab_unmap_grant_ref op;
   24.54  
   24.55  	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
   24.56  	 * method so that we don't have to muck with vmalloc internals here.
   24.57 @@ -124,7 +116,8 @@ int xenbus_unmap_ring_vfree(struct xenbu
   24.58  		return GNTST_bad_virt_addr;
   24.59  	}
   24.60  
   24.61 -	op.handle = (grant_handle_t)area->phys_addr;
   24.62 +	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
   24.63 +			    (grant_handle_t)area->phys_addr);
   24.64  
   24.65  	lock_vm_area(area);
   24.66  	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   24.67 @@ -145,11 +138,10 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfre
   24.68  int xenbus_unmap_ring(struct xenbus_device *dev,
   24.69  		     grant_handle_t handle, void *vaddr)
   24.70  {
   24.71 -	struct gnttab_unmap_grant_ref op = {
   24.72 -		.host_addr = (unsigned long)vaddr,
   24.73 -		.handle    = handle,
   24.74 -	};
   24.75 +	struct gnttab_unmap_grant_ref op;
   24.76  
   24.77 +	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
   24.78 +			    handle);
   24.79  	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   24.80  
   24.81  	if (op.status != GNTST_okay)
    25.1 --- a/linux-2.6-xen-sparse/include/xen/gnttab.h	Sat Apr 15 19:25:09 2006 +0100
    25.2 +++ b/linux-2.6-xen-sparse/include/xen/gnttab.h	Sat Apr 15 19:25:21 2006 +0100
    25.3 @@ -40,6 +40,7 @@
    25.4  #include <linux/config.h>
    25.5  #include <asm/hypervisor.h>
    25.6  #include <xen/interface/grant_table.h>
    25.7 +#include <xen/features.h>
    25.8  
    25.9  /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
   25.10  #ifdef __ia64__
   25.11 @@ -90,6 +91,8 @@ void gnttab_free_grant_reference(grant_r
   25.12  
   25.13  void gnttab_free_grant_references(grant_ref_t head);
   25.14  
   25.15 +int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
   25.16 +
   25.17  int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
   25.18  
   25.19  void gnttab_release_grant_reference(grant_ref_t *private_head,
   25.20 @@ -113,6 +116,37 @@ void gnttab_grant_foreign_transfer_ref(g
   25.21  int gnttab_suspend(void);
   25.22  int gnttab_resume(void);
   25.23  
   25.24 +static inline void
   25.25 +gnttab_set_map_op(struct gnttab_map_grant_ref *map, unsigned long addr,
   25.26 +		  uint32_t flags, grant_ref_t ref, domid_t domid)
   25.27 +{
   25.28 +	if (flags & GNTMAP_contains_pte)
   25.29 +		map->host_addr = addr;
   25.30 +	else if (xen_feature(XENFEAT_auto_translated_physmap))
   25.31 +		map->host_addr = __pa(addr);
   25.32 +	else
   25.33 +		map->host_addr = addr;
   25.34 +
   25.35 +	map->flags = flags;
   25.36 +	map->ref = ref;
   25.37 +	map->dom = domid;
   25.38 +}
   25.39 +
   25.40 +static inline void
   25.41 +gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, unsigned long addr,
   25.42 +		    uint32_t flags, grant_handle_t handle)
   25.43 +{
   25.44 +	if (flags & GNTMAP_contains_pte)
   25.45 +		unmap->host_addr = addr;
   25.46 +	else if (xen_feature(XENFEAT_auto_translated_physmap))
   25.47 +		unmap->host_addr = __pa(addr);
   25.48 +	else
   25.49 +		unmap->host_addr = addr;
   25.50 +
   25.51 +	unmap->handle = handle;
   25.52 +	unmap->dev_bus_addr = 0;
   25.53 +}
   25.54 +
   25.55  #endif /* __ASM_GNTTAB_H__ */
   25.56  
   25.57  /*
    26.1 --- a/linux-2.6-xen-sparse/mkbuildtree	Sat Apr 15 19:25:09 2006 +0100
    26.2 +++ b/linux-2.6-xen-sparse/mkbuildtree	Sat Apr 15 19:25:21 2006 +0100
    26.3 @@ -90,8 +90,8 @@ abs_to_rel ${AD} ${AS}
    26.4  RS=$DESTPATH
    26.5  
    26.6  # Arch-specific pre-processing
    26.7 -if [ -x arch/${LINUX_ARCH}/xen-mkbuildtree-pre ]; then
    26.8 -	arch/${LINUX_ARCH}/xen-mkbuildtree-pre
    26.9 +if [ -e arch/${LINUX_ARCH}/xen-mkbuildtree-pre ]; then
   26.10 +	bash arch/${LINUX_ARCH}/xen-mkbuildtree-pre
   26.11  fi
   26.12  
   26.13  # Remove old copies of files and directories at the destination
   26.14 @@ -115,6 +115,6 @@ relative_lndir ../../../${RS}/../xen/inc
   26.15  
   26.16  # Arch-specific post-processing
   26.17  cd ${AD}
   26.18 -if [ -x arch/${LINUX_ARCH}/xen-mkbuildtree-post ]; then
   26.19 -	arch/${LINUX_ARCH}/xen-mkbuildtree-post
   26.20 +if [ -e arch/${LINUX_ARCH}/xen-mkbuildtree-post ]; then
   26.21 +	bash arch/${LINUX_ARCH}/xen-mkbuildtree-post
   26.22  fi
    27.1 --- a/linux-2.6-xen-sparse/net/core/dev.c	Sat Apr 15 19:25:09 2006 +0100
    27.2 +++ b/linux-2.6-xen-sparse/net/core/dev.c	Sat Apr 15 19:25:21 2006 +0100
    27.3 @@ -1294,6 +1294,7 @@ int dev_queue_xmit(struct sk_buff *skb)
    27.4  		if ((skb->h.raw + skb->csum + 2) > skb->tail)
    27.5  			goto out_kfree_skb;
    27.6  		skb->ip_summed = CHECKSUM_HW;
    27.7 +		skb->proto_csum_blank = 0;
    27.8  	}
    27.9  #endif
   27.10  
    28.1 --- a/tools/debugger/gdb/gdbbuild	Sat Apr 15 19:25:09 2006 +0100
    28.2 +++ b/tools/debugger/gdb/gdbbuild	Sat Apr 15 19:25:21 2006 +0100
    28.3 @@ -7,7 +7,7 @@ rm -rf gdb-6.2.1 gdb-6.2.1-linux-i386-xe
    28.4  tar xjf gdb-6.2.1.tar.bz2
    28.5  
    28.6  cd gdb-6.2.1-xen-sparse
    28.7 -./mkbuildtree ../gdb-6.2.1
    28.8 +bash ./mkbuildtree ../gdb-6.2.1
    28.9  
   28.10  cd ..
   28.11  mkdir gdb-6.2.1-linux-i386-xen
    29.1 --- a/tools/examples/xend-config.sxp	Sat Apr 15 19:25:09 2006 +0100
    29.2 +++ b/tools/examples/xend-config.sxp	Sat Apr 15 19:25:21 2006 +0100
    29.3 @@ -127,3 +127,6 @@
    29.4  
    29.5  # Whether to enable core-dumps when domains crash.
    29.6  #(enable-dump no)
    29.7 +
    29.8 +# The tool used for initiating virtual TPM migration
    29.9 +#(external-migration-tool '')
    30.1 --- a/tools/firmware/hvmloader/Makefile	Sat Apr 15 19:25:09 2006 +0100
    30.2 +++ b/tools/firmware/hvmloader/Makefile	Sat Apr 15 19:25:21 2006 +0100
    30.3 @@ -21,7 +21,7 @@
    30.4  # External CFLAGS can do more harm than good.
    30.5  CFLAGS :=
    30.6  
    30.7 -XEN_TARGET_ARCH = x86_32
    30.8 +override XEN_TARGET_ARCH = x86_32
    30.9  XEN_ROOT = ../../..
   30.10  include $(XEN_ROOT)/Config.mk
   30.11  
    31.1 --- a/tools/firmware/vmxassist/Makefile	Sat Apr 15 19:25:09 2006 +0100
    31.2 +++ b/tools/firmware/vmxassist/Makefile	Sat Apr 15 19:25:21 2006 +0100
    31.3 @@ -21,7 +21,7 @@
    31.4  # External CFLAGS can do more harm than good.
    31.5  CFLAGS :=
    31.6  
    31.7 -XEN_TARGET_ARCH = x86_32
    31.8 +override XEN_TARGET_ARCH = x86_32
    31.9  XEN_ROOT = ../../..
   31.10  include $(XEN_ROOT)/Config.mk
   31.11  
    32.1 --- a/tools/ioemu/vl.c	Sat Apr 15 19:25:09 2006 +0100
    32.2 +++ b/tools/ioemu/vl.c	Sat Apr 15 19:25:21 2006 +0100
    32.3 @@ -138,7 +138,7 @@ int adlib_enabled = 1;
    32.4  int gus_enabled = 1;
    32.5  int pci_enabled = 1;
    32.6  int prep_enabled = 0;
    32.7 -int rtc_utc = 0;
    32.8 +int rtc_utc = 1;
    32.9  int cirrus_vga_enabled = 1;
   32.10  int vga_accelerate = 1;
   32.11  int graphic_width = 800;
    33.1 --- a/tools/libxc/xc_bvtsched.c	Sat Apr 15 19:25:09 2006 +0100
    33.2 +++ b/tools/libxc/xc_bvtsched.c	Sat Apr 15 19:25:21 2006 +0100
    33.3 @@ -1,8 +1,8 @@
    33.4  /******************************************************************************
    33.5   * xc_bvtsched.c
    33.6 - * 
    33.7 + *
    33.8   * API for manipulating parameters of the Borrowed Virtual Time scheduler.
    33.9 - * 
   33.10 + *
   33.11   * Copyright (c) 2003, K A Fraser.
   33.12   */
   33.13  
   33.14 @@ -26,7 +26,7 @@ int xc_bvtsched_global_get(int xc_handle
   33.15  {
   33.16      DECLARE_DOM0_OP;
   33.17      int ret;
   33.18 -    
   33.19 +
   33.20      op.cmd = DOM0_SCHEDCTL;
   33.21      op.u.schedctl.sched_id = SCHED_BVT;
   33.22      op.u.schedctl.direction = SCHED_INFO_GET;
   33.23 @@ -71,7 +71,7 @@ int xc_bvtsched_domain_get(int xc_handle
   33.24                             long long *warpl,
   33.25                             long long *warpu)
   33.26  {
   33.27 -    
   33.28 +
   33.29      DECLARE_DOM0_OP;
   33.30      int ret;
   33.31      struct bvt_adjdom *adjptr = &op.u.adjustdom.u.bvt;
    34.1 --- a/tools/libxc/xc_core.c	Sat Apr 15 19:25:09 2006 +0100
    34.2 +++ b/tools/libxc/xc_core.c	Sat Apr 15 19:25:21 2006 +0100
    34.3 @@ -23,7 +23,7 @@ copy_from_domain_page(int xc_handle,
    34.4      return 0;
    34.5  }
    34.6  
    34.7 -int 
    34.8 +int
    34.9  xc_domain_dumpcore_via_callback(int xc_handle,
   34.10                                  uint32_t domid,
   34.11                                  void *args,
   34.12 @@ -45,13 +45,13 @@ xc_domain_dumpcore_via_callback(int xc_h
   34.13          PERROR("Could not allocate dump_mem");
   34.14          goto error_out;
   34.15      }
   34.16 - 
   34.17 +
   34.18      if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
   34.19      {
   34.20          PERROR("Could not get info for domain");
   34.21          goto error_out;
   34.22      }
   34.23 - 
   34.24 +
   34.25      if ( domid != info.domid )
   34.26      {
   34.27          PERROR("Domain %d does not exist", domid);
   34.28 @@ -61,10 +61,10 @@ xc_domain_dumpcore_via_callback(int xc_h
   34.29      for ( i = 0; i <= info.max_vcpu_id; i++ )
   34.30          if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0)
   34.31              nr_vcpus++;
   34.32 - 
   34.33 +
   34.34      nr_pages = info.nr_pages;
   34.35  
   34.36 -    header.xch_magic = XC_CORE_MAGIC; 
   34.37 +    header.xch_magic = XC_CORE_MAGIC;
   34.38      header.xch_nr_vcpus = nr_vcpus;
   34.39      header.xch_nr_pages = nr_pages;
   34.40      header.xch_ctxt_offset = sizeof(struct xc_core_header);
   34.41 @@ -74,7 +74,7 @@ xc_domain_dumpcore_via_callback(int xc_h
   34.42                   (sizeof(vcpu_guest_context_t) * nr_vcpus) +
   34.43                   (nr_pages * sizeof(unsigned long)));
   34.44      header.xch_pages_offset = round_pgup(dummy_len);
   34.45 -    
   34.46 +
   34.47      sts = dump_rtn(args, (char *)&header, sizeof(struct xc_core_header));
   34.48      if ( sts != 0 )
   34.49          goto error_out;
   34.50 @@ -150,7 +150,7 @@ static int local_file_dump(void *args, c
   34.51      return 0;
   34.52  }
   34.53  
   34.54 -int 
   34.55 +int
   34.56  xc_domain_dumpcore(int xc_handle,
   34.57                     uint32_t domid,
   34.58                     const char *corename)
   34.59 @@ -163,7 +163,7 @@ xc_domain_dumpcore(int xc_handle,
   34.60          PERROR("Could not open corefile %s: %s", corename, strerror(errno));
   34.61          return -errno;
   34.62      }
   34.63 - 
   34.64 +
   34.65      sts = xc_domain_dumpcore_via_callback(
   34.66          xc_handle, domid, &da, &local_file_dump);
   34.67  
    35.1 --- a/tools/libxc/xc_domain.c	Sat Apr 15 19:25:09 2006 +0100
    35.2 +++ b/tools/libxc/xc_domain.c	Sat Apr 15 19:25:21 2006 +0100
    35.3 @@ -1,8 +1,8 @@
    35.4  /******************************************************************************
    35.5   * xc_domain.c
    35.6 - * 
    35.7 + *
    35.8   * API for manipulating and obtaining information on domains.
    35.9 - * 
   35.10 + *
   35.11   * Copyright (c) 2003, K A Fraser.
   35.12   */
   35.13  
   35.14 @@ -26,17 +26,17 @@ int xc_domain_create(int xc_handle,
   35.15  
   35.16      *pdomid = (uint16_t)op.u.createdomain.domain;
   35.17      return 0;
   35.18 -}    
   35.19 +}
   35.20  
   35.21  
   35.22 -int xc_domain_pause(int xc_handle, 
   35.23 +int xc_domain_pause(int xc_handle,
   35.24                      uint32_t domid)
   35.25  {
   35.26      DECLARE_DOM0_OP;
   35.27      op.cmd = DOM0_PAUSEDOMAIN;
   35.28      op.u.pausedomain.domain = (domid_t)domid;
   35.29      return do_dom0_op(xc_handle, &op);
   35.30 -}    
   35.31 +}
   35.32  
   35.33  
   35.34  int xc_domain_unpause(int xc_handle,
   35.35 @@ -46,7 +46,7 @@ int xc_domain_unpause(int xc_handle,
   35.36      op.cmd = DOM0_UNPAUSEDOMAIN;
   35.37      op.u.unpausedomain.domain = (domid_t)domid;
   35.38      return do_dom0_op(xc_handle, &op);
   35.39 -}    
   35.40 +}
   35.41  
   35.42  
   35.43  int xc_domain_destroy(int xc_handle,
   35.44 @@ -88,7 +88,7 @@ int xc_domain_shutdown(int xc_handle,
   35.45  
   35.46  
   35.47  int xc_vcpu_setaffinity(int xc_handle,
   35.48 -                        uint32_t domid, 
   35.49 +                        uint32_t domid,
   35.50                          int vcpu,
   35.51                          cpumap_t cpumap)
   35.52  {
   35.53 @@ -109,7 +109,7 @@ int xc_domain_getinfo(int xc_handle,
   35.54      unsigned int nr_doms;
   35.55      uint32_t next_domid = first_domid;
   35.56      DECLARE_DOM0_OP;
   35.57 -    int rc = 0; 
   35.58 +    int rc = 0;
   35.59  
   35.60      memset(info, 0, max_doms*sizeof(xc_dominfo_t));
   35.61  
   35.62 @@ -127,8 +127,8 @@ int xc_domain_getinfo(int xc_handle,
   35.63          info->blocked  = !!(op.u.getdomaininfo.flags & DOMFLAGS_BLOCKED);
   35.64          info->running  = !!(op.u.getdomaininfo.flags & DOMFLAGS_RUNNING);
   35.65  
   35.66 -        info->shutdown_reason = 
   35.67 -            (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) & 
   35.68 +        info->shutdown_reason =
   35.69 +            (op.u.getdomaininfo.flags>>DOMFLAGS_SHUTDOWNSHIFT) &
   35.70              DOMFLAGS_SHUTDOWNMASK;
   35.71  
   35.72          if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) )
   35.73 @@ -152,7 +152,7 @@ int xc_domain_getinfo(int xc_handle,
   35.74          info++;
   35.75      }
   35.76  
   35.77 -    if( !nr_doms ) return rc; 
   35.78 +    if( !nr_doms ) return rc;
   35.79  
   35.80      return nr_doms;
   35.81  }
   35.82 @@ -167,7 +167,7 @@ int xc_domain_getinfolist(int xc_handle,
   35.83  
   35.84      if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
   35.85          return -1;
   35.86 -    
   35.87 +
   35.88      op.cmd = DOM0_GETDOMAININFOLIST;
   35.89      op.u.getdomaininfolist.first_domain = first_domain;
   35.90      op.u.getdomaininfolist.max_domains  = max_domains;
   35.91 @@ -177,10 +177,10 @@ int xc_domain_getinfolist(int xc_handle,
   35.92          ret = -1;
   35.93      else
   35.94          ret = op.u.getdomaininfolist.num_domains;
   35.95 -    
   35.96 +
   35.97      if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
   35.98          ret = -1;
   35.99 -    
  35.100 +
  35.101      return ret;
  35.102  }
  35.103  
  35.104 @@ -209,7 +209,7 @@ int xc_vcpu_getcontext(int xc_handle,
  35.105  
  35.106  
  35.107  int xc_shadow_control(int xc_handle,
  35.108 -                      uint32_t domid, 
  35.109 +                      uint32_t domid,
  35.110                        unsigned int sop,
  35.111                        unsigned long *dirty_bitmap,
  35.112                        unsigned long pages,
  35.113 @@ -238,11 +238,11 @@ int xc_domain_setcpuweight(int xc_handle
  35.114  {
  35.115      int sched_id;
  35.116      int ret;
  35.117 -    
  35.118 +
  35.119      /* Figure out which scheduler is currently used: */
  35.120      if ( (ret = xc_sched_id(xc_handle, &sched_id)) != 0 )
  35.121          return ret;
  35.122 -    
  35.123 +
  35.124      switch ( sched_id )
  35.125      {
  35.126          case SCHED_BVT:
  35.127 @@ -253,20 +253,20 @@ int xc_domain_setcpuweight(int xc_handle
  35.128              long long warpl;
  35.129              long long warpu;
  35.130  
  35.131 -            /* Preserve all the scheduling parameters apart 
  35.132 +            /* Preserve all the scheduling parameters apart
  35.133                 of MCU advance. */
  35.134              if ( (ret = xc_bvtsched_domain_get(
  35.135 -                xc_handle, domid, &mcuadv, 
  35.136 +                xc_handle, domid, &mcuadv,
  35.137                  &warpback, &warpvalue, &warpl, &warpu)) != 0 )
  35.138                  return ret;
  35.139 -            
  35.140 +
  35.141              /* The MCU advance is inverse of the weight.
  35.142                 Default value of the weight is 1, default mcuadv 10.
  35.143                 The scaling factor is therefore 10. */
  35.144              if ( weight > 0 )
  35.145                  mcuadv = 10 / weight;
  35.146 -            
  35.147 -            ret = xc_bvtsched_domain_set(xc_handle, domid, mcuadv, 
  35.148 +
  35.149 +            ret = xc_bvtsched_domain_set(xc_handle, domid, mcuadv,
  35.150                                           warpback, warpvalue, warpl, warpu);
  35.151              break;
  35.152          }
  35.153 @@ -276,7 +276,7 @@ int xc_domain_setcpuweight(int xc_handle
  35.154  }
  35.155  
  35.156  int xc_domain_setmaxmem(int xc_handle,
  35.157 -                        uint32_t domid, 
  35.158 +                        uint32_t domid,
  35.159                          unsigned int max_memkb)
  35.160  {
  35.161      DECLARE_DOM0_OP;
  35.162 @@ -287,7 +287,7 @@ int xc_domain_setmaxmem(int xc_handle,
  35.163  }
  35.164  
  35.165  int xc_domain_memory_increase_reservation(int xc_handle,
  35.166 -                                          uint32_t domid, 
  35.167 +                                          uint32_t domid,
  35.168                                            unsigned long nr_extents,
  35.169                                            unsigned int extent_order,
  35.170                                            unsigned int address_bits,
  35.171 @@ -297,7 +297,7 @@ int xc_domain_memory_increase_reservatio
  35.172      struct xen_memory_reservation reservation = {
  35.173          .extent_start = extent_start, /* may be NULL */
  35.174          .nr_extents   = nr_extents,
  35.175 -        .extent_order = extent_order,  
  35.176 +        .extent_order = extent_order,
  35.177          .address_bits = address_bits,
  35.178          .domid        = domid
  35.179      };
  35.180 @@ -319,16 +319,16 @@ int xc_domain_memory_increase_reservatio
  35.181  }
  35.182  
  35.183  int xc_domain_memory_decrease_reservation(int xc_handle,
  35.184 -                                          uint32_t domid, 
  35.185 +                                          uint32_t domid,
  35.186                                            unsigned long nr_extents,
  35.187                                            unsigned int extent_order,
  35.188                                            unsigned long *extent_start)
  35.189  {
  35.190      int err;
  35.191      struct xen_memory_reservation reservation = {
  35.192 -        .extent_start = extent_start, 
  35.193 +        .extent_start = extent_start,
  35.194          .nr_extents   = nr_extents,
  35.195 -        .extent_order = extent_order,  
  35.196 +        .extent_order = extent_order,
  35.197          .address_bits = 0,
  35.198          .domid        = domid
  35.199      };
  35.200 @@ -411,7 +411,7 @@ int xc_domain_max_vcpus(int xc_handle, u
  35.201      return do_dom0_op(xc_handle, &op);
  35.202  }
  35.203  
  35.204 -int xc_domain_sethandle(int xc_handle, uint32_t domid, 
  35.205 +int xc_domain_sethandle(int xc_handle, uint32_t domid,
  35.206                          xen_domain_handle_t handle)
  35.207  {
  35.208      DECLARE_DOM0_OP;
  35.209 @@ -506,7 +506,7 @@ int xc_domain_iomem_permission(int xc_ha
  35.210      op.cmd = DOM0_IOMEM_PERMISSION;
  35.211      op.u.iomem_permission.domain = domid;
  35.212      op.u.iomem_permission.first_mfn = first_mfn;
  35.213 -	op.u.iomem_permission.nr_mfns = nr_mfns;
  35.214 +    op.u.iomem_permission.nr_mfns = nr_mfns;
  35.215      op.u.iomem_permission.allow_access = allow_access;
  35.216  
  35.217      return do_dom0_op(xc_handle, &op);
    36.1 --- a/tools/libxc/xc_elf.h	Sat Apr 15 19:25:09 2006 +0100
    36.2 +++ b/tools/libxc/xc_elf.h	Sat Apr 15 19:25:21 2006 +0100
    36.3 @@ -46,7 +46,7 @@ typedef uint32_t	Elf64_Half;
    36.4  typedef uint16_t	Elf64_Quarter;
    36.5  
    36.6  /*
    36.7 - * e_ident[] identification indexes 
    36.8 + * e_ident[] identification indexes
    36.9   * See http://www.caldera.com/developers/gabi/2000-07-17/ch4.eheader.html
   36.10   */
   36.11  #define EI_MAG0		0		/* file ID */
   36.12 @@ -57,7 +57,7 @@ typedef uint16_t	Elf64_Quarter;
   36.13  #define EI_DATA		5		/* data encoding */
   36.14  #define EI_VERSION	6		/* ELF header version */
   36.15  #define EI_OSABI	7		/* OS/ABI ID */
   36.16 -#define EI_ABIVERSION	8		/* ABI version */ 
   36.17 +#define EI_ABIVERSION	8		/* ABI version */
   36.18  #define EI_PAD		9		/* start of pad bytes */
   36.19  #define EI_NIDENT	16		/* Size of e_ident[] */
   36.20  
   36.21 @@ -119,7 +119,7 @@ typedef struct elfhdr {
   36.22  	Elf32_Half	e_phnum;	/* number of program header entries */
   36.23  	Elf32_Half	e_shentsize;	/* section header entry size */
   36.24  	Elf32_Half	e_shnum;	/* number of section header entries */
   36.25 -	Elf32_Half	e_shstrndx;	/* section header table's "section 
   36.26 +	Elf32_Half	e_shstrndx;	/* section header table's "section
   36.27  					   header string table" entry offset */
   36.28  } Elf32_Ehdr;
   36.29  
   36.30 @@ -160,7 +160,7 @@ typedef struct {
   36.31  #define EM_486		6		/* Intel 80486 - unused? */
   36.32  #define EM_860		7		/* Intel 80860 */
   36.33  #define EM_MIPS		8		/* MIPS R3000 Big-Endian only */
   36.34 -/* 
   36.35 +/*
   36.36   * Don't know if EM_MIPS_RS4_BE,
   36.37   * EM_SPARC64, EM_PARISC,
   36.38   * or EM_PPC are ABI compliant
   36.39 @@ -441,7 +441,7 @@ typedef struct {
   36.40  #define DT_NUM		25		/* Number used. */
   36.41  #define DT_LOPROC	0x70000000	/* reserved range for processor */
   36.42  #define DT_HIPROC	0x7fffffff	/*  specific dynamic array tags */
   36.43 -	
   36.44 +
   36.45  /* Standard ELF hashing function */
   36.46  unsigned int elf_hash(const unsigned char *name);
   36.47  
    37.1 --- a/tools/libxc/xc_evtchn.c	Sat Apr 15 19:25:09 2006 +0100
    37.2 +++ b/tools/libxc/xc_evtchn.c	Sat Apr 15 19:25:21 2006 +0100
    37.3 @@ -1,8 +1,8 @@
    37.4  /******************************************************************************
    37.5   * xc_evtchn.c
    37.6 - * 
    37.7 + *
    37.8   * API for manipulating and accessing inter-domain event channels.
    37.9 - * 
   37.10 + *
   37.11   * Copyright (c) 2004, K A Fraser.
   37.12   */
   37.13  
   37.14 @@ -44,7 +44,7 @@ int xc_evtchn_alloc_unbound(int xc_handl
   37.15  
   37.16      if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
   37.17          rc = op.u.alloc_unbound.port;
   37.18 -    
   37.19 +
   37.20      return rc;
   37.21  }
   37.22  
   37.23 @@ -62,6 +62,6 @@ int xc_evtchn_status(int xc_handle,
   37.24  
   37.25      if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
   37.26          memcpy(status, &op.u.status, sizeof(*status));
   37.27 -    
   37.28 +
   37.29      return rc;
   37.30  }
    38.1 --- a/tools/libxc/xc_ia64_stubs.c	Sat Apr 15 19:25:09 2006 +0100
    38.2 +++ b/tools/libxc/xc_ia64_stubs.c	Sat Apr 15 19:25:21 2006 +0100
    38.3 @@ -22,7 +22,7 @@ unsigned long xc_ia64_fpsr_default(void)
    38.4          return FPSR_DEFAULT;
    38.5  }
    38.6  
    38.7 -int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 
    38.8 +int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
    38.9                    uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
   38.10                    int (*suspend)(int domid))
   38.11  {
   38.12 @@ -50,8 +50,8 @@ xc_plan9_build(int xc_handle,
   38.13  }
   38.14  
   38.15  int xc_ia64_get_pfn_list(int xc_handle,
   38.16 -                         uint32_t domid, 
   38.17 -                         unsigned long *pfn_buf, 
   38.18 +                         uint32_t domid,
   38.19 +                         unsigned long *pfn_buf,
   38.20                           unsigned int start_page,
   38.21                           unsigned int nr_pages)
   38.22  {
   38.23 @@ -65,16 +65,16 @@ int xc_ia64_get_pfn_list(int xc_handle,
   38.24      op.u.getmemlist.buffer   = pfn_buf;
   38.25  
   38.26      if ( (max_pfns != -1UL)
   38.27 -		&& mlock(pfn_buf, nr_pages * sizeof(unsigned long)) != 0 )
   38.28 +        && mlock(pfn_buf, nr_pages * sizeof(unsigned long)) != 0 )
   38.29      {
   38.30          PERROR("Could not lock pfn list buffer");
   38.31          return -1;
   38.32 -    }    
   38.33 +    }
   38.34  
   38.35      ret = do_dom0_op(xc_handle, &op);
   38.36  
   38.37      if (max_pfns != -1UL)
   38.38 -    	(void)munlock(pfn_buf, nr_pages * sizeof(unsigned long));
   38.39 +        (void)munlock(pfn_buf, nr_pages * sizeof(unsigned long));
   38.40  
   38.41      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
   38.42  }
   38.43 @@ -84,7 +84,7 @@ long xc_get_max_pages(int xc_handle, uin
   38.44      dom0_op_t op;
   38.45      op.cmd = DOM0_GETDOMAININFO;
   38.46      op.u.getdomaininfo.domain = (domid_t)domid;
   38.47 -    return (do_dom0_op(xc_handle, &op) < 0) ? 
   38.48 +    return (do_dom0_op(xc_handle, &op) < 0) ?
   38.49          -1 : op.u.getdomaininfo.max_pages;
   38.50  }
   38.51  
   38.52 @@ -92,7 +92,7 @@ int xc_ia64_copy_to_domain_pages(int xc_
   38.53          void* src_page, unsigned long dst_pfn, int nr_pages)
   38.54  {
   38.55      // N.B. gva should be page aligned
   38.56 -    
   38.57 +
   38.58      unsigned long *page_array = NULL;
   38.59      int i;
   38.60  
   38.61 @@ -107,13 +107,13 @@ int xc_ia64_copy_to_domain_pages(int xc_
   38.62      }
   38.63  
   38.64      for ( i=0; i< nr_pages; i++ ){
   38.65 -	if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
   38.66 -			src_page + (i << PAGE_SHIFT)))
   38.67 -	    goto error_out;
   38.68 +        if (xc_copy_to_domain_page(xc_handle, domid, page_array[i],
   38.69 +                    src_page + (i << PAGE_SHIFT)))
   38.70 +            goto error_out;
   38.71      }
   38.72      free(page_array);
   38.73      return 0;
   38.74 -    
   38.75 +
   38.76  error_out:
   38.77      free(page_array);
   38.78      return -1;
   38.79 @@ -123,8 +123,8 @@ error_out:
   38.80  #define HOB_SIGNATURE 0x3436474953424f48 // "HOBSIG64"
   38.81  #define GFW_HOB_START    ((4UL<<30)-(14UL<<20))    //4G -14M
   38.82  #define GFW_HOB_SIZE     (1UL<<20)              //1M
   38.83 -#define MEM_G   (1UL << 30) 
   38.84 -#define MEM_M   (1UL << 20) 
   38.85 +#define MEM_G   (1UL << 30)
   38.86 +#define MEM_M   (1UL << 20)
   38.87  
   38.88  typedef struct {
   38.89      unsigned long signature;
   38.90 @@ -136,7 +136,7 @@ typedef struct {
   38.91   * INFO HOB is the first data data in one HOB list
   38.92   * it contains the control information of the HOB list
   38.93   */
   38.94 -typedef struct { 
   38.95 +typedef struct {
   38.96      HOB_GENERIC_HEADER  header;
   38.97      unsigned long       length;    // current length of hob
   38.98      unsigned long       cur_pos;   // current poisiton of hob
   38.99 @@ -216,7 +216,7 @@ hob_init( void  *buffer ,unsigned long b
  38.100          // buffer too small
  38.101          return -1;
  38.102      }
  38.103 -    
  38.104 +
  38.105      phit = (HOB_INFO*)buffer;
  38.106      phit->header.signature = HOB_SIGNATURE;
  38.107      phit->header.type = HOB_TYPE_INFO;
  38.108 @@ -224,7 +224,7 @@ hob_init( void  *buffer ,unsigned long b
  38.109      phit->length = sizeof(HOB_INFO) + sizeof(HOB_GENERIC_HEADER);
  38.110      phit->cur_pos = 0;
  38.111      phit->buf_size = buf_size;
  38.112 -    
  38.113 +
  38.114      terminal = (HOB_GENERIC_HEADER*) (buffer + sizeof(HOB_INFO));
  38.115      terminal->signature= HOB_SIGNATURE;
  38.116      terminal->type = HOB_TYPE_TERMINAL;
  38.117 @@ -235,7 +235,7 @@ hob_init( void  *buffer ,unsigned long b
  38.118  
  38.119  /*
  38.120   *  Add a new HOB to the HOB List.
  38.121 - *       
  38.122 + *
  38.123   *  hob_start  -  start address of hob buffer
  38.124   *  type       -  type of the hob to be added
  38.125   *  data       -  data of the hob to be added
  38.126 @@ -250,8 +250,8 @@ hob_add(
  38.127  )
  38.128  {
  38.129      HOB_INFO *phit;
  38.130 -    HOB_GENERIC_HEADER     *newhob,*tail;   
  38.131 -    
  38.132 +    HOB_GENERIC_HEADER     *newhob,*tail;
  38.133 +
  38.134      phit = (HOB_INFO*)hob_start;
  38.135  
  38.136      if (phit->length + data_size > phit->buf_size){
  38.137 @@ -259,7 +259,7 @@ hob_add(
  38.138          return -1;
  38.139      }
  38.140  
  38.141 -    //append new HOB 
  38.142 +    //append new HOB
  38.143      newhob = (HOB_GENERIC_HEADER*)
  38.144          (hob_start + phit->length - sizeof(HOB_GENERIC_HEADER));
  38.145      newhob->signature = HOB_SIGNATURE;
  38.146 @@ -267,7 +267,7 @@ hob_add(
  38.147      newhob->length = data_size + sizeof(HOB_GENERIC_HEADER);
  38.148      memcpy((void*)newhob + sizeof(HOB_GENERIC_HEADER), data, data_size);
  38.149  
  38.150 -    // append terminal HOB  
  38.151 +    // append terminal HOB
  38.152      tail = (HOB_GENERIC_HEADER*) ( hob_start + phit->length + data_size);
  38.153      tail->signature = HOB_SIGNATURE;
  38.154      tail->type = HOB_TYPE_TERMINAL;
  38.155 @@ -281,9 +281,9 @@ hob_add(
  38.156  }
  38.157  
  38.158  int get_hob_size(void* hob_buf){
  38.159 -    
  38.160 +
  38.161      HOB_INFO *phit = (HOB_INFO*)hob_buf;
  38.162 -    
  38.163 +
  38.164      if (phit->header.signature != HOB_SIGNATURE){
  38.165          PERROR("xc_get_hob_size:Incorrect signature");
  38.166          return -1;
  38.167 @@ -293,30 +293,30 @@ int get_hob_size(void* hob_buf){
  38.168  
  38.169  int build_hob (void* hob_buf, unsigned long hob_buf_size,
  38.170                    unsigned long dom_mem_size)
  38.171 -{   
  38.172 -    //Init HOB List 
  38.173 +{
  38.174 +    //Init HOB List
  38.175      if (hob_init (hob_buf, hob_buf_size)<0){
  38.176          PERROR("buffer too small");
  38.177          goto err_out;
  38.178      }
  38.179 -    
  38.180 +
  38.181      if ( add_mem_hob( hob_buf,dom_mem_size) < 0){
  38.182          PERROR("Add memory hob failed, buffer too small");
  38.183          goto err_out;
  38.184      }
  38.185 -    
  38.186 +
  38.187      if ( add_pal_hob( hob_buf ) < 0 ){
  38.188          PERROR("Add PAL hob failed, buffer too small");
  38.189          goto err_out;
  38.190      }
  38.191 -    
  38.192 +
  38.193      return 0;
  38.194  
  38.195  err_out:
  38.196 -    return -1;  
  38.197 +    return -1;
  38.198  }
  38.199  
  38.200 -static int 
  38.201 +static int
  38.202  load_hob(int xc_handle, uint32_t dom, void *hob_buf)
  38.203  {
  38.204      // hob_buf should be page aligned
  38.205 @@ -334,22 +334,22 @@ load_hob(int xc_handle, uint32_t dom, vo
  38.206      }
  38.207  
  38.208      nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
  38.209 -    
  38.210 +
  38.211      return xc_ia64_copy_to_domain_pages(xc_handle, dom,
  38.212              hob_buf, GFW_HOB_START, nr_pages );
  38.213  }
  38.214  
  38.215  #define MIN(x, y) ((x) < (y)) ? (x) : (y)
  38.216 -static int 
  38.217 +static int
  38.218  add_mem_hob(void* hob_buf, unsigned long dom_mem_size){
  38.219      hob_mem_t memhob;
  38.220  
  38.221      // less than 3G
  38.222      memhob.start = 0;
  38.223      memhob.size = MIN(dom_mem_size, 0xC0000000);
  38.224 -    
  38.225 +
  38.226      if (hob_add(hob_buf, HOB_TYPE_MEM, &memhob, sizeof(memhob)) < 0){
  38.227 -	return -1;
  38.228 +        return -1;
  38.229      }
  38.230  
  38.231      if (dom_mem_size > 0xC0000000) {
  38.232 @@ -373,29 +373,29 @@ unsigned char config_pal_mem_attrib[8] =
  38.233  };
  38.234  unsigned char config_pal_cache_info[152] = {
  38.235      3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.236 -    6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12, 
  38.237 +    6, 4, 6, 7, 255, 1, 0, 1, 0, 64, 0, 0, 12, 12,
  38.238      49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 6, 7, 0, 1,
  38.239 -    0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0, 
  38.240 -    0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0, 
  38.241 -    12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7, 
  38.242 +    0, 1, 0, 64, 0, 0, 12, 12, 49, 0, 0, 0, 0, 0, 0,
  38.243 +    0, 0, 0, 6, 8, 7, 7, 255, 7, 0, 11, 0, 0, 16, 0,
  38.244 +    12, 17, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
  38.245      7, 7, 5, 9, 11, 0, 0, 4, 0, 12, 15, 49, 0, 254, 255,
  38.246 -    255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9, 
  38.247 -    11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.248 +    255, 255, 255, 255, 255, 255, 2, 8, 7, 7, 7, 5, 9,
  38.249 +    11, 0, 0, 4, 0, 12, 15, 49, 0, 0, 0, 0, 0, 0, 0, 0,
  38.250      0, 3, 12, 7, 7, 7, 14, 1, 3, 0, 0, 192, 0, 12, 20, 49, 0
  38.251  };
  38.252  unsigned char config_pal_cache_prot_info[200] = {
  38.253 -    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.254 +    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.255      45, 0, 16, 8, 0, 76, 12, 64, 0, 0, 0, 0, 0, 0, 0,
  38.256 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.257 -    8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.258 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 
  38.259 -    0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.260 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.261 +    8, 0, 16, 4, 0, 76, 44, 68, 0, 0, 0, 0, 0, 0, 0, 0,
  38.262 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32,
  38.263 +    0, 16, 8, 0, 81, 44, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.264      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0,
  38.265 -    112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.266 -    0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255, 
  38.267 +    112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.268 +    0, 0, 0, 0, 0, 0, 254, 255, 255, 255, 255, 255, 255, 255,
  38.269      32, 0, 112, 12, 0, 79, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.270 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160, 
  38.271 -    12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.272 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 160,
  38.273 +    12, 0, 84, 124, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.274      0, 0, 0
  38.275  };
  38.276  unsigned char config_pal_debug_info[16] = {
  38.277 @@ -408,37 +408,37 @@ unsigned char config_pal_freq_base[8] = 
  38.278      109, 219, 182, 13, 0, 0, 0, 0
  38.279  };
  38.280  unsigned char config_pal_freq_ratios[24] = {
  38.281 -    11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4, 
  38.282 +    11, 1, 0, 0, 77, 7, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 4,
  38.283      0, 0, 0, 7, 0, 0, 0
  38.284  };
  38.285  unsigned char config_pal_halt_info[64] = {
  38.286 -    0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.287 +    0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0,
  38.288      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.289 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.290 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.291      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  38.292  };
  38.293  unsigned char config_pal_perf_mon_info[136] = {
  38.294 -    12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0, 
  38.295 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.296 -    0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255, 
  38.297 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.298 -    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0, 
  38.299 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.300 -    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0, 
  38.301 -    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.302 +    12, 47, 18, 8, 0, 0, 0, 0, 241, 255, 0, 0, 255, 7, 0, 0,
  38.303 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.304 +    0, 0, 0, 0, 0, 0, 0, 0, 241, 255, 0, 0, 223, 0, 255, 255,
  38.305 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.306 +    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  38.307 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.308 +    0, 0, 0, 0, 0, 0, 0, 0, 240, 255, 0, 0, 0, 0, 0, 0,
  38.309 +    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.310      0, 0, 0, 0, 0, 0, 0, 0
  38.311  };
  38.312  unsigned char config_pal_proc_get_features[104] = {
  38.313 -    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
  38.314 -    0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0, 
  38.315 -    0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 
  38.316 -    231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 
  38.317 -    0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 
  38.318 +    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38.319 +    0, 0, 0, 0, 64, 6, 64, 49, 0, 0, 0, 0, 64, 6, 0, 0,
  38.320 +    0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0,
  38.321 +    231, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0,
  38.322 +    0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0,
  38.323      63, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0,
  38.324      0, 0, 0, 0, 0, 0, 0, 0
  38.325  };
  38.326  unsigned char config_pal_ptce_info[24] = {
  38.327 -    0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 
  38.328 +    0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
  38.329      0, 0, 0, 0, 0, 0, 0, 0
  38.330  };
  38.331  unsigned char config_pal_register_info[64] = {
  38.332 @@ -473,7 +473,7 @@ unsigned char config_pal_vm_page_size[16
  38.333  typedef struct{
  38.334      hob_type_t type;
  38.335      void* data;
  38.336 -    unsigned long size;     
  38.337 +    unsigned long size;
  38.338  }hob_batch_t;
  38.339  
  38.340  hob_batch_t hob_batch[]={
  38.341 @@ -552,13 +552,13 @@ hob_batch_t hob_batch[]={
  38.342      {  HOB_TYPE_PAL_VM_PAGE_SIZE,
  38.343          &config_pal_vm_page_size,
  38.344          sizeof(config_pal_vm_page_size)
  38.345 -    },      
  38.346 +    },
  38.347  };
  38.348  
  38.349  static int add_pal_hob(void* hob_buf){
  38.350      int i;
  38.351      for (i=0; i<sizeof(hob_batch)/sizeof(hob_batch_t); i++){
  38.352 -        if (hob_add(hob_buf, hob_batch[i].type, 
  38.353 +        if (hob_add(hob_buf, hob_batch[i].type,
  38.354                      hob_batch[i].data,
  38.355                      hob_batch[i].size)<0)
  38.356              return -1;
  38.357 @@ -579,17 +579,17 @@ static int setup_guest(  int xc_handle,
  38.358  
  38.359      // FIXME: initialize pfn list for a temp hack
  38.360      if (xc_ia64_get_pfn_list(xc_handle, dom, NULL, -1, -1) == -1) {
  38.361 -	PERROR("Could not allocate continuous memory");
  38.362 -	goto error_out;
  38.363 +        PERROR("Could not allocate continuous memory");
  38.364 +        goto error_out;
  38.365      }
  38.366 -    
  38.367 +
  38.368      if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
  38.369          PERROR("Guest firmware size is incorrect [%ld]?", image_size);
  38.370          return -1;
  38.371      }
  38.372  
  38.373      /* Load guest firmware */
  38.374 -    if( xc_ia64_copy_to_domain_pages( xc_handle, dom, 
  38.375 +    if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
  38.376              image, 4*MEM_G-image_size, image_size>>PAGE_SHIFT)) {
  38.377          PERROR("Could not load guest firmware into domain");
  38.378          goto error_out;
  38.379 @@ -610,9 +610,9 @@ static int setup_guest(  int xc_handle,
  38.380  
  38.381      *store_mfn = page_array[1];
  38.382      if ((sp = (shared_iopage_t *) xc_map_foreign_range(
  38.383 -		xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
  38.384 -		page_array[0])) == 0)
  38.385 -	goto error_out;
  38.386 +                    xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
  38.387 +                    page_array[0])) == 0)
  38.388 +        goto error_out;
  38.389      memset(sp, 0, PAGE_SIZE);
  38.390  
  38.391      for (i = 0; i < vcpus; i++) {
  38.392 @@ -665,14 +665,14 @@ int xc_hvm_build(int xc_handle,
  38.393  
  38.394      image_size = (image_size + PAGE_SIZE - 1) & PAGE_MASK;
  38.395  
  38.396 -    if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ){   
  38.397 +    if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ){
  38.398          PERROR("Unable to mlock ctxt");
  38.399          return 1;
  38.400      }
  38.401  
  38.402      op.cmd = DOM0_GETDOMAININFO;
  38.403      op.u.getdomaininfo.domain = (domid_t)domid;
  38.404 -    if ( (do_dom0_op(xc_handle, &op) < 0) || 
  38.405 +    if ( (do_dom0_op(xc_handle, &op) < 0) ||
  38.406           ((uint16_t)op.u.getdomaininfo.domain != domid) ) {
  38.407          PERROR("Could not get info on domain");
  38.408          goto error_out;
    39.1 --- a/tools/libxc/xc_linux_build.c	Sat Apr 15 19:25:09 2006 +0100
    39.2 +++ b/tools/libxc/xc_linux_build.c	Sat Apr 15 19:25:21 2006 +0100
    39.3 @@ -237,7 +237,7 @@ static int setup_pg_tables(int xc_handle
    39.4          else
    39.5          {
    39.6              *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
    39.7 -            if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) && 
    39.8 +            if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
    39.9                   (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
   39.10                  *vl1e &= ~_PAGE_RW;
   39.11          }
   39.12 @@ -314,7 +314,7 @@ static int setup_pg_tables_pae(int xc_ha
   39.13              else
   39.14                  *vl2e++ = l1tab | L2_PROT;
   39.15          }
   39.16 -        
   39.17 +
   39.18          if ( shadow_mode_enabled )
   39.19          {
   39.20              *vl1e = (count << PAGE_SHIFT) | L1_PROT;
   39.21 @@ -323,12 +323,12 @@ static int setup_pg_tables_pae(int xc_ha
   39.22          {
   39.23              *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
   39.24              if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
   39.25 -                 (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
   39.26 +                 (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
   39.27                  *vl1e &= ~_PAGE_RW;
   39.28          }
   39.29          vl1e++;
   39.30      }
   39.31 -     
   39.32 +
   39.33      munmap(vl1tab, PAGE_SIZE);
   39.34      munmap(vl2tab, PAGE_SIZE);
   39.35      munmap(vl3tab, PAGE_SIZE);
   39.36 @@ -376,13 +376,13 @@ static int setup_pg_tables_64(int xc_han
   39.37          ctxt->ctrlreg[3] = pl4tab;
   39.38      else
   39.39          ctxt->ctrlreg[3] = l4tab;
   39.40 -    
   39.41 +
   39.42      for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
   39.43      {
   39.44          if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
   39.45          {
   39.46              alloc_pt(l1tab, vl1tab, pl1tab);
   39.47 -            
   39.48 +
   39.49              if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
   39.50              {
   39.51                  alloc_pt(l2tab, vl2tab, pl2tab);
   39.52 @@ -410,7 +410,7 @@ static int setup_pg_tables_64(int xc_han
   39.53                  *vl2e = l1tab | L2_PROT;
   39.54              vl2e++;
   39.55          }
   39.56 -        
   39.57 +
   39.58          if ( shadow_mode_enabled )
   39.59          {
   39.60              *vl1e = (count << PAGE_SHIFT) | L1_PROT;
   39.61 @@ -419,14 +419,14 @@ static int setup_pg_tables_64(int xc_han
   39.62          {
   39.63              *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
   39.64              if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
   39.65 -                 (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
   39.66 +                 (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
   39.67                  {
   39.68                      *vl1e &= ~_PAGE_RW;
   39.69                  }
   39.70          }
   39.71          vl1e++;
   39.72      }
   39.73 -     
   39.74 +
   39.75      munmap(vl1tab, PAGE_SIZE);
   39.76      munmap(vl2tab, PAGE_SIZE);
   39.77      munmap(vl3tab, PAGE_SIZE);
   39.78 @@ -509,7 +509,7 @@ static int setup_guest(int xc_handle,
   39.79             " Loaded kernel: %p->%p\n"
   39.80             " Init. ramdisk: %p->%p\n"
   39.81             " TOTAL:         %p->%p\n",
   39.82 -           _p(dsi.v_kernstart), _p(dsi.v_kernend), 
   39.83 +           _p(dsi.v_kernstart), _p(dsi.v_kernend),
   39.84             _p(vinitrd_start),   _p(vinitrd_end),
   39.85             _p(dsi.v_start),     _p(v_end));
   39.86      printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
   39.87 @@ -696,10 +696,10 @@ static int setup_guest(int xc_handle,
   39.88                                             required_features);
   39.89  
   39.90      /*
   39.91 -     * Why do we need this? The number of page-table frames depends on the 
   39.92 -     * size of the bootstrap address space. But the size of the address space 
   39.93 -     * depends on the number of page-table frames (since each one is mapped 
   39.94 -     * read-only). We have a pair of simultaneous equations in two unknowns, 
   39.95 +     * Why do we need this? The number of page-table frames depends on the
   39.96 +     * size of the bootstrap address space. But the size of the address space
   39.97 +     * depends on the number of page-table frames (since each one is mapped
   39.98 +     * read-only). We have a pair of simultaneous equations in two unknowns,
   39.99       * which we solve by exhaustive search.
  39.100       */
  39.101      v_end = round_pgup(dsi.v_end);
  39.102 @@ -731,13 +731,13 @@ static int setup_guest(int xc_handle,
  39.103          if ( dsi.pae_kernel )
  39.104          {
  39.105              /* FIXME: assumes one L2 pgtable @ 0xc0000000 */
  39.106 -            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >> 
  39.107 +            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >>
  39.108                     L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
  39.109                  break;
  39.110          }
  39.111          else
  39.112          {
  39.113 -            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >> 
  39.114 +            if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >>
  39.115                     L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
  39.116                  break;
  39.117          }
  39.118 @@ -873,7 +873,7 @@ static int setup_guest(int xc_handle,
  39.119              count) )
  39.120          {
  39.121              fprintf(stderr,"m2p update failure p=%lx m=%lx\n",
  39.122 -                    count, page_array[count]); 
  39.123 +                    count, page_array[count]);
  39.124              munmap(physmap, PAGE_SIZE);
  39.125              goto error_out;
  39.126          }
  39.127 @@ -982,7 +982,7 @@ static int setup_guest(int xc_handle,
  39.128          start_info->mod_len      = initrd->len;
  39.129      }
  39.130      if ( cmdline != NULL )
  39.131 -    { 
  39.132 +    {
  39.133          strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE);
  39.134          start_info->cmd_line[MAX_GUEST_CMDLINE-1] = '\0';
  39.135      }
  39.136 @@ -1073,14 +1073,14 @@ static int xc_linux_build_internal(int x
  39.137  #endif
  39.138  
  39.139      if ( mlock(&st_ctxt, sizeof(st_ctxt) ) )
  39.140 -    {   
  39.141 +    {
  39.142          PERROR("%s: ctxt mlock failed", __func__);
  39.143          return 1;
  39.144      }
  39.145  
  39.146      op.cmd = DOM0_GETDOMAININFO;
  39.147      op.u.getdomaininfo.domain = (domid_t)domid;
  39.148 -    if ( (xc_dom0_op(xc_handle, &op) < 0) || 
  39.149 +    if ( (xc_dom0_op(xc_handle, &op) < 0) ||
  39.150           ((uint16_t)op.u.getdomaininfo.domain != domid) )
  39.151      {
  39.152          PERROR("Could not get info on domain");
  39.153 @@ -1089,9 +1089,9 @@ static int xc_linux_build_internal(int x
  39.154  
  39.155      memset(ctxt, 0, sizeof(*ctxt));
  39.156  
  39.157 -    if ( setup_guest(xc_handle, domid, image, image_size, 
  39.158 +    if ( setup_guest(xc_handle, domid, image, image_size,
  39.159                       initrd,
  39.160 -                     nr_pages, 
  39.161 +                     nr_pages,
  39.162                       &vstartinfo_start, &vkern_entry,
  39.163                       &vstack_start, ctxt, cmdline,
  39.164                       op.u.getdomaininfo.shared_info_frame,
  39.165 @@ -1152,7 +1152,7 @@ static int xc_linux_build_internal(int x
  39.166  
  39.167      /* No LDT. */
  39.168      ctxt->ldt_ents = 0;
  39.169 -    
  39.170 +
  39.171      /* Use the default Xen-provided GDT. */
  39.172      ctxt->gdt_ents = 0;
  39.173  
  39.174 @@ -1184,7 +1184,7 @@ static int xc_linux_build_internal(int x
  39.175  
  39.176      launch_op.cmd = DOM0_SETVCPUCONTEXT;
  39.177      rc = xc_dom0_op(xc_handle, &launch_op);
  39.178 -    
  39.179 +
  39.180      return rc;
  39.181  
  39.182   error_out:
    40.1 --- a/tools/libxc/xc_linux_restore.c	Sat Apr 15 19:25:09 2006 +0100
    40.2 +++ b/tools/libxc/xc_linux_restore.c	Sat Apr 15 19:25:21 2006 +0100
    40.3 @@ -1,8 +1,8 @@
    40.4  /******************************************************************************
    40.5   * xc_linux_restore.c
    40.6 - * 
    40.7 + *
    40.8   * Restore the state of a Linux session.
    40.9 - * 
   40.10 + *
   40.11   * Copyright (c) 2003, K A Fraser.
   40.12   */
   40.13  
   40.14 @@ -13,13 +13,13 @@
   40.15  #include "xg_save_restore.h"
   40.16  
   40.17  /* max mfn of the whole machine */
   40.18 -static unsigned long max_mfn; 
   40.19 +static unsigned long max_mfn;
   40.20  
   40.21  /* virtual starting address of the hypervisor */
   40.22 -static unsigned long hvirt_start; 
   40.23 +static unsigned long hvirt_start;
   40.24  
   40.25  /* #levels of page tables used by the currrent guest */
   40.26 -static unsigned int pt_levels; 
   40.27 +static unsigned int pt_levels;
   40.28  
   40.29  /* total number of pages used by the current guest */
   40.30  static unsigned long max_pfn;
   40.31 @@ -41,84 +41,84 @@ read_exact(int fd, void *buf, size_t cou
   40.32          s = read(fd, &b[r], count - r);
   40.33          if ((s == -1) && (errno == EINTR))
   40.34              continue;
   40.35 -        if (s <= 0) { 
   40.36 +        if (s <= 0) {
   40.37              break;
   40.38 -        } 
   40.39 +        }
   40.40          r += s;
   40.41      }
   40.42  
   40.43 -    return (r == count) ? 1 : 0; 
   40.44 +    return (r == count) ? 1 : 0;
   40.45  }
   40.46  
   40.47  /*
   40.48 -** In the state file (or during transfer), all page-table pages are 
   40.49 -** converted into a 'canonical' form where references to actual mfns 
   40.50 -** are replaced with references to the corresponding pfns. 
   40.51 -** This function inverts that operation, replacing the pfn values with 
   40.52 -** the (now known) appropriate mfn values. 
   40.53 +** In the state file (or during transfer), all page-table pages are
   40.54 +** converted into a 'canonical' form where references to actual mfns
   40.55 +** are replaced with references to the corresponding pfns.
   40.56 +** This function inverts that operation, replacing the pfn values with
   40.57 +** the (now known) appropriate mfn values.
   40.58  */
   40.59 -int uncanonicalize_pagetable(unsigned long type, void *page) 
   40.60 -{ 
   40.61 -    int i, pte_last; 
   40.62 -    unsigned long pfn; 
   40.63 -    uint64_t pte; 
   40.64 +int uncanonicalize_pagetable(unsigned long type, void *page)
   40.65 +{
   40.66 +    int i, pte_last;
   40.67 +    unsigned long pfn;
   40.68 +    uint64_t pte;
   40.69  
   40.70 -    pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8); 
   40.71 +    pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
   40.72  
   40.73      /* Now iterate through the page table, uncanonicalizing each PTE */
   40.74 -    for(i = 0; i < pte_last; i++) { 
   40.75 -        
   40.76 -        if(pt_levels == 2) 
   40.77 -            pte = ((uint32_t *)page)[i]; 
   40.78 -        else 
   40.79 -            pte = ((uint64_t *)page)[i]; 
   40.80 +    for(i = 0; i < pte_last; i++) {
   40.81  
   40.82 -        if(pte & _PAGE_PRESENT) { 
   40.83 +        if(pt_levels == 2)
   40.84 +            pte = ((uint32_t *)page)[i];
   40.85 +        else
   40.86 +            pte = ((uint64_t *)page)[i];
   40.87 +
   40.88 +        if(pte & _PAGE_PRESENT) {
   40.89  
   40.90              pfn = (pte >> PAGE_SHIFT) & 0xffffffff;
   40.91 -            
   40.92 -            if(pfn >= max_pfn) { 
   40.93 +
   40.94 +            if(pfn >= max_pfn) {
   40.95                  /* This "page table page" is probably not one; bail. */
   40.96                  ERR("Frame number in type %lu page table is out of range: "
   40.97 -                    "i=%d pfn=0x%lx max_pfn=%lu", 
   40.98 +                    "i=%d pfn=0x%lx max_pfn=%lu",
   40.99                      type >> 28, i, pfn, max_pfn);
  40.100 -                return 0; 
  40.101 -            } 
  40.102 -            
  40.103 -            
  40.104 +                return 0;
  40.105 +            }
  40.106 +
  40.107 +
  40.108              pte &= 0xffffff0000000fffULL;
  40.109              pte |= (uint64_t)p2m[pfn] << PAGE_SHIFT;
  40.110  
  40.111 -            if(pt_levels == 2) 
  40.112 -                ((uint32_t *)page)[i] = (uint32_t)pte; 
  40.113 -            else 
  40.114 -                ((uint64_t *)page)[i] = (uint64_t)pte; 
  40.115 +            if(pt_levels == 2)
  40.116 +                ((uint32_t *)page)[i] = (uint32_t)pte;
  40.117 +            else
  40.118 +                ((uint64_t *)page)[i] = (uint64_t)pte;
  40.119  
  40.120 -        
  40.121 +
  40.122  
  40.123          }
  40.124      }
  40.125 -    
  40.126 -    return 1; 
  40.127 +
  40.128 +    return 1;
  40.129  }
  40.130  
  40.131 -int xc_linux_restore(int xc_handle, int io_fd, 
  40.132 -                     uint32_t dom, unsigned long nr_pfns, 
  40.133 +int xc_linux_restore(int xc_handle, int io_fd,
  40.134 +                     uint32_t dom, unsigned long nr_pfns,
  40.135                       unsigned int store_evtchn, unsigned long *store_mfn,
  40.136                       unsigned int console_evtchn, unsigned long *console_mfn)
  40.137  {
  40.138      DECLARE_DOM0_OP;
  40.139      int rc = 1, i, n;
  40.140 -    unsigned long mfn, pfn; 
  40.141 +    unsigned long mfn, pfn;
  40.142      unsigned int prev_pc, this_pc;
  40.143      int verify = 0;
  40.144 -    int nraces = 0; 
  40.145 +    int nraces = 0;
  40.146  
  40.147      /* The new domain's shared-info frame number. */
  40.148      unsigned long shared_info_frame;
  40.149      unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
  40.150      shared_info_t *shared_info = (shared_info_t *)shared_info_page;
  40.151 -    
  40.152 +
  40.153      /* A copy of the CPU context of the guest. */
  40.154      vcpu_guest_context_t ctxt;
  40.155  
  40.156 @@ -135,7 +135,7 @@ int xc_linux_restore(int xc_handle, int 
  40.157      unsigned long *page = NULL;
  40.158  
  40.159      /* A copy of the pfn-to-mfn table frame list. */
  40.160 -    unsigned long *p2m_frame_list = NULL; 
  40.161 +    unsigned long *p2m_frame_list = NULL;
  40.162  
  40.163      /* A temporary mapping of the guest's start_info page. */
  40.164      start_info_t *start_info;
  40.165 @@ -148,17 +148,17 @@ int xc_linux_restore(int xc_handle, int 
  40.166      unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
  40.167  
  40.168      struct mmuext_op pin[MAX_PIN_BATCH];
  40.169 -    unsigned int nr_pins; 
  40.170 +    unsigned int nr_pins;
  40.171  
  40.172  
  40.173 -    max_pfn = nr_pfns; 
  40.174 +    max_pfn = nr_pfns;
  40.175  
  40.176      DPRINTF("xc_linux_restore start: max_pfn = %lx\n", max_pfn);
  40.177  
  40.178  
  40.179 -    if(!get_platform_info(xc_handle, dom, 
  40.180 +    if(!get_platform_info(xc_handle, dom,
  40.181                            &max_mfn, &hvirt_start, &pt_levels)) {
  40.182 -        ERR("Unable to get platform info."); 
  40.183 +        ERR("Unable to get platform info.");
  40.184          return 1;
  40.185      }
  40.186  
  40.187 @@ -171,20 +171,20 @@ int xc_linux_restore(int xc_handle, int 
  40.188  
  40.189  
  40.190      /* Read the saved P2M frame list */
  40.191 -    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) { 
  40.192 +    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
  40.193          ERR("Couldn't allocate p2m_frame_list array");
  40.194          goto out;
  40.195      }
  40.196 -    
  40.197 -    if (!read_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) { 
  40.198 +
  40.199 +    if (!read_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
  40.200          ERR("read p2m_frame_list failed");
  40.201          goto out;
  40.202      }
  40.203  
  40.204 -    
  40.205 +
  40.206      /* We want zeroed memory so use calloc rather than malloc. */
  40.207 -    p2m        = calloc(sizeof(unsigned long), max_pfn); 
  40.208 -    pfn_type   = calloc(sizeof(unsigned long), max_pfn);    
  40.209 +    p2m        = calloc(sizeof(unsigned long), max_pfn);
  40.210 +    pfn_type   = calloc(sizeof(unsigned long), max_pfn);
  40.211      region_mfn = calloc(sizeof(unsigned long), MAX_BATCH_SIZE);
  40.212  
  40.213      if ((p2m == NULL) || (pfn_type == NULL) || (region_mfn == NULL)) {
  40.214 @@ -192,7 +192,7 @@ int xc_linux_restore(int xc_handle, int 
  40.215          errno = ENOMEM;
  40.216          goto out;
  40.217      }
  40.218 -    
  40.219 +
  40.220      if (mlock(region_mfn, sizeof(unsigned long) * MAX_BATCH_SIZE)) {
  40.221          ERR("Could not mlock region_mfn");
  40.222          goto out;
  40.223 @@ -207,27 +207,27 @@ int xc_linux_restore(int xc_handle, int 
  40.224      }
  40.225      shared_info_frame = op.u.getdomaininfo.shared_info_frame;
  40.226  
  40.227 -    if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) { 
  40.228 +    if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
  40.229          errno = ENOMEM;
  40.230          goto out;
  40.231      }
  40.232 -    
  40.233 +
  40.234      if(xc_domain_memory_increase_reservation(
  40.235 -           xc_handle, dom, max_pfn, 0, 0, NULL) != 0) { 
  40.236 +           xc_handle, dom, max_pfn, 0, 0, NULL) != 0) {
  40.237          ERR("Failed to increase reservation by %lx KB", PFN_TO_KB(max_pfn));
  40.238          errno = ENOMEM;
  40.239          goto out;
  40.240      }
  40.241  
  40.242 -    DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn)); 
  40.243 +    DPRINTF("Increased domain reservation by %lx KB\n", PFN_TO_KB(max_pfn));
  40.244  
  40.245      /* Build the pfn-to-mfn table. We choose MFN ordering returned by Xen. */
  40.246      if (xc_get_pfn_list(xc_handle, dom, p2m, max_pfn) != max_pfn) {
  40.247          ERR("Did not read correct number of frame numbers for new dom");
  40.248          goto out;
  40.249      }
  40.250 -    
  40.251 -    if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) { 
  40.252 +
  40.253 +    if(!(mmu = xc_init_mmu_updates(xc_handle, dom))) {
  40.254          ERR("Could not initialise for MMU updates");
  40.255          goto out;
  40.256      }
  40.257 @@ -242,7 +242,7 @@ int xc_linux_restore(int xc_handle, int 
  40.258      prev_pc = 0;
  40.259  
  40.260      n = 0;
  40.261 -    while (1) { 
  40.262 +    while (1) {
  40.263  
  40.264          int j;
  40.265  
  40.266 @@ -253,13 +253,13 @@ int xc_linux_restore(int xc_handle, int 
  40.267              prev_pc = this_pc;
  40.268          }
  40.269  
  40.270 -        if (!read_exact(io_fd, &j, sizeof(int))) { 
  40.271 +        if (!read_exact(io_fd, &j, sizeof(int))) {
  40.272              ERR("Error when reading batch size");
  40.273              goto out;
  40.274          }
  40.275  
  40.276          PPRINTF("batch %d\n",j);
  40.277 - 
  40.278 +
  40.279          if (j == -1) {
  40.280              verify = 1;
  40.281              fprintf(stderr, "Entering page verify mode\n");
  40.282 @@ -269,27 +269,27 @@ int xc_linux_restore(int xc_handle, int 
  40.283          if (j == 0)
  40.284              break;  /* our work here is done */
  40.285  
  40.286 -        if (j > MAX_BATCH_SIZE) { 
  40.287 +        if (j > MAX_BATCH_SIZE) {
  40.288              ERR("Max batch size exceeded. Giving up.");
  40.289              goto out;
  40.290          }
  40.291 - 
  40.292 -        if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) { 
  40.293 +
  40.294 +        if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
  40.295              ERR("Error when reading region pfn types");
  40.296              goto out;
  40.297          }
  40.298  
  40.299 -        for (i = 0; i < j; i++) { 
  40.300 +        for (i = 0; i < j; i++) {
  40.301  
  40.302              if ((region_pfn_type[i] & LTAB_MASK) == XTAB)
  40.303                  region_mfn[i] = 0; /* we know map will fail, but don't care */
  40.304 -            else 
  40.305 -                region_mfn[i] = p2m[region_pfn_type[i] & ~LTAB_MASK]; 
  40.306 +            else
  40.307 +                region_mfn[i] = p2m[region_pfn_type[i] & ~LTAB_MASK];
  40.308  
  40.309          }
  40.310 - 
  40.311 +
  40.312          if (!(region_base = xc_map_foreign_batch(
  40.313 -                  xc_handle, dom, PROT_WRITE, region_mfn, j))) {  
  40.314 +                  xc_handle, dom, PROT_WRITE, region_mfn, j))) {
  40.315              ERR("map batch failed");
  40.316              goto out;
  40.317          }
  40.318 @@ -297,12 +297,12 @@ int xc_linux_restore(int xc_handle, int 
  40.319          for ( i = 0; i < j; i++ )
  40.320          {
  40.321              void *page;
  40.322 -            unsigned long pagetype; 
  40.323 +            unsigned long pagetype;
  40.324  
  40.325              pfn      = region_pfn_type[i] & ~LTAB_MASK;
  40.326 -            pagetype = region_pfn_type[i] & LTAB_MASK; 
  40.327 +            pagetype = region_pfn_type[i] & LTAB_MASK;
  40.328  
  40.329 -            if (pagetype == XTAB) 
  40.330 +            if (pagetype == XTAB)
  40.331                  /* a bogus/unmapped page: skip it */
  40.332                  continue;
  40.333  
  40.334 @@ -311,72 +311,72 @@ int xc_linux_restore(int xc_handle, int 
  40.335                  goto out;
  40.336              }
  40.337  
  40.338 -            pfn_type[pfn] = pagetype; 
  40.339 +            pfn_type[pfn] = pagetype;
  40.340  
  40.341              mfn = p2m[pfn];
  40.342  
  40.343              /* In verify mode, we use a copy; otherwise we work in place */
  40.344 -            page = verify ? (void *)buf : (region_base + i*PAGE_SIZE); 
  40.345 +            page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
  40.346  
  40.347 -            if (!read_exact(io_fd, page, PAGE_SIZE)) { 
  40.348 +            if (!read_exact(io_fd, page, PAGE_SIZE)) {
  40.349                  ERR("Error when reading page (type was %lx)", pagetype);
  40.350                  goto out;
  40.351              }
  40.352  
  40.353 -            pagetype &= LTABTYPE_MASK; 
  40.354 +            pagetype &= LTABTYPE_MASK;
  40.355  
  40.356 -            if(pagetype >= L1TAB && pagetype <= L4TAB) { 
  40.357 -                
  40.358 -                /* 
  40.359 -                ** A page table page - need to 'uncanonicalize' it, i.e. 
  40.360 -                ** replace all the references to pfns with the corresponding 
  40.361 -                ** mfns for the new domain. 
  40.362 -                ** 
  40.363 -                ** On PAE we need to ensure that PGDs are in MFNs < 4G, and 
  40.364 -                ** so we may need to update the p2m after the main loop. 
  40.365 -                ** Hence we defer canonicalization of L1s until then. 
  40.366 +            if(pagetype >= L1TAB && pagetype <= L4TAB) {
  40.367 +
  40.368 +                /*
  40.369 +                ** A page table page - need to 'uncanonicalize' it, i.e.
  40.370 +                ** replace all the references to pfns with the corresponding
  40.371 +                ** mfns for the new domain.
  40.372 +                **
  40.373 +                ** On PAE we need to ensure that PGDs are in MFNs < 4G, and
  40.374 +                ** so we may need to update the p2m after the main loop.
  40.375 +                ** Hence we defer canonicalization of L1s until then.
  40.376                  */
  40.377 -                if(pt_levels != 3 || pagetype != L1TAB) { 
  40.378 +                if(pt_levels != 3 || pagetype != L1TAB) {
  40.379  
  40.380                      if(!uncanonicalize_pagetable(pagetype, page)) {
  40.381 -                        /* 
  40.382 +                        /*
  40.383                          ** Failing to uncanonicalize a page table can be ok
  40.384                          ** under live migration since the pages type may have
  40.385 -                        ** changed by now (and we'll get an update later). 
  40.386 +                        ** changed by now (and we'll get an update later).
  40.387                          */
  40.388 -                        DPRINTF("PT L%ld race on pfn=%08lx mfn=%08lx\n", 
  40.389 -                                pagetype >> 28, pfn, mfn); 
  40.390 -                        nraces++; 
  40.391 -                        continue; 
  40.392 +                        DPRINTF("PT L%ld race on pfn=%08lx mfn=%08lx\n",
  40.393 +                                pagetype >> 28, pfn, mfn);
  40.394 +                        nraces++;
  40.395 +                        continue;
  40.396                      }
  40.397  
  40.398 -                } 
  40.399 -                    
  40.400 -            } else if(pagetype != NOTAB) { 
  40.401 +                }
  40.402 +
  40.403 +            } else if(pagetype != NOTAB) {
  40.404  
  40.405                  ERR("Bogus page type %lx page table is out of range: "
  40.406                      "i=%d max_pfn=%lu", pagetype, i, max_pfn);
  40.407                  goto out;
  40.408  
  40.409 -            } 
  40.410 +            }
  40.411  
  40.412  
  40.413              if (verify) {
  40.414  
  40.415                  int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
  40.416  
  40.417 -                if (res) { 
  40.418 +                if (res) {
  40.419  
  40.420                      int v;
  40.421  
  40.422                      DPRINTF("************** pfn=%lx type=%lx gotcs=%08lx "
  40.423 -                            "actualcs=%08lx\n", pfn, pfn_type[pfn], 
  40.424 -                            csum_page(region_base + i*PAGE_SIZE), 
  40.425 +                            "actualcs=%08lx\n", pfn, pfn_type[pfn],
  40.426 +                            csum_page(region_base + i*PAGE_SIZE),
  40.427                              csum_page(buf));
  40.428  
  40.429                      for (v = 0; v < 4; v++) {
  40.430 -                        
  40.431 -                        unsigned long *p = (unsigned long *) 
  40.432 +
  40.433 +                        unsigned long *p = (unsigned long *)
  40.434                              (region_base + i*PAGE_SIZE);
  40.435                          if (buf[v] != p[v])
  40.436                              DPRINTF("    %d: %08lx %08lx\n", v, buf[v], p[v]);
  40.437 @@ -384,8 +384,8 @@ int xc_linux_restore(int xc_handle, int 
  40.438                  }
  40.439              }
  40.440  
  40.441 -            if (xc_add_mmu_update(xc_handle, mmu, 
  40.442 -                                  (((unsigned long long)mfn) << PAGE_SHIFT) 
  40.443 +            if (xc_add_mmu_update(xc_handle, mmu,
  40.444 +                                  (((unsigned long long)mfn) << PAGE_SHIFT)
  40.445                                    | MMU_MACHPHYS_UPDATE, pfn)) {
  40.446                  ERR("failed machpys update mfn=%lx pfn=%lx", mfn, pfn);
  40.447                  goto out;
  40.448 @@ -398,149 +398,149 @@ int xc_linux_restore(int xc_handle, int 
  40.449  
  40.450      DPRINTF("Received all pages (%d races)\n", nraces);
  40.451  
  40.452 -    if(pt_levels == 3) { 
  40.453 +    if(pt_levels == 3) {
  40.454  
  40.455 -        /* 
  40.456 -        ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This 
  40.457 +        /*
  40.458 +        ** XXX SMH on PAE we need to ensure PGDs are in MFNs < 4G. This
  40.459          ** is a little awkward and involves (a) finding all such PGDs and
  40.460 -        ** replacing them with 'lowmem' versions; (b) upating the p2m[] 
   40.461 +        ** replacing them with 'lowmem' versions; (b) updating the p2m[]
  40.462          ** with the new info; and (c) canonicalizing all the L1s using the
  40.463 -        ** (potentially updated) p2m[]. 
  40.464 -        ** 
  40.465 +        ** (potentially updated) p2m[].
  40.466 +        **
  40.467          ** This is relatively slow (and currently involves two passes through
  40.468          ** the pfn_type[] array), but at least seems to be correct. May wish
  40.469 -        ** to consider more complex approaches to optimize this later. 
  40.470 +        ** to consider more complex approaches to optimize this later.
  40.471          */
  40.472  
  40.473 -        int j, k; 
  40.474 +        int j, k;
  40.475  
  40.476          /* First pass: find all L3TABs current in > 4G mfns and get new mfns */
  40.477          for (i = 0; i < max_pfn; i++) {
  40.478 -            
  40.479 +
  40.480              if (((pfn_type[i] & LTABTYPE_MASK)==L3TAB) && (p2m[i]>0xfffffUL)) {
  40.481  
  40.482 -                unsigned long new_mfn; 
  40.483 -                uint64_t l3ptes[4]; 
  40.484 -                uint64_t *l3tab; 
  40.485 +                unsigned long new_mfn;
  40.486 +                uint64_t l3ptes[4];
  40.487 +                uint64_t *l3tab;
  40.488  
  40.489                  l3tab = (uint64_t *)
  40.490 -                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 
  40.491 -                                         PROT_READ, p2m[i]); 
  40.492 +                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  40.493 +                                         PROT_READ, p2m[i]);
  40.494  
  40.495 -                for(j = 0; j < 4; j++) 
  40.496 -                    l3ptes[j] = l3tab[j]; 
  40.497 -                
  40.498 -                munmap(l3tab, PAGE_SIZE); 
  40.499 +                for(j = 0; j < 4; j++)
  40.500 +                    l3ptes[j] = l3tab[j];
  40.501 +
  40.502 +                munmap(l3tab, PAGE_SIZE);
  40.503  
  40.504                  if (!(new_mfn=xc_make_page_below_4G(xc_handle, dom, p2m[i]))) {
  40.505                      ERR("Couldn't get a page below 4GB :-(");
  40.506                      goto out;
  40.507                  }
  40.508 -                
  40.509 +
  40.510                  p2m[i] = new_mfn;
  40.511 -                if (xc_add_mmu_update(xc_handle, mmu, 
  40.512 -                                      (((unsigned long long)new_mfn) 
  40.513 -                                       << PAGE_SHIFT) | 
  40.514 +                if (xc_add_mmu_update(xc_handle, mmu,
  40.515 +                                      (((unsigned long long)new_mfn)
  40.516 +                                       << PAGE_SHIFT) |
  40.517                                        MMU_MACHPHYS_UPDATE, i)) {
  40.518                      ERR("Couldn't m2p on PAE root pgdir");
  40.519                      goto out;
  40.520                  }
  40.521 -                
  40.522 +
  40.523                  l3tab = (uint64_t *)
  40.524 -                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 
  40.525 -                                         PROT_READ | PROT_WRITE, p2m[i]); 
  40.526 -                
  40.527 -                for(j = 0; j < 4; j++) 
  40.528 -                    l3tab[j] = l3ptes[j]; 
  40.529 -                
  40.530 -                munmap(l3tab, PAGE_SIZE); 
  40.531 -                
  40.532 +                    xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  40.533 +                                         PROT_READ | PROT_WRITE, p2m[i]);
  40.534 +
  40.535 +                for(j = 0; j < 4; j++)
  40.536 +                    l3tab[j] = l3ptes[j];
  40.537 +
  40.538 +                munmap(l3tab, PAGE_SIZE);
  40.539 +
  40.540              }
  40.541          }
  40.542  
  40.543          /* Second pass: find all L1TABs and uncanonicalize them */
  40.544 -        j = 0; 
  40.545 +        j = 0;
  40.546  
  40.547 -        for(i = 0; i < max_pfn; i++) { 
  40.548 -            
  40.549 -            if (((pfn_type[i] & LTABTYPE_MASK)==L1TAB)) { 
  40.550 -                region_mfn[j] = p2m[i]; 
  40.551 -                j++; 
  40.552 +        for(i = 0; i < max_pfn; i++) {
  40.553 +
  40.554 +            if (((pfn_type[i] & LTABTYPE_MASK)==L1TAB)) {
  40.555 +                region_mfn[j] = p2m[i];
  40.556 +                j++;
  40.557              }
  40.558  
  40.559 -            if(i == (max_pfn-1) || j == MAX_BATCH_SIZE) { 
  40.560 +            if(i == (max_pfn-1) || j == MAX_BATCH_SIZE) {
  40.561  
  40.562                  if (!(region_base = xc_map_foreign_batch(
  40.563 -                          xc_handle, dom, PROT_READ | PROT_WRITE, 
  40.564 -                          region_mfn, j))) {  
  40.565 +                          xc_handle, dom, PROT_READ | PROT_WRITE,
  40.566 +                          region_mfn, j))) {
  40.567                      ERR("map batch failed");
  40.568                      goto out;
  40.569                  }
  40.570  
  40.571                  for(k = 0; k < j; k++) {
  40.572 -                    if(!uncanonicalize_pagetable(L1TAB, 
  40.573 +                    if(!uncanonicalize_pagetable(L1TAB,
  40.574                                                   region_base + k*PAGE_SIZE)) {
  40.575 -                        ERR("failed uncanonicalize pt!"); 
  40.576 -                        goto out; 
  40.577 -                    } 
  40.578 +                        ERR("failed uncanonicalize pt!");
  40.579 +                        goto out;
  40.580 +                    }
  40.581                  }
  40.582 -                
  40.583 -                munmap(region_base, j*PAGE_SIZE); 
  40.584 -                j = 0; 
  40.585 +
  40.586 +                munmap(region_base, j*PAGE_SIZE);
  40.587 +                j = 0;
  40.588              }
  40.589          }
  40.590  
  40.591      }
  40.592  
  40.593  
  40.594 -    if (xc_finish_mmu_updates(xc_handle, mmu)) { 
  40.595 -        ERR("Error doing finish_mmu_updates()"); 
  40.596 +    if (xc_finish_mmu_updates(xc_handle, mmu)) {
  40.597 +        ERR("Error doing finish_mmu_updates()");
  40.598          goto out;
  40.599 -    } 
  40.600 +    }
  40.601  
  40.602  
  40.603      /*
  40.604       * Pin page tables. Do this after writing to them as otherwise Xen
  40.605       * will barf when doing the type-checking.
  40.606       */
  40.607 -    nr_pins = 0; 
  40.608 +    nr_pins = 0;
  40.609      for (i = 0; i < max_pfn; i++) {
  40.610  
  40.611          if (i == (max_pfn-1) || nr_pins == MAX_PIN_BATCH) {
  40.612 -            if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) { 
  40.613 -                ERR("Failed to pin batch of %d page tables", nr_pins); 
  40.614 +            if (xc_mmuext_op(xc_handle, pin, nr_pins, dom) < 0) {
  40.615 +                ERR("Failed to pin batch of %d page tables", nr_pins);
  40.616                  goto out;
  40.617 -            } 
  40.618 +            }
  40.619              nr_pins = 0;
  40.620          }
  40.621  
  40.622          if ( (pfn_type[i] & LPINTAB) == 0 )
  40.623              continue;
  40.624  
  40.625 -        switch(pfn_type[i]) { 
  40.626 +        switch(pfn_type[i]) {
  40.627  
  40.628 -        case (L1TAB|LPINTAB): 
  40.629 +        case (L1TAB|LPINTAB):
  40.630              pin[nr_pins].cmd = MMUEXT_PIN_L1_TABLE;
  40.631 -            break; 
  40.632 -            
  40.633 -        case (L2TAB|LPINTAB): 
  40.634 +            break;
  40.635 +
  40.636 +        case (L2TAB|LPINTAB):
  40.637              pin[nr_pins].cmd = MMUEXT_PIN_L2_TABLE;
  40.638 -            break; 
  40.639 -            
  40.640 -        case (L3TAB|LPINTAB): 
  40.641 +            break;
  40.642 +
  40.643 +        case (L3TAB|LPINTAB):
  40.644              pin[nr_pins].cmd = MMUEXT_PIN_L3_TABLE;
  40.645 -            break; 
  40.646 +            break;
  40.647  
  40.648          case (L4TAB|LPINTAB):
  40.649              pin[nr_pins].cmd = MMUEXT_PIN_L4_TABLE;
  40.650 -            break; 
  40.651 -            
  40.652 -        default: 
  40.653 -            continue; 
  40.654 +            break;
  40.655 +
  40.656 +        default:
  40.657 +            continue;
  40.658          }
  40.659  
  40.660          pin[nr_pins].arg1.mfn = p2m[i];
  40.661 -        nr_pins++; 
  40.662 +        nr_pins++;
  40.663  
  40.664      }
  40.665  
  40.666 @@ -553,17 +553,17 @@ int xc_linux_restore(int xc_handle, int 
  40.667          unsigned long *pfntab;
  40.668          int rc;
  40.669  
  40.670 -        if (!read_exact(io_fd, &count, sizeof(count))) { 
  40.671 +        if (!read_exact(io_fd, &count, sizeof(count))) {
  40.672              ERR("Error when reading pfn count");
  40.673              goto out;
  40.674          }
  40.675  
  40.676 -        if(!(pfntab = malloc(sizeof(unsigned long) * count))) { 
  40.677 +        if(!(pfntab = malloc(sizeof(unsigned long) * count))) {
  40.678              ERR("Out of memory");
  40.679              goto out;
  40.680          }
  40.681 -        
  40.682 -        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) { 
  40.683 +
  40.684 +        if (!read_exact(io_fd, pfntab, sizeof(unsigned long)*count)) {
  40.685              ERR("Error when reading pfntab");
  40.686              goto out;
  40.687          }
  40.688 @@ -572,14 +572,14 @@ int xc_linux_restore(int xc_handle, int 
  40.689  
  40.690              unsigned long pfn = pfntab[i];
  40.691  
  40.692 -            if(pfn > max_pfn) 
  40.693 +            if(pfn > max_pfn)
  40.694                  /* shouldn't happen - continue optimistically */
  40.695 -                continue; 
  40.696 +                continue;
  40.697  
  40.698 -            pfntab[i] = p2m[pfn];   
  40.699 -            p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map 
  40.700 +            pfntab[i] = p2m[pfn];
  40.701 +            p2m[pfn]  = INVALID_P2M_ENTRY; // not in pseudo-physical map
  40.702          }
  40.703 -        
  40.704 +
  40.705          if (count > 0) {
  40.706  
  40.707              struct xen_memory_reservation reservation = {
  40.708 @@ -590,16 +590,16 @@ int xc_linux_restore(int xc_handle, int 
  40.709              };
  40.710  
  40.711              if ((rc = xc_memory_op(xc_handle, XENMEM_decrease_reservation,
  40.712 -                                   &reservation)) != count) { 
  40.713 +                                   &reservation)) != count) {
  40.714                  ERR("Could not decrease reservation : %d", rc);
  40.715                  goto out;
  40.716              } else
  40.717                  DPRINTF("Decreased reservation by %d pages\n", count);
  40.718 -        } 
  40.719 +        }
  40.720      }
  40.721  
  40.722 -    if (!read_exact(io_fd, &ctxt, sizeof(ctxt)) || 
  40.723 -        !read_exact(io_fd, shared_info_page, PAGE_SIZE)) { 
  40.724 +    if (!read_exact(io_fd, &ctxt, sizeof(ctxt)) ||
  40.725 +        !read_exact(io_fd, shared_info_page, PAGE_SIZE)) {
  40.726          ERR("Error when reading ctxt or shared info page");
  40.727          goto out;
  40.728      }
  40.729 @@ -642,15 +642,15 @@ int xc_linux_restore(int xc_handle, int 
  40.730  
  40.731      if (pfn >= max_pfn) {
  40.732          ERR("PT base is bad: pfn=%lu max_pfn=%lu type=%08lx",
  40.733 -            pfn, max_pfn, pfn_type[pfn]); 
  40.734 +            pfn, max_pfn, pfn_type[pfn]);
  40.735          goto out;
  40.736      }
  40.737  
  40.738 -    if ( (pfn_type[pfn] & LTABTYPE_MASK) != 
  40.739 +    if ( (pfn_type[pfn] & LTABTYPE_MASK) !=
  40.740           ((unsigned long)pt_levels<<LTAB_SHIFT) ) {
  40.741          ERR("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx",
  40.742 -            pfn, max_pfn, pfn_type[pfn], 
  40.743 -            (unsigned long)pt_levels<<LTAB_SHIFT); 
  40.744 +            pfn, max_pfn, pfn_type[pfn],
  40.745 +            (unsigned long)pt_levels<<LTAB_SHIFT);
  40.746          goto out;
  40.747      }
  40.748  
  40.749 @@ -667,7 +667,7 @@ int xc_linux_restore(int xc_handle, int 
  40.750          xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
  40.751      memcpy(page, shared_info, sizeof(shared_info_t));
  40.752      munmap(page, PAGE_SIZE);
  40.753 -    
  40.754 +
  40.755      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
  40.756      for (i = 0; i < P2M_FL_ENTRIES; i++) {
  40.757          pfn = p2m_frame_list[i];
  40.758 @@ -678,16 +678,16 @@ int xc_linux_restore(int xc_handle, int 
  40.759  
  40.760          p2m_frame_list[i] = p2m[pfn];
  40.761      }
  40.762 -    
  40.763 +
  40.764      /* Copy the P2M we've constructed to the 'live' P2M */
  40.765 -    if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE, 
  40.766 +    if (!(live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_WRITE,
  40.767                                            p2m_frame_list, P2M_FL_ENTRIES))) {
  40.768          ERR("Couldn't map p2m table");
  40.769          goto out;
  40.770      }
  40.771  
  40.772 -    memcpy(live_p2m, p2m, P2M_SIZE); 
  40.773 -    munmap(live_p2m, P2M_SIZE); 
  40.774 +    memcpy(live_p2m, p2m, P2M_SIZE);
  40.775 +    munmap(live_p2m, P2M_SIZE);
  40.776  
  40.777      /*
  40.778       * Safety checking of saved context:
    41.1 --- a/tools/libxc/xc_linux_save.c	Sat Apr 15 19:25:09 2006 +0100
    41.2 +++ b/tools/libxc/xc_linux_save.c	Sat Apr 15 19:25:21 2006 +0100
    41.3 @@ -1,8 +1,8 @@
    41.4  /******************************************************************************
    41.5   * xc_linux_save.c
    41.6 - * 
    41.7 + *
    41.8   * Save the state of a running Linux session.
    41.9 - * 
   41.10 + *
   41.11   * Copyright (c) 2003, K A Fraser.
   41.12   */
   41.13  
   41.14 @@ -17,23 +17,23 @@
   41.15  
   41.16  /*
   41.17  ** Default values for important tuning parameters. Can override by passing
   41.18 -** non-zero replacement values to xc_linux_save().  
   41.19 +** non-zero replacement values to xc_linux_save().
   41.20  **
   41.21 -** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too. 
   41.22 -** 
   41.23 +** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
   41.24 +**
   41.25  */
   41.26 -#define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */ 
   41.27 +#define DEF_MAX_ITERS   29   /* limit us to 30 times round loop   */
   41.28  #define DEF_MAX_FACTOR   3   /* never send more than 3x nr_pfns   */
   41.29  
   41.30  
   41.31  /* max mfn of the whole machine */
   41.32 -static unsigned long max_mfn; 
   41.33 +static unsigned long max_mfn;
   41.34  
   41.35  /* virtual starting address of the hypervisor */
   41.36 -static unsigned long hvirt_start; 
   41.37 +static unsigned long hvirt_start;
   41.38  
   41.39  /* #levels of page tables used by the currrent guest */
   41.40 -static unsigned int pt_levels; 
   41.41 +static unsigned int pt_levels;
   41.42  
   41.43  /* total number of pages used by the current guest */
   41.44  static unsigned long max_pfn;
   41.45 @@ -56,8 +56,8 @@ static unsigned long *live_m2p = NULL;
   41.46  (((_mfn) < (max_mfn)) &&                        \
   41.47   ((mfn_to_pfn(_mfn) < (max_pfn)) &&               \
   41.48    (live_p2m[mfn_to_pfn(_mfn)] == (_mfn))))
   41.49 -    
   41.50 - 
   41.51 +
   41.52 +
   41.53  /* Returns TRUE if MFN is successfully converted to a PFN. */
   41.54  #define translate_mfn_to_pfn(_pmfn)                             \
   41.55  ({                                                              \
   41.56 @@ -70,12 +70,12 @@ static unsigned long *live_m2p = NULL;
   41.57      _res;                                                       \
   41.58  })
   41.59  
   41.60 -/* 
   41.61 -** During (live) save/migrate, we maintain a number of bitmaps to track 
   41.62 -** which pages we have to send, to fixup, and to skip. 
   41.63 +/*
   41.64 +** During (live) save/migrate, we maintain a number of bitmaps to track
   41.65 +** which pages we have to send, to fixup, and to skip.
   41.66  */
   41.67  
   41.68 -#define BITS_PER_LONG (sizeof(unsigned long) * 8) 
   41.69 +#define BITS_PER_LONG (sizeof(unsigned long) * 8)
   41.70  #define BITMAP_SIZE   ((max_pfn + BITS_PER_LONG - 1) / 8)
   41.71  
   41.72  #define BITMAP_ENTRY(_nr,_bmap) \
   41.73 @@ -85,17 +85,17 @@ static unsigned long *live_m2p = NULL;
   41.74  
   41.75  static inline int test_bit (int nr, volatile void * addr)
   41.76  {
   41.77 -    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1; 
   41.78 +    return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
   41.79  }
   41.80  
   41.81  static inline void clear_bit (int nr, volatile void * addr)
   41.82  {
   41.83 -    BITMAP_ENTRY(nr, addr) &= ~(1 << BITMAP_SHIFT(nr)); 
   41.84 +    BITMAP_ENTRY(nr, addr) &= ~(1 << BITMAP_SHIFT(nr));
   41.85  }
   41.86  
   41.87  static inline void set_bit ( int nr, volatile void * addr)
   41.88  {
   41.89 -    BITMAP_ENTRY(nr, addr) |= (1 << BITMAP_SHIFT(nr)); 
   41.90 +    BITMAP_ENTRY(nr, addr) |= (1 << BITMAP_SHIFT(nr));
   41.91  }
   41.92  
   41.93  /* Returns the hamming weight (i.e. the number of bits set) in a N-bit word */
   41.94 @@ -122,7 +122,7 @@ static inline int permute( int i, int nr
   41.95  {
   41.96      /* Need a simple permutation function so that we scan pages in a
   41.97         pseudo random order, enabling us to get a better estimate of
   41.98 -       the domain's page dirtying rate as we go (there are often 
   41.99 +       the domain's page dirtying rate as we go (there are often
  41.100         contiguous ranges of pfns that have similar behaviour, and we
  41.101         want to mix them up. */
  41.102  
  41.103 @@ -130,21 +130,21 @@ static inline int permute( int i, int nr
  41.104      /* 512MB domain, 128k pages, order 17 */
  41.105  
  41.106      /*
  41.107 -      QPONMLKJIHGFEDCBA  
  41.108 -             QPONMLKJIH  
  41.109 -      GFEDCBA  
  41.110 +      QPONMLKJIHGFEDCBA
  41.111 +             QPONMLKJIH
  41.112 +      GFEDCBA
  41.113       */
  41.114 -    
  41.115 +
  41.116      /*
  41.117 -      QPONMLKJIHGFEDCBA  
  41.118 -                  EDCBA  
  41.119 +      QPONMLKJIHGFEDCBA
  41.120 +                  EDCBA
  41.121               QPONM
  41.122        LKJIHGF
  41.123        */
  41.124  
  41.125      do { i = ((i>>(order_nr-10)) | ( i<<10 ) ) & ((1<<order_nr)-1); }
  41.126      while ( i >= nr ); /* this won't ever loop if nr is a power of 2 */
  41.127 -    
  41.128 +
  41.129      return i;
  41.130  }
  41.131  
  41.132 @@ -165,7 +165,7 @@ static uint64_t llgettimeofday(void)
  41.133  
  41.134  static uint64_t tv_delta(struct timeval *new, struct timeval *old)
  41.135  {
  41.136 -    return ((new->tv_sec - old->tv_sec)*1000000 ) + 
  41.137 +    return ((new->tv_sec - old->tv_sec)*1000000 ) +
  41.138          (new->tv_usec - old->tv_usec);
  41.139  }
  41.140  
  41.141 @@ -175,7 +175,7 @@ static uint64_t tv_delta(struct timeval 
  41.142  
  41.143  /*
  41.144  ** We control the rate at which we transmit (or save) to minimize impact
  41.145 -** on running domains (including the target if we're doing live migrate). 
  41.146 +** on running domains (including the target if we're doing live migrate).
  41.147  */
  41.148  
  41.149  #define MAX_MBIT_RATE    500      /* maximum transmit rate for migrate */
  41.150 @@ -193,10 +193,10 @@ static uint64_t tv_delta(struct timeval 
  41.151  static int mbit_rate, ombit_rate = 0;
  41.152  
  41.153  /* Have we reached the maximum transmission rate? */
  41.154 -#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE) 
  41.155 +#define RATE_IS_MAX() (mbit_rate == MAX_MBIT_RATE)
  41.156  
  41.157  
  41.158 -static inline void initialize_mbit_rate() 
  41.159 +static inline void initialize_mbit_rate()
  41.160  {
  41.161      mbit_rate = START_MBIT_RATE;
  41.162  }
  41.163 @@ -213,7 +213,7 @@ static int ratewrite(int io_fd, void *bu
  41.164  
  41.165      if (START_MBIT_RATE == 0)
  41.166          return write(io_fd, buf, n);
  41.167 -    
  41.168 +
  41.169      budget -= n;
  41.170      if (budget < 0) {
  41.171          if (mbit_rate != ombit_rate) {
  41.172 @@ -253,46 +253,46 @@ static int ratewrite(int io_fd, void *bu
  41.173  
  41.174  #else /* ! ADAPTIVE SAVE */
  41.175  
  41.176 -#define RATE_IS_MAX() (0) 
  41.177 -#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n)) 
  41.178 -#define initialize_mbit_rate() 
  41.179 +#define RATE_IS_MAX() (0)
  41.180 +#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n))
  41.181 +#define initialize_mbit_rate()
  41.182  
  41.183  #endif
  41.184  
  41.185  
  41.186  static inline ssize_t write_exact(int fd, void *buf, size_t count)
  41.187  {
  41.188 -    if(write(fd, buf, count) != count) 
  41.189 -        return 0; 
  41.190 -    return 1; 
  41.191 -} 
  41.192 +    if(write(fd, buf, count) != count)
  41.193 +        return 0;
  41.194 +    return 1;
  41.195 +}
  41.196  
  41.197  
  41.198  
  41.199 -static int print_stats(int xc_handle, uint32_t domid, int pages_sent, 
  41.200 +static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
  41.201                         xc_shadow_control_stats_t *stats, int print)
  41.202  {
  41.203      static struct timeval wall_last;
  41.204      static long long      d0_cpu_last;
  41.205      static long long      d1_cpu_last;
  41.206 -    
  41.207 +
  41.208      struct timeval        wall_now;
  41.209      long long             wall_delta;
  41.210      long long             d0_cpu_now, d0_cpu_delta;
  41.211      long long             d1_cpu_now, d1_cpu_delta;
  41.212 -    
  41.213 +
  41.214      gettimeofday(&wall_now, NULL);
  41.215 -    
  41.216 +
  41.217      d0_cpu_now = xc_domain_get_cpu_usage(xc_handle, 0, /* FIXME */ 0)/1000;
  41.218      d1_cpu_now = xc_domain_get_cpu_usage(xc_handle, domid, /* FIXME */ 0)/1000;
  41.219  
  41.220 -    if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) ) 
  41.221 +    if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
  41.222          fprintf(stderr, "ARRHHH!!\n");
  41.223 -    
  41.224 +
  41.225      wall_delta = tv_delta(&wall_now,&wall_last)/1000;
  41.226 -    
  41.227 +
  41.228      if (wall_delta == 0) wall_delta = 1;
  41.229 -    
  41.230 +
  41.231      d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
  41.232      d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
  41.233  
  41.234 @@ -300,14 +300,14 @@ static int print_stats(int xc_handle, ui
  41.235          fprintf(stderr,
  41.236                  "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
  41.237                  "dirtied %dMb/s %" PRId32 " pages\n",
  41.238 -                wall_delta, 
  41.239 +                wall_delta,
  41.240                  (int)((d0_cpu_delta*100)/wall_delta),
  41.241                  (int)((d1_cpu_delta*100)/wall_delta),
  41.242                  (int)((pages_sent*PAGE_SIZE)/(wall_delta*(1000/8))),
  41.243                  (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))),
  41.244                  stats->dirty_count);
  41.245  
  41.246 -#ifdef ADAPTIVE_SAVE    
  41.247 +#ifdef ADAPTIVE_SAVE
  41.248      if (((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))) > mbit_rate) {
  41.249          mbit_rate = (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8)))
  41.250              + 50;
  41.251 @@ -315,16 +315,16 @@ static int print_stats(int xc_handle, ui
  41.252              mbit_rate = MAX_MBIT_RATE;
  41.253      }
  41.254  #endif
  41.255 -    
  41.256 +
  41.257      d0_cpu_last = d0_cpu_now;
  41.258      d1_cpu_last = d1_cpu_now;
  41.259 -    wall_last   = wall_now; 
  41.260 +    wall_last   = wall_now;
  41.261  
  41.262      return 0;
  41.263  }
  41.264  
  41.265  
  41.266 -static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn, 
  41.267 +static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn,
  41.268                            unsigned long *arr, int runs)
  41.269  {
  41.270      long long start, now;
  41.271 @@ -335,24 +335,24 @@ static int analysis_phase(int xc_handle,
  41.272  
  41.273      for (j = 0; j < runs; j++) {
  41.274          int i;
  41.275 -        
  41.276 +
  41.277          xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_CLEAN,
  41.278                            arr, max_pfn, NULL);
  41.279          fprintf(stderr, "#Flush\n");
  41.280 -        for ( i = 0; i < 40; i++ ) {     
  41.281 -            usleep(50000);     
  41.282 +        for ( i = 0; i < 40; i++ ) {
  41.283 +            usleep(50000);
  41.284              now = llgettimeofday();
  41.285              xc_shadow_control(xc_handle, domid, DOM0_SHADOW_CONTROL_OP_PEEK,
  41.286                                NULL, 0, &stats);
  41.287 -            
  41.288 +
  41.289              fprintf(stderr, "now= %lld faults= %" PRId32 " dirty= %" PRId32
  41.290 -                    " dirty_net= %" PRId32 " dirty_block= %" PRId32"\n", 
  41.291 -                    ((now-start)+500)/1000, 
  41.292 +                    " dirty_net= %" PRId32 " dirty_block= %" PRId32"\n",
  41.293 +                    ((now-start)+500)/1000,
  41.294                      stats.fault_count, stats.dirty_count,
  41.295                      stats.dirty_net_count, stats.dirty_block_count);
  41.296          }
  41.297      }
  41.298 -    
  41.299 +
  41.300      return -1;
  41.301  }
  41.302  
  41.303 @@ -375,7 +375,7 @@ static int suspend_and_state(int (*suspe
  41.304          return -1;
  41.305      }
  41.306  
  41.307 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt)) 
  41.308 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
  41.309          ERR("Could not get vcpu context");
  41.310  
  41.311  
  41.312 @@ -383,22 +383,22 @@ static int suspend_and_state(int (*suspe
  41.313          return 0; // success
  41.314  
  41.315      if (info->paused) {
  41.316 -        // try unpausing domain, wait, and retest 
  41.317 +        // try unpausing domain, wait, and retest
  41.318          xc_domain_unpause( xc_handle, dom );
  41.319 -        
  41.320 +
  41.321          ERR("Domain was paused. Wait and re-test.");
  41.322          usleep(10000);  // 10ms
  41.323 -        
  41.324 +
  41.325          goto retry;
  41.326      }
  41.327  
  41.328  
  41.329      if( ++i < 100 ) {
  41.330          ERR("Retry suspend domain.");
  41.331 -        usleep(10000);  // 10ms 
  41.332 +        usleep(10000);  // 10ms
  41.333          goto retry;
  41.334      }
  41.335 -    
  41.336 +
  41.337      ERR("Unable to suspend domain.");
  41.338  
  41.339      return -1;
  41.340 @@ -406,173 +406,173 @@ static int suspend_and_state(int (*suspe
  41.341  
  41.342  
  41.343  /*
  41.344 -** During transfer (or in the state file), all page-table pages must be  
  41.345 -** converted into a 'canonical' form where references to actual mfns 
  41.346 -** are replaced with references to the corresponding pfns. 
  41.347 +** During transfer (or in the state file), all page-table pages must be
  41.348 +** converted into a 'canonical' form where references to actual mfns
  41.349 +** are replaced with references to the corresponding pfns.
  41.350  **
  41.351 -** This function performs the appropriate conversion, taking into account 
  41.352 -** which entries do not require canonicalization (in particular, those 
  41.353 -** entries which map the virtual address reserved for the hypervisor). 
  41.354 +** This function performs the appropriate conversion, taking into account
  41.355 +** which entries do not require canonicalization (in particular, those
  41.356 +** entries which map the virtual address reserved for the hypervisor).
  41.357  */
  41.358 -void canonicalize_pagetable(unsigned long type, unsigned long pfn, 
  41.359 -                             const void *spage, void *dpage) 
  41.360 -{ 
  41.361 -    
  41.362 +void canonicalize_pagetable(unsigned long type, unsigned long pfn,
  41.363 +                             const void *spage, void *dpage)
  41.364 +{
  41.365 +
  41.366      int i, pte_last, xen_start, xen_end;
  41.367      uint64_t pte;
  41.368  
  41.369 -    /* 
  41.370 +    /*
  41.371      ** We need to determine which entries in this page table hold
  41.372      ** reserved hypervisor mappings. This depends on the current
  41.373 -    ** page table type as well as the number of paging levels. 
  41.374 +    ** page table type as well as the number of paging levels.
  41.375      */
  41.376 -    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8); 
  41.377 -    
  41.378 -    if (pt_levels == 2 && type == L2TAB)
  41.379 -        xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT); 
  41.380 +    xen_start = xen_end = pte_last = PAGE_SIZE / ((pt_levels == 2)? 4 : 8);
  41.381  
  41.382 -    if (pt_levels == 3 && type == L3TAB) 
  41.383 -        xen_start = L3_PAGETABLE_ENTRIES_PAE; 
  41.384 -        
  41.385 -    /* 
  41.386 -    ** in PAE only the L2 mapping the top 1GB contains Xen mappings. 
  41.387 +    if (pt_levels == 2 && type == L2TAB)
  41.388 +        xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT);
  41.389 +
  41.390 +    if (pt_levels == 3 && type == L3TAB)
  41.391 +        xen_start = L3_PAGETABLE_ENTRIES_PAE;
  41.392 +
  41.393 +    /*
  41.394 +    ** in PAE only the L2 mapping the top 1GB contains Xen mappings.
  41.395      ** We can spot this by looking for the guest linear mapping which
  41.396 -    ** Xen always ensures is present in that L2. Guests must ensure 
  41.397 -    ** that this check will fail for other L2s. 
  41.398 +    ** Xen always ensures is present in that L2. Guests must ensure
  41.399 +    ** that this check will fail for other L2s.
  41.400      */
  41.401      if (pt_levels == 3 && type == L2TAB) {
  41.402  
  41.403  /* XXX index of the L2 entry in PAE mode which holds the guest LPT */
  41.404 -#define PAE_GLPT_L2ENTRY (495) 
  41.405 -        pte = ((uint64_t*)spage)[PAE_GLPT_L2ENTRY]; 
  41.406 +#define PAE_GLPT_L2ENTRY (495)
  41.407 +        pte = ((uint64_t*)spage)[PAE_GLPT_L2ENTRY];
  41.408  
  41.409          if(((pte >> PAGE_SHIFT) & 0x0fffffff) == live_p2m[pfn])
  41.410 -            xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff; 
  41.411 +            xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff;
  41.412      }
  41.413  
  41.414 -    if (pt_levels == 4 && type == L4TAB) { 
  41.415 +    if (pt_levels == 4 && type == L4TAB) {
  41.416          /*
  41.417 -        ** XXX SMH: should compute these from hvirt_start (which we have) 
  41.418 -        ** and hvirt_end (which we don't) 
  41.419 +        ** XXX SMH: should compute these from hvirt_start (which we have)
  41.420 +        ** and hvirt_end (which we don't)
  41.421          */
  41.422 -        xen_start = 256; 
  41.423 -        xen_end   = 272; 
  41.424 +        xen_start = 256;
  41.425 +        xen_end   = 272;
  41.426      }
  41.427  
  41.428      /* Now iterate through the page table, canonicalizing each PTE */
  41.429      for (i = 0; i < pte_last; i++ ) {
  41.430  
  41.431 -        unsigned long pfn, mfn; 
  41.432 -        
  41.433 +        unsigned long pfn, mfn;
  41.434 +
  41.435          if (pt_levels == 2)
  41.436              pte = ((uint32_t*)spage)[i];
  41.437          else
  41.438              pte = ((uint64_t*)spage)[i];
  41.439 -        
  41.440 +
  41.441          if (i >= xen_start && i < xen_end)
  41.442              pte = 0;
  41.443 -        
  41.444 +
  41.445          if (pte & _PAGE_PRESENT) {
  41.446 -            
  41.447 -            mfn = (pte >> PAGE_SHIFT) & 0xfffffff;      
  41.448 +
  41.449 +            mfn = (pte >> PAGE_SHIFT) & 0xfffffff;
  41.450              if (!MFN_IS_IN_PSEUDOPHYS_MAP(mfn)) {
  41.451 -                /* This will happen if the type info is stale which 
  41.452 +                /* This will happen if the type info is stale which
  41.453                     is quite feasible under live migration */
  41.454                  DPRINTF("PT Race: [%08lx,%d] pte=%llx, mfn=%08lx\n",
  41.455 -                        type, i, (unsigned long long)pte, mfn); 
  41.456 +                        type, i, (unsigned long long)pte, mfn);
  41.457                  pfn = 0; /* zap it - we'll retransmit this page later */
  41.458 -            } else 
  41.459 +            } else
  41.460                  pfn = mfn_to_pfn(mfn);
  41.461 -            
  41.462 +
  41.463              pte &= 0xffffff0000000fffULL;
  41.464              pte |= (uint64_t)pfn << PAGE_SHIFT;
  41.465          }
  41.466 -        
  41.467 +
  41.468          if (pt_levels == 2)
  41.469              ((uint32_t*)dpage)[i] = pte;
  41.470          else
  41.471 -            ((uint64_t*)dpage)[i] = pte;		       
  41.472 -        
  41.473 -    } 
  41.474 -    
  41.475 -    return; 
  41.476 +            ((uint64_t*)dpage)[i] = pte;
  41.477 +
  41.478 +    }
  41.479 +
  41.480 +    return;
  41.481  }
  41.482  
  41.483  
  41.484  
  41.485 -static unsigned long *xc_map_m2p(int xc_handle, 
  41.486 -                                 unsigned long max_mfn, 
  41.487 -                                 int prot) 
  41.488 -{ 
  41.489 +static unsigned long *xc_map_m2p(int xc_handle,
  41.490 +                                 unsigned long max_mfn,
  41.491 +                                 int prot)
  41.492 +{
  41.493      struct xen_machphys_mfn_list xmml;
  41.494 -    privcmd_mmap_t ioctlx; 
  41.495 -    privcmd_mmap_entry_t *entries; 
  41.496 -    unsigned long m2p_chunks, m2p_size; 
  41.497 -    unsigned long *m2p; 
  41.498 -    int i, rc; 
  41.499 +    privcmd_mmap_t ioctlx;
  41.500 +    privcmd_mmap_entry_t *entries;
  41.501 +    unsigned long m2p_chunks, m2p_size;
  41.502 +    unsigned long *m2p;
  41.503 +    int i, rc;
  41.504  
  41.505 -    m2p_size   = M2P_SIZE(max_mfn); 
  41.506 -    m2p_chunks = M2P_CHUNKS(max_mfn); 
  41.507 +    m2p_size   = M2P_SIZE(max_mfn);
  41.508 +    m2p_chunks = M2P_CHUNKS(max_mfn);
  41.509  
  41.510      xmml.max_extents = m2p_chunks;
  41.511 -    if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) { 
  41.512 -        ERR("failed to allocate space for m2p mfns"); 
  41.513 -        return NULL; 
  41.514 -    } 
  41.515 +    if (!(xmml.extent_start = malloc(m2p_chunks * sizeof(unsigned long)))) {
  41.516 +        ERR("failed to allocate space for m2p mfns");
  41.517 +        return NULL;
  41.518 +    }
  41.519  
  41.520      if (xc_memory_op(xc_handle, XENMEM_machphys_mfn_list, &xmml) ||
  41.521          (xmml.nr_extents != m2p_chunks)) {
  41.522 -        ERR("xc_get_m2p_mfns"); 
  41.523 +        ERR("xc_get_m2p_mfns");
  41.524          return NULL;
  41.525      }
  41.526  
  41.527 -    if ((m2p = mmap(NULL, m2p_size, prot, 
  41.528 +    if ((m2p = mmap(NULL, m2p_size, prot,
  41.529                      MAP_SHARED, xc_handle, 0)) == MAP_FAILED) {
  41.530 -        ERR("failed to mmap m2p"); 
  41.531 -        return NULL; 
  41.532 -    } 
  41.533 +        ERR("failed to mmap m2p");
  41.534 +        return NULL;
  41.535 +    }
  41.536  
  41.537 -    if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) { 
  41.538 -        ERR("failed to allocate space for mmap entries"); 
  41.539 -        return NULL; 
  41.540 -    } 
  41.541 +    if (!(entries = malloc(m2p_chunks * sizeof(privcmd_mmap_entry_t)))) {
  41.542 +        ERR("failed to allocate space for mmap entries");
  41.543 +        return NULL;
  41.544 +    }
  41.545  
  41.546      ioctlx.num   = m2p_chunks;
  41.547 -    ioctlx.dom   = DOMID_XEN; 
  41.548 -    ioctlx.entry = entries; 
  41.549 -    
  41.550 -    for (i=0; i < m2p_chunks; i++) { 
  41.551 -        entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE)); 
  41.552 +    ioctlx.dom   = DOMID_XEN;
  41.553 +    ioctlx.entry = entries;
  41.554 +
  41.555 +    for (i=0; i < m2p_chunks; i++) {
  41.556 +        entries[i].va = (unsigned long)(((void *)m2p) + (i * M2P_CHUNK_SIZE));
  41.557          entries[i].mfn = xmml.extent_start[i];
  41.558          entries[i].npages = M2P_CHUNK_SIZE >> PAGE_SHIFT;
  41.559      }
  41.560  
  41.561      if ((rc = ioctl(xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx)) < 0) {
  41.562 -        ERR("ioctl_mmap failed (rc = %d)", rc); 
  41.563 -        return NULL; 
  41.564 +        ERR("ioctl_mmap failed (rc = %d)", rc);
  41.565 +        return NULL;
  41.566      }
  41.567  
  41.568      free(xmml.extent_start);
  41.569 -    free(entries); 
  41.570 +    free(entries);
  41.571  
  41.572 -    return m2p; 
  41.573 +    return m2p;
  41.574  }
  41.575  
  41.576  
  41.577  
  41.578 -int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 
  41.579 +int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
  41.580                    uint32_t max_factor, uint32_t flags, int (*suspend)(int))
  41.581  {
  41.582      xc_dominfo_t info;
  41.583  
  41.584      int rc = 1, i, j, last_iter, iter = 0;
  41.585 -    int live  = (flags & XCFLAGS_LIVE); 
  41.586 -    int debug = (flags & XCFLAGS_DEBUG); 
  41.587 +    int live  = (flags & XCFLAGS_LIVE);
  41.588 +    int debug = (flags & XCFLAGS_DEBUG);
  41.589      int sent_last_iter, skip_this_iter;
  41.590  
  41.591      /* The new domain's shared-info frame number. */
  41.592      unsigned long shared_info_frame;
  41.593 -    
  41.594 +
  41.595      /* A copy of the CPU context of the guest. */
  41.596      vcpu_guest_context_t ctxt;
  41.597  
  41.598 @@ -581,7 +581,7 @@ int xc_linux_save(int xc_handle, int io_
  41.599      unsigned long *pfn_batch = NULL;
  41.600  
  41.601      /* A temporary mapping, and a copy, of one frame of guest memory. */
  41.602 -    char page[PAGE_SIZE]; 
  41.603 +    char page[PAGE_SIZE];
  41.604  
  41.605      /* Double and single indirect references to the live P2M table */
  41.606      unsigned long *live_p2m_frame_list_list = NULL;
  41.607 @@ -597,14 +597,14 @@ int xc_linux_save(int xc_handle, int io_
  41.608      unsigned char *region_base = NULL;
  41.609  
  41.610      /* power of 2 order of max_pfn */
  41.611 -    int order_nr; 
  41.612 +    int order_nr;
  41.613  
  41.614      /* bitmap of pages:
  41.615 -       - that should be sent this iteration (unless later marked as skip); 
  41.616 +       - that should be sent this iteration (unless later marked as skip);
  41.617         - to skip this iteration because already dirty;
  41.618         - to fixup by sending at the end if not already resent; */
  41.619      unsigned long *to_send = NULL, *to_skip = NULL, *to_fix = NULL;
  41.620 -    
  41.621 +
  41.622      xc_shadow_control_stats_t stats;
  41.623  
  41.624      unsigned long needed_to_fix = 0;
  41.625 @@ -612,29 +612,29 @@ int xc_linux_save(int xc_handle, int io_
  41.626  
  41.627  
  41.628      /* If no explicit control parameters given, use defaults */
  41.629 -    if(!max_iters) 
  41.630 -        max_iters = DEF_MAX_ITERS; 
  41.631 -    if(!max_factor) 
  41.632 -        max_factor = DEF_MAX_FACTOR; 
  41.633 -    
  41.634 -    initialize_mbit_rate(); 
  41.635 +    if(!max_iters)
  41.636 +        max_iters = DEF_MAX_ITERS;
  41.637 +    if(!max_factor)
  41.638 +        max_factor = DEF_MAX_FACTOR;
  41.639  
  41.640 -    if(!get_platform_info(xc_handle, dom, 
  41.641 +    initialize_mbit_rate();
  41.642 +
  41.643 +    if(!get_platform_info(xc_handle, dom,
  41.644                            &max_mfn, &hvirt_start, &pt_levels)) {
  41.645 -        ERR("Unable to get platform info."); 
  41.646 +        ERR("Unable to get platform info.");
  41.647          return 1;
  41.648      }
  41.649  
  41.650      if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
  41.651          ERR("Could not get domain info");
  41.652 -        return 1; 
  41.653 +        return 1;
  41.654      }
  41.655  
  41.656      if (mlock(&ctxt, sizeof(ctxt))) {
  41.657          ERR("Unable to mlock ctxt");
  41.658          return 1;
  41.659      }
  41.660 -    
  41.661 +
  41.662      /* Only have to worry about vcpu 0 even for SMP */
  41.663      if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
  41.664          ERR("Could not get vcpu context");
  41.665 @@ -648,16 +648,16 @@ int xc_linux_save(int xc_handle, int io_
  41.666          ERR("Domain is not in a valid Linux guest OS state");
  41.667          goto out;
  41.668      }
  41.669 -  
  41.670 +
  41.671     /* cheesy sanity check */
  41.672      if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
  41.673 -        ERR("Invalid state record -- pfn count out of range: %lu", 
  41.674 -            (info.max_memkb >> (PAGE_SHIFT - 10))); 
  41.675 +        ERR("Invalid state record -- pfn count out of range: %lu",
  41.676 +            (info.max_memkb >> (PAGE_SHIFT - 10)));
  41.677          goto out;
  41.678       }
  41.679 - 
  41.680 +
  41.681      /* Map the shared info frame */
  41.682 -    if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 
  41.683 +    if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
  41.684                                              PROT_READ, shared_info_frame))) {
  41.685          ERR("Couldn't map live_shinfo");
  41.686          goto out;
  41.687 @@ -665,8 +665,8 @@ int xc_linux_save(int xc_handle, int io_
  41.688  
  41.689      max_pfn = live_shinfo->arch.max_pfn;
  41.690  
  41.691 -    live_p2m_frame_list_list = 
  41.692 -        xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ, 
  41.693 +    live_p2m_frame_list_list =
  41.694 +        xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
  41.695                               live_shinfo->arch.pfn_to_mfn_frame_list_list);
  41.696  
  41.697      if (!live_p2m_frame_list_list) {
  41.698 @@ -674,24 +674,24 @@ int xc_linux_save(int xc_handle, int io_
  41.699          goto out;
  41.700      }
  41.701  
  41.702 -    live_p2m_frame_list = 
  41.703 +    live_p2m_frame_list =
  41.704          xc_map_foreign_batch(xc_handle, dom, PROT_READ,
  41.705                               live_p2m_frame_list_list,
  41.706 -                             P2M_FLL_ENTRIES); 
  41.707 -    
  41.708 +                             P2M_FLL_ENTRIES);
  41.709 +
  41.710      if (!live_p2m_frame_list) {
  41.711          ERR("Couldn't map p2m_frame_list");
  41.712          goto out;
  41.713      }
  41.714  
  41.715 -    /* Map all the frames of the pfn->mfn table. For migrate to succeed, 
  41.716 -       the guest must not change which frames are used for this purpose. 
  41.717 +    /* Map all the frames of the pfn->mfn table. For migrate to succeed,
  41.718 +       the guest must not change which frames are used for this purpose.
  41.719         (its not clear why it would want to change them, and we'll be OK
  41.720         from a safety POV anyhow. */
  41.721  
  41.722      live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
  41.723                                      live_p2m_frame_list,
  41.724 -                                    P2M_FL_ENTRIES); 
  41.725 +                                    P2M_FL_ENTRIES);
  41.726  
  41.727      if (!live_p2m) {
  41.728          ERR("Couldn't map p2m table");
  41.729 @@ -699,25 +699,25 @@ int xc_linux_save(int xc_handle, int io_
  41.730      }
  41.731  
  41.732      /* Setup the mfn_to_pfn table mapping */
  41.733 -    if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) { 
  41.734 -        ERR("Failed to map live M2P table"); 
  41.735 -        goto out; 
  41.736 -    } 
  41.737 +    if(!(live_m2p = xc_map_m2p(xc_handle, max_mfn, PROT_READ))) {
  41.738 +        ERR("Failed to map live M2P table");
  41.739 +        goto out;
  41.740 +    }
  41.741  
  41.742 -    
  41.743 +
  41.744      /* Get a local copy of the live_P2M_frame_list */
  41.745 -    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) { 
  41.746 +    if(!(p2m_frame_list = malloc(P2M_FL_SIZE))) {
  41.747          ERR("Couldn't allocate p2m_frame_list array");
  41.748          goto out;
  41.749      }
  41.750 -    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE); 
  41.751 +    memcpy(p2m_frame_list, live_p2m_frame_list, P2M_FL_SIZE);
  41.752  
  41.753      /* Canonicalise the pfn-to-mfn table frame-number list. */
  41.754      for (i = 0; i < max_pfn; i += ulpp) {
  41.755 -        if (!translate_mfn_to_pfn(&p2m_frame_list[i/ulpp])) { 
  41.756 +        if (!translate_mfn_to_pfn(&p2m_frame_list[i/ulpp])) {
  41.757              ERR("Frame# in pfn-to-mfn frame list is not in pseudophys");
  41.758 -            ERR("entry %d: p2m_frame_list[%ld] is 0x%lx", i, i/ulpp, 
  41.759 -                p2m_frame_list[i/ulpp]); 
  41.760 +            ERR("entry %d: p2m_frame_list[%ld] is 0x%lx", i, i/ulpp,
  41.761 +                p2m_frame_list[i/ulpp]);
  41.762              goto out;
  41.763          }
  41.764      }
  41.765 @@ -725,31 +725,31 @@ int xc_linux_save(int xc_handle, int io_
  41.766      /* Domain is still running at this point */
  41.767      if (live) {
  41.768  
  41.769 -        if (xc_shadow_control(xc_handle, dom, 
  41.770 +        if (xc_shadow_control(xc_handle, dom,
  41.771                                DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
  41.772 -                              NULL, 0, NULL ) < 0) { 
  41.773 +                              NULL, 0, NULL ) < 0) {
  41.774              ERR("Couldn't enable shadow mode");
  41.775              goto out;
  41.776          }
  41.777 -        
  41.778 +
  41.779          last_iter = 0;
  41.780 -        
  41.781 +
  41.782      } else {
  41.783 -        
  41.784 +
  41.785          /* This is a non-live suspend. Issue the call back to get the
  41.786             domain suspended */
  41.787 -        
  41.788 +
  41.789          last_iter = 1;
  41.790 -        
  41.791 +
  41.792          if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt)) {
  41.793              ERR("Domain appears not to have suspended");
  41.794              goto out;
  41.795          }
  41.796 -        
  41.797 +
  41.798      }
  41.799  
  41.800      /* pretend we sent all the pages last iteration */
  41.801 -    sent_last_iter = max_pfn; 
  41.802 +    sent_last_iter = max_pfn;
  41.803  
  41.804  
  41.805      /* calculate the power of 2 order of max_pfn, e.g.
  41.806 @@ -758,15 +758,15 @@ int xc_linux_save(int xc_handle, int io_
  41.807          continue;
  41.808  
  41.809      /* Setup to_send / to_fix and to_skip bitmaps */
  41.810 -    to_send = malloc(BITMAP_SIZE); 
  41.811 -    to_fix  = calloc(1, BITMAP_SIZE); 
  41.812 -    to_skip = malloc(BITMAP_SIZE); 
  41.813 -    
  41.814 +    to_send = malloc(BITMAP_SIZE);
  41.815 +    to_fix  = calloc(1, BITMAP_SIZE);
  41.816 +    to_skip = malloc(BITMAP_SIZE);
  41.817 +
  41.818      if (!to_send || !to_fix || !to_skip) {
  41.819          ERR("Couldn't allocate to_send array");
  41.820          goto out;
  41.821      }
  41.822 -    
  41.823 +
  41.824      memset(to_send, 0xff, BITMAP_SIZE);
  41.825  
  41.826      if (mlock(to_send, BITMAP_SIZE)) {
  41.827 @@ -779,7 +779,7 @@ int xc_linux_save(int xc_handle, int io_
  41.828          ERR("Unable to mlock to_skip");
  41.829          return 1;
  41.830      }
  41.831 -        
  41.832 +
  41.833      analysis_phase(xc_handle, dom, max_pfn, to_skip, 0);
  41.834  
  41.835      /* We want zeroed memory so use calloc rather than malloc. */
  41.836 @@ -787,7 +787,7 @@ int xc_linux_save(int xc_handle, int io_
  41.837      pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(unsigned long));
  41.838  
  41.839      if ((pfn_type == NULL) || (pfn_batch == NULL)) {
  41.840 -        ERR("failed to alloc memory for pfn_type and/or pfn_batch arrays"); 
  41.841 +        ERR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
  41.842          errno = ENOMEM;
  41.843          goto out;
  41.844      }
  41.845 @@ -803,12 +803,12 @@ int xc_linux_save(int xc_handle, int io_
  41.846       */
  41.847      {
  41.848          int err=0;
  41.849 -        unsigned long mfn; 
  41.850 +        unsigned long mfn;
  41.851          for (i = 0; i < max_pfn; i++) {
  41.852  
  41.853              mfn = live_p2m[i];
  41.854 -            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) { 
  41.855 -                DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i, 
  41.856 +            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) {
  41.857 +                DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i,
  41.858                          mfn, mfn_to_pfn(mfn));
  41.859                  err++;
  41.860              }
  41.861 @@ -819,16 +819,16 @@ int xc_linux_save(int xc_handle, int io_
  41.862  
  41.863      /* Start writing out the saved-domain record. */
  41.864  
  41.865 -    if(!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) { 
  41.866 +    if(!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
  41.867          ERR("write: max_pfn");
  41.868          goto out;
  41.869      }
  41.870  
  41.871 -    if(!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) { 
  41.872 +    if(!write_exact(io_fd, p2m_frame_list, P2M_FL_SIZE)) {
  41.873          ERR("write: p2m_frame_list");
  41.874          goto out;
  41.875      }
  41.876 -    
  41.877 +
  41.878      print_stats(xc_handle, dom, 0, &stats, 0);
  41.879  
  41.880      /* Now write out each data page, canonicalising page tables as we go... */
  41.881 @@ -853,8 +853,8 @@ int xc_linux_save(int xc_handle, int io_
  41.882                  DPRINTF("\b\b\b\b%3d%%", this_pc);
  41.883                  prev_pc = this_pc;
  41.884              }
  41.885 -            
  41.886 -            /* slightly wasteful to peek the whole array evey time, 
  41.887 +
  41.888 +            /* slightly wasteful to peek the whole array evey time,
  41.889                 but this is fast enough for the moment. */
  41.890              if (!last_iter && xc_shadow_control(
  41.891                      xc_handle, dom, DOM0_SHADOW_CONTROL_OP_PEEK,
  41.892 @@ -862,7 +862,7 @@ int xc_linux_save(int xc_handle, int io_
  41.893                  ERR("Error peeking shadow bitmap");
  41.894                  goto out;
  41.895              }
  41.896 -     
  41.897 +
  41.898  
  41.899              /* load pfn_type[] with the mfn of all the pages we're doing in
  41.900                 this batch. */
  41.901 @@ -873,11 +873,11 @@ int xc_linux_save(int xc_handle, int io_
  41.902                  if (debug) {
  41.903                      DPRINTF("%d pfn= %08lx mfn= %08lx %d  [mfn]= %08lx\n",
  41.904                              iter, (unsigned long)n, live_p2m[n],
  41.905 -                            test_bit(n, to_send), 
  41.906 +                            test_bit(n, to_send),
  41.907                              mfn_to_pfn(live_p2m[n]&0xFFFFF));
  41.908                  }
  41.909 -                
  41.910 -                if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip)) 
  41.911 +
  41.912 +                if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
  41.913                      skip_this_iter++; /* stats keeping */
  41.914  
  41.915                  if (!((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
  41.916 @@ -885,13 +885,13 @@ int xc_linux_save(int xc_handle, int io_
  41.917                        (test_bit(n, to_fix)  && last_iter)))
  41.918                      continue;
  41.919  
  41.920 -                /* 
  41.921 +                /*
  41.922                  ** we get here if:
  41.923                  **  1. page is marked to_send & hasn't already been re-dirtied
  41.924                  **  2. (ignore to_skip in last iteration)
  41.925                  **  3. add in pages that still need fixup (net bufs)
  41.926                  */
  41.927 -  
  41.928 +
  41.929                  pfn_batch[batch] = n;
  41.930                  pfn_type[batch]  = live_p2m[n];
  41.931  
  41.932 @@ -914,80 +914,80 @@ int xc_linux_save(int xc_handle, int io_
  41.933                              iter,n,pfn_type[batch]);
  41.934                  }
  41.935  
  41.936 -                clear_bit(n, to_fix); 
  41.937 -                
  41.938 +                clear_bit(n, to_fix);
  41.939 +
  41.940                  batch++;
  41.941              }
  41.942 -     
  41.943 +
  41.944              if (batch == 0)
  41.945                  goto skip; /* vanishingly unlikely... */
  41.946 -      
  41.947 +
  41.948              if ((region_base = xc_map_foreign_batch(
  41.949 -                     xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) { 
  41.950 +                     xc_handle, dom, PROT_READ, pfn_type, batch)) == 0) {
  41.951                  ERR("map batch failed");
  41.952                  goto out;
  41.953              }
  41.954 -     
  41.955 +
  41.956              if (xc_get_pfn_type_batch(xc_handle, dom, batch, pfn_type)) {
  41.957                  ERR("get_pfn_type_batch failed");
  41.958                  goto out;
  41.959              }
  41.960 -     
  41.961 +
  41.962              for (j = 0; j < batch; j++) {
  41.963  
  41.964                  if ((pfn_type[j] & LTAB_MASK) == XTAB) {
  41.965                      DPRINTF("type fail: page %i mfn %08lx\n", j, pfn_type[j]);
  41.966                      continue;
  41.967                  }
  41.968 -  
  41.969 -                if (debug) 
  41.970 +
  41.971 +                if (debug)
  41.972                      fprintf(stderr, "%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
  41.973                              " sum= %08lx\n",
  41.974 -                            iter, 
  41.975 +                            iter,
  41.976                              (pfn_type[j] & LTAB_MASK) | pfn_batch[j],
  41.977                              pfn_type[j],
  41.978                              mfn_to_pfn(pfn_type[j]&(~LTAB_MASK)),
  41.979                              csum_page(region_base + (PAGE_SIZE*j)));
  41.980 -                
  41.981 +
  41.982                  /* canonicalise mfn->pfn */
  41.983                  pfn_type[j] = (pfn_type[j] & LTAB_MASK) | pfn_batch[j];
  41.984              }
  41.985  
  41.986 -            if(!write_exact(io_fd, &batch, sizeof(unsigned int))) { 
  41.987 +            if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
  41.988                  ERR("Error when writing to state file (2)");
  41.989                  goto out;
  41.990              }
  41.991  
  41.992 -            if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) { 
  41.993 +            if(!write_exact(io_fd, pfn_type, sizeof(unsigned long)*j)) {
  41.994                  ERR("Error when writing to state file (3)");
  41.995                  goto out;
  41.996              }
  41.997 -            
  41.998 +
  41.999              /* entering this loop, pfn_type is now in pfns (Not mfns) */
 41.1000              for (j = 0; j < batch; j++) {
 41.1001 -                
 41.1002 -                unsigned long pfn      = pfn_type[j] & ~LTAB_MASK; 
 41.1003 -                unsigned long pagetype = pfn_type[j] & LTAB_MASK; 
 41.1004 -                void *spage            = (void *) region_base + (PAGE_SIZE*j); 
 41.1005 +
 41.1006 +                unsigned long pfn      = pfn_type[j] & ~LTAB_MASK;
 41.1007 +                unsigned long pagetype = pfn_type[j] & LTAB_MASK;
 41.1008 +                void *spage            = (void *) region_base + (PAGE_SIZE*j);
 41.1009  
 41.1010  
 41.1011                  /* write out pages in batch */
 41.1012                  if (pagetype == XTAB)
 41.1013                      continue;
 41.1014  
 41.1015 -                pagetype &= LTABTYPE_MASK; 
 41.1016 -                
 41.1017 +                pagetype &= LTABTYPE_MASK;
 41.1018 +
 41.1019                  if (pagetype >= L1TAB && pagetype <= L4TAB) {
 41.1020 -                    
 41.1021 +
 41.1022                      /* We have a pagetable page: need to rewrite it. */
 41.1023 -                    canonicalize_pagetable(pagetype, pfn, spage, page); 
 41.1024 -                    
 41.1025 +                    canonicalize_pagetable(pagetype, pfn, spage, page);
 41.1026 +
 41.1027                      if (ratewrite(io_fd, page, PAGE_SIZE) != PAGE_SIZE) {
 41.1028                          ERR("Error when writing to state file (4)");
 41.1029                          goto out;
 41.1030                      }
 41.1031 -                    
 41.1032 -                }  else {  
 41.1033 +
 41.1034 +                }  else {
 41.1035  
 41.1036                      /* We have a normal page: just write it directly. */
 41.1037                      if (ratewrite(io_fd, spage, PAGE_SIZE) != PAGE_SIZE) {
 41.1038 @@ -996,36 +996,36 @@ int xc_linux_save(int xc_handle, int io_
 41.1039                      }
 41.1040                  }
 41.1041              } /* end of the write out for this batch */
 41.1042 -            
 41.1043 +
 41.1044              sent_this_iter += batch;
 41.1045  
 41.1046              munmap(region_base, batch*PAGE_SIZE);
 41.1047 -        
 41.1048 +
 41.1049          } /* end of this while loop for this iteration */
 41.1050 -        
 41.1051 -      skip: 
 41.1052 -        
 41.1053 +
 41.1054 +      skip:
 41.1055 +
 41.1056          total_sent += sent_this_iter;
 41.1057  
 41.1058 -        DPRINTF("\r %d: sent %d, skipped %d, ", 
 41.1059 +        DPRINTF("\r %d: sent %d, skipped %d, ",
 41.1060                  iter, sent_this_iter, skip_this_iter );
 41.1061  
 41.1062          if (last_iter) {
 41.1063              print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
 41.1064  
 41.1065 -            DPRINTF("Total pages sent= %ld (%.2fx)\n", 
 41.1066 +            DPRINTF("Total pages sent= %ld (%.2fx)\n",
 41.1067                      total_sent, ((float)total_sent)/max_pfn );
 41.1068              DPRINTF("(of which %ld were fixups)\n", needed_to_fix  );
 41.1069 -        }       
 41.1070 +        }
 41.1071  
 41.1072          if (last_iter && debug){
 41.1073              int minusone = -1;
 41.1074 -            memset(to_send, 0xff, BITMAP_SIZE); 
 41.1075 +            memset(to_send, 0xff, BITMAP_SIZE);
 41.1076              debug = 0;
 41.1077              fprintf(stderr, "Entering debug resend-all mode\n");
 41.1078 -    
 41.1079 +
 41.1080              /* send "-1" to put receiver into debug mode */
 41.1081 -            if(!write_exact(io_fd, &minusone, sizeof(int))) { 
 41.1082 +            if(!write_exact(io_fd, &minusone, sizeof(int))) {
 41.1083                  ERR("Error when writing to state file (6)");
 41.1084                  goto out;
 41.1085              }
 41.1086 @@ -1033,34 +1033,34 @@ int xc_linux_save(int xc_handle, int io_
 41.1087              continue;
 41.1088          }
 41.1089  
 41.1090 -        if (last_iter) break; 
 41.1091 +        if (last_iter) break;
 41.1092  
 41.1093          if (live) {
 41.1094  
 41.1095  
 41.1096 -            if( 
 41.1097 +            if(
 41.1098                  ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
 41.1099                  (iter >= max_iters) ||
 41.1100                  (sent_this_iter+skip_this_iter < 50) ||
 41.1101 -                (total_sent > max_pfn*max_factor) ) { 
 41.1102 +                (total_sent > max_pfn*max_factor) ) {
 41.1103  
 41.1104                  DPRINTF("Start last iteration\n");
 41.1105                  last_iter = 1;
 41.1106 -                
 41.1107 +
 41.1108                  if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
 41.1109                                        &ctxt)) {
 41.1110                      ERR("Domain appears not to have suspended");
 41.1111                      goto out;
 41.1112                  }
 41.1113 -                
 41.1114 -                DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n", 
 41.1115 -                        info.shared_info_frame, 
 41.1116 -                        (unsigned long)ctxt.user_regs.eip, 
 41.1117 +
 41.1118 +                DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
 41.1119 +                        info.shared_info_frame,
 41.1120 +                        (unsigned long)ctxt.user_regs.eip,
 41.1121                          (unsigned long)ctxt.user_regs.edx);
 41.1122 -            } 
 41.1123 -            
 41.1124 +            }
 41.1125 +
 41.1126              if (xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_CLEAN,
 41.1127 -                                  to_send, max_pfn, &stats ) != max_pfn) {  
 41.1128 +                                  to_send, max_pfn, &stats ) != max_pfn) {
 41.1129                  ERR("Error flushing shadow PT");
 41.1130                  goto out;
 41.1131              }
 41.1132 @@ -1068,7 +1068,7 @@ int xc_linux_save(int xc_handle, int io_
 41.1133              sent_last_iter = sent_this_iter;
 41.1134  
 41.1135              print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
 41.1136 -     
 41.1137 +
 41.1138          }
 41.1139  
 41.1140  
 41.1141 @@ -1077,8 +1077,8 @@ int xc_linux_save(int xc_handle, int io_
 41.1142      DPRINTF("All memory is saved\n");
 41.1143  
 41.1144      /* Zero terminate */
 41.1145 -    i = 0; 
 41.1146 -    if (!write_exact(io_fd, &i, sizeof(int))) { 
 41.1147 +    i = 0;
 41.1148 +    if (!write_exact(io_fd, &i, sizeof(int))) {
 41.1149          ERR("Error when writing to state file (6)");
 41.1150          goto out;
 41.1151      }
 41.1152 @@ -1086,18 +1086,18 @@ int xc_linux_save(int xc_handle, int io_
 41.1153      /* Send through a list of all the PFNs that were not in map at the close */
 41.1154      {
 41.1155          unsigned int i,j;
 41.1156 -        unsigned long pfntab[1024]; 
 41.1157 +        unsigned long pfntab[1024];
 41.1158  
 41.1159          for (i = 0, j = 0; i < max_pfn; i++) {
 41.1160              if (!is_mapped(live_p2m[i]))
 41.1161                  j++;
 41.1162          }
 41.1163 -        
 41.1164 -        if(!write_exact(io_fd, &j, sizeof(unsigned int))) { 
 41.1165 +
 41.1166 +        if(!write_exact(io_fd, &j, sizeof(unsigned int))) {
 41.1167              ERR("Error when writing to state file (6a)");
 41.1168              goto out;
 41.1169 -        }	
 41.1170 -        
 41.1171 +        }
 41.1172 +
 41.1173          for (i = 0, j = 0; i < max_pfn; ) {
 41.1174  
 41.1175              if (!is_mapped(live_p2m[i]))
 41.1176 @@ -1105,16 +1105,16 @@ int xc_linux_save(int xc_handle, int io_
 41.1177  
 41.1178              i++;
 41.1179              if (j == 1024 || i == max_pfn) {
 41.1180 -                if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) { 
 41.1181 +                if(!write_exact(io_fd, &pfntab, sizeof(unsigned long)*j)) {
 41.1182                      ERR("Error when writing to state file (6b)");
 41.1183                      goto out;
 41.1184 -                } 
 41.1185 +                }
 41.1186                  j = 0;
 41.1187              }
 41.1188          }
 41.1189  
 41.1190      }
 41.1191 -    
 41.1192 +
 41.1193      /* Canonicalise the suspend-record frame number. */
 41.1194      if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) ){
 41.1195          ERR("Suspend record is not in range of pseudophys map");
 41.1196 @@ -1138,7 +1138,7 @@ int xc_linux_save(int xc_handle, int io_
 41.1197          PAGE_SHIFT;
 41.1198  
 41.1199      if (!write_exact(io_fd, &ctxt, sizeof(ctxt)) ||
 41.1200 -        !write_exact(io_fd, live_shinfo, PAGE_SIZE)) { 
 41.1201 +        !write_exact(io_fd, live_shinfo, PAGE_SIZE)) {
 41.1202          ERR("Error when writing to state file (1)");
 41.1203          goto out;
 41.1204      }
 41.1205 @@ -1149,26 +1149,26 @@ int xc_linux_save(int xc_handle, int io_
 41.1206   out:
 41.1207  
 41.1208      if (live) {
 41.1209 -        if(xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF, 
 41.1210 -                             NULL, 0, NULL ) < 0) { 
 41.1211 +        if(xc_shadow_control(xc_handle, dom, DOM0_SHADOW_CONTROL_OP_OFF,
 41.1212 +                             NULL, 0, NULL ) < 0) {
 41.1213              DPRINTF("Warning - couldn't disable shadow mode");
 41.1214          }
 41.1215      }
 41.1216 -    
 41.1217 +
 41.1218      if (live_shinfo)
 41.1219          munmap(live_shinfo, PAGE_SIZE);
 41.1220 -    
 41.1221 -    if (live_p2m_frame_list_list) 
 41.1222 -        munmap(live_p2m_frame_list_list, PAGE_SIZE); 
 41.1223  
 41.1224 -    if (live_p2m_frame_list) 
 41.1225 -        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE); 
 41.1226 +    if (live_p2m_frame_list_list)
 41.1227 +        munmap(live_p2m_frame_list_list, PAGE_SIZE);
 41.1228  
 41.1229 -    if(live_p2m) 
 41.1230 -        munmap(live_p2m, P2M_SIZE); 
 41.1231 +    if (live_p2m_frame_list)
 41.1232 +        munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
 41.1233  
 41.1234 -    if(live_m2p) 
 41.1235 -        munmap(live_m2p, M2P_SIZE(max_mfn)); 
 41.1236 +    if(live_p2m)
 41.1237 +        munmap(live_p2m, P2M_SIZE);
 41.1238 +
 41.1239 +    if(live_m2p)
 41.1240 +        munmap(live_m2p, M2P_SIZE(max_mfn));
 41.1241  
 41.1242      free(pfn_type);
 41.1243      free(pfn_batch);
    42.1 --- a/tools/libxc/xc_load_aout9.c	Sat Apr 15 19:25:09 2006 +0100
    42.2 +++ b/tools/libxc/xc_load_aout9.c	Sat Apr 15 19:25:21 2006 +0100
    42.3 @@ -22,7 +22,7 @@ static void copyout(int, uint32_t, unsig
    42.4  struct Exec *get_header(const char *, unsigned long, struct Exec *);
    42.5  
    42.6  
    42.7 -int 
    42.8 +int
    42.9  probe_aout9(
   42.10      const char *image,
   42.11      unsigned long image_size,
   42.12 @@ -40,7 +40,7 @@ probe_aout9(
   42.13      return 0;
   42.14  }
   42.15  
   42.16 -static int 
   42.17 +static int
   42.18  parseaout9image(
   42.19      const char *image,
   42.20      unsigned long image_size,
   42.21 @@ -74,7 +74,7 @@ parseaout9image(
   42.22      return 0;
   42.23  }
   42.24  
   42.25 -static int 
   42.26 +static int
   42.27  loadaout9image(
   42.28      const char *image,
   42.29      unsigned long image_size,
   42.30 @@ -123,7 +123,7 @@ copyout(
   42.31          if(chunksz > PAGE_SIZE - pgoff)
   42.32              chunksz = PAGE_SIZE - pgoff;
   42.33  
   42.34 -        pg = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_WRITE, 
   42.35 +        pg = xc_map_foreign_range(xch, dom, PAGE_SIZE, PROT_WRITE,
   42.36                                    parray[off>>PAGE_SHIFT]);
   42.37          memcpy(pg + pgoff, buf, chunksz);
   42.38          munmap(pg, PAGE_SIZE);
    43.1 --- a/tools/libxc/xc_load_bin.c	Sat Apr 15 19:25:09 2006 +0100
    43.2 +++ b/tools/libxc/xc_load_bin.c	Sat Apr 15 19:25:21 2006 +0100
    43.3 @@ -161,7 +161,7 @@ findtable(const char *image, unsigned lo
    43.4      return NULL;
    43.5  }
    43.6  
    43.7 -static int parsebinimage(const char *image, 
    43.8 +static int parsebinimage(const char *image,
    43.9                           unsigned long image_size,
   43.10                           struct domain_setup_info *dsi)
   43.11  {
    44.1 --- a/tools/libxc/xc_load_elf.c	Sat Apr 15 19:25:09 2006 +0100
    44.2 +++ b/tools/libxc/xc_load_elf.c	Sat Apr 15 19:25:21 2006 +0100
    44.3 @@ -51,7 +51,7 @@ static inline int is_loadable_phdr(Elf_P
    44.4              ((phdr->p_flags & (PF_W|PF_X)) != 0));
    44.5  }
    44.6  
    44.7 -static int parseelfimage(const char *image, 
    44.8 +static int parseelfimage(const char *image,
    44.9                           unsigned long elfsize,
   44.10                           struct domain_setup_info *dsi)
   44.11  {
   44.12 @@ -102,10 +102,10 @@ static int parseelfimage(const char *ima
   44.13          ERROR("ELF image has no section-header strings table (shstrtab).");
   44.14          return -EINVAL;
   44.15      }
   44.16 -    shdr = (Elf_Shdr *)(image + ehdr->e_shoff + 
   44.17 +    shdr = (Elf_Shdr *)(image + ehdr->e_shoff +
   44.18                          (ehdr->e_shstrndx*ehdr->e_shentsize));
   44.19      shstrtab = image + shdr->sh_offset;
   44.20 -    
   44.21 +
   44.22      /* Find the special '__xen_guest' section and check its contents. */
   44.23      for ( h = 0; h < ehdr->e_shnum; h++ )
   44.24      {
   44.25 @@ -148,7 +148,7 @@ static int parseelfimage(const char *ima
   44.26  
   44.27      dsi->xen_guest_string = guestinfo;
   44.28  
   44.29 -    for ( h = 0; h < ehdr->e_phnum; h++ ) 
   44.30 +    for ( h = 0; h < ehdr->e_phnum; h++ )
   44.31      {
   44.32          phdr = (Elf_Phdr *)(image + ehdr->e_phoff + (h*ehdr->e_phentsize));
   44.33          if ( !is_loadable_phdr(phdr) )
   44.34 @@ -159,8 +159,8 @@ static int parseelfimage(const char *ima
   44.35              kernend = phdr->p_paddr + phdr->p_memsz;
   44.36      }
   44.37  
   44.38 -    if ( (kernstart > kernend) || 
   44.39 -         (ehdr->e_entry < kernstart) || 
   44.40 +    if ( (kernstart > kernend) ||
   44.41 +         (ehdr->e_entry < kernstart) ||
   44.42           (ehdr->e_entry > kernend) )
   44.43      {
   44.44          ERROR("Malformed ELF image.");
   44.45 @@ -196,12 +196,12 @@ loadelfimage(
   44.46      char         *va;
   44.47      unsigned long pa, done, chunksz;
   44.48  
   44.49 -    for ( h = 0; h < ehdr->e_phnum; h++ ) 
   44.50 +    for ( h = 0; h < ehdr->e_phnum; h++ )
   44.51      {
   44.52          phdr = (Elf_Phdr *)(image + ehdr->e_phoff + (h*ehdr->e_phentsize));
   44.53          if ( !is_loadable_phdr(phdr) )
   44.54              continue;
   44.55 -        
   44.56 +
   44.57          for ( done = 0; done < phdr->p_filesz; done += chunksz )
   44.58          {
   44.59              pa = (phdr->p_paddr + done) - dsi->v_start;
   44.60 @@ -265,7 +265,7 @@ loadelfsymtab(
   44.61      shdr = (Elf_Shdr *)(p + sizeof(int) + sizeof(Elf_Ehdr));
   44.62      memcpy(shdr, image + ehdr->e_shoff, ehdr->e_shnum * sizeof(Elf_Shdr));
   44.63  
   44.64 -    for ( h = 0; h < ehdr->e_shnum; h++ ) 
   44.65 +    for ( h = 0; h < ehdr->e_shnum; h++ )
   44.66      {
   44.67          if ( shdr[h].sh_type == SHT_STRTAB )
   44.68          {
    45.1 --- a/tools/libxc/xc_misc.c	Sat Apr 15 19:25:09 2006 +0100
    45.2 +++ b/tools/libxc/xc_misc.c	Sat Apr 15 19:25:21 2006 +0100
    45.3 @@ -1,6 +1,6 @@
    45.4  /******************************************************************************
    45.5   * xc_misc.c
    45.6 - * 
    45.7 + *
    45.8   * Miscellaneous control interface functions.
    45.9   */
   45.10  
   45.11 @@ -21,7 +21,7 @@ int xc_interface_close(int xc_handle)
   45.12  
   45.13  int xc_readconsolering(int xc_handle,
   45.14                         char **pbuffer,
   45.15 -                       unsigned int *pnr_chars, 
   45.16 +                       unsigned int *pnr_chars,
   45.17                         int clear)
   45.18  {
   45.19      int ret;
   45.20 @@ -46,14 +46,14 @@ int xc_readconsolering(int xc_handle,
   45.21      safe_munlock(buffer, nr_chars);
   45.22  
   45.23      return ret;
   45.24 -}    
   45.25 +}
   45.26  
   45.27  int xc_physinfo(int xc_handle,
   45.28                  xc_physinfo_t *put_info)
   45.29  {
   45.30      int ret;
   45.31      DECLARE_DOM0_OP;
   45.32 -    
   45.33 +
   45.34      op.cmd = DOM0_PHYSINFO;
   45.35      op.interface_version = DOM0_INTERFACE_VERSION;
   45.36  
   45.37 @@ -70,15 +70,15 @@ int xc_sched_id(int xc_handle,
   45.38  {
   45.39      int ret;
   45.40      DECLARE_DOM0_OP;
   45.41 -    
   45.42 +
   45.43      op.cmd = DOM0_SCHED_ID;
   45.44      op.interface_version = DOM0_INTERFACE_VERSION;
   45.45 -    
   45.46 +
   45.47      if ( (ret = do_dom0_op(xc_handle, &op)) != 0 )
   45.48          return ret;
   45.49 -    
   45.50 +
   45.51      *sched_id = op.u.sched_id.sched_id;
   45.52 -    
   45.53 +
   45.54      return 0;
   45.55  }
   45.56  
   45.57 @@ -100,9 +100,9 @@ int xc_perfc_control(int xc_handle,
   45.58  
   45.59  long long xc_msr_read(int xc_handle, int cpu_mask, int msr)
   45.60  {
   45.61 -    int rc;    
   45.62 +    int rc;
   45.63      DECLARE_DOM0_OP;
   45.64 -    
   45.65 +
   45.66      op.cmd = DOM0_MSR;
   45.67      op.u.msr.write = 0;
   45.68      op.u.msr.msr = msr;
   45.69 @@ -116,9 +116,9 @@ long long xc_msr_read(int xc_handle, int
   45.70  int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low,
   45.71                    unsigned int high)
   45.72  {
   45.73 -    int rc;    
   45.74 +    int rc;
   45.75      DECLARE_DOM0_OP;
   45.76 -    
   45.77 +
   45.78      op.cmd = DOM0_MSR;
   45.79      op.u.msr.write = 1;
   45.80      op.u.msr.msr = msr;
   45.81 @@ -127,7 +127,7 @@ int xc_msr_write(int xc_handle, int cpu_
   45.82      op.u.msr.in2 = high;
   45.83  
   45.84      rc = do_dom0_op(xc_handle, &op);
   45.85 -    
   45.86 +
   45.87      return rc;
   45.88  }
   45.89  
    46.1 --- a/tools/libxc/xc_physdev.c	Sat Apr 15 19:25:09 2006 +0100
    46.2 +++ b/tools/libxc/xc_physdev.c	Sat Apr 15 19:25:21 2006 +0100
    46.3 @@ -1,8 +1,8 @@
    46.4  /******************************************************************************
    46.5   * xc_physdev.c
    46.6 - * 
    46.7 + *
    46.8   * API for manipulating physical-device access permissions.
    46.9 - * 
   46.10 + *
   46.11   * Copyright (c) 2004, Rolf Neugebauer (Intel Research Cambridge)
   46.12   * Copyright (c) 2004, K A Fraser (University of Cambridge)
   46.13   */
    47.1 --- a/tools/libxc/xc_private.c	Sat Apr 15 19:25:09 2006 +0100
    47.2 +++ b/tools/libxc/xc_private.c	Sat Apr 15 19:25:21 2006 +0100
    47.3 @@ -1,6 +1,6 @@
    47.4  /******************************************************************************
    47.5   * xc_private.c
    47.6 - * 
    47.7 + *
    47.8   * Helper functions for the rest of the library.
    47.9   */
   47.10  
   47.11 @@ -10,7 +10,7 @@
   47.12  void *xc_map_foreign_batch(int xc_handle, uint32_t dom, int prot,
   47.13                             unsigned long *arr, int num )
   47.14  {
   47.15 -    privcmd_mmapbatch_t ioctlx; 
   47.16 +    privcmd_mmapbatch_t ioctlx;
   47.17      void *addr;
   47.18      addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
   47.19      if ( addr == MAP_FAILED )
   47.20 @@ -38,8 +38,8 @@ void *xc_map_foreign_range(int xc_handle
   47.21                             int size, int prot,
   47.22                             unsigned long mfn )
   47.23  {
   47.24 -    privcmd_mmap_t ioctlx; 
   47.25 -    privcmd_mmap_entry_t entry; 
   47.26 +    privcmd_mmap_t ioctlx;
   47.27 +    privcmd_mmap_entry_t entry;
   47.28      void *addr;
   47.29      addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
   47.30      if ( addr == MAP_FAILED )
   47.31 @@ -64,7 +64,7 @@ void *xc_map_foreign_range(int xc_handle
   47.32  /*******************/
   47.33  
   47.34  /* NB: arr must be mlock'ed */
   47.35 -int xc_get_pfn_type_batch(int xc_handle, 
   47.36 +int xc_get_pfn_type_batch(int xc_handle,
   47.37                            uint32_t dom, int num, unsigned long *arr)
   47.38  {
   47.39      DECLARE_DOM0_OP;
   47.40 @@ -76,8 +76,8 @@ int xc_get_pfn_type_batch(int xc_handle,
   47.41  }
   47.42  
   47.43  #define GETPFN_ERR (~0U)
   47.44 -unsigned int get_pfn_type(int xc_handle, 
   47.45 -                          unsigned long mfn, 
   47.46 +unsigned int get_pfn_type(int xc_handle,
   47.47 +                          unsigned long mfn,
   47.48                            uint32_t dom)
   47.49  {
   47.50      DECLARE_DOM0_OP;
   47.51 @@ -119,7 +119,7 @@ int xc_mmuext_op(
   47.52  
   47.53   out1:
   47.54      return ret;
   47.55 -}    
   47.56 +}
   47.57  
   47.58  static int flush_mmu_updates(int xc_handle, xc_mmu_t *mmu)
   47.59  {
   47.60 @@ -166,7 +166,7 @@ xc_mmu_t *xc_init_mmu_updates(int xc_han
   47.61      return mmu;
   47.62  }
   47.63  
   47.64 -int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
   47.65 +int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
   47.66                        unsigned long long ptr, unsigned long long val)
   47.67  {
   47.68      mmu->updates[mmu->idx].ptr = ptr;
   47.69 @@ -288,7 +288,7 @@ int xc_memory_op(int xc_handle,
   47.70  
   47.71   out1:
   47.72      return ret;
   47.73 -}    
   47.74 +}
   47.75  
   47.76  
   47.77  long long xc_domain_get_cpu_usage( int xc_handle, domid_t domid, int vcpu )
   47.78 @@ -308,8 +308,8 @@ long long xc_domain_get_cpu_usage( int x
   47.79  
   47.80  
   47.81  int xc_get_pfn_list(int xc_handle,
   47.82 -                    uint32_t domid, 
   47.83 -                    unsigned long *pfn_buf, 
   47.84 +                    uint32_t domid,
   47.85 +                    unsigned long *pfn_buf,
   47.86                      unsigned long max_pfns)
   47.87  {
   47.88      DECLARE_DOM0_OP;
   47.89 @@ -327,7 +327,7 @@ int xc_get_pfn_list(int xc_handle,
   47.90      {
   47.91          PERROR("xc_get_pfn_list: pfn_buf mlock failed");
   47.92          return -1;
   47.93 -    }    
   47.94 +    }
   47.95  
   47.96      ret = do_dom0_op(xc_handle, &op);
   47.97  
   47.98 @@ -356,13 +356,13 @@ long xc_get_tot_pages(int xc_handle, uin
   47.99      DECLARE_DOM0_OP;
  47.100      op.cmd = DOM0_GETDOMAININFO;
  47.101      op.u.getdomaininfo.domain = (domid_t)domid;
  47.102 -    return (do_dom0_op(xc_handle, &op) < 0) ? 
  47.103 +    return (do_dom0_op(xc_handle, &op) < 0) ?
  47.104          -1 : op.u.getdomaininfo.tot_pages;
  47.105  }
  47.106  
  47.107  int xc_copy_to_domain_page(int xc_handle,
  47.108                             uint32_t domid,
  47.109 -                           unsigned long dst_pfn, 
  47.110 +                           unsigned long dst_pfn,
  47.111                             const char *src_page)
  47.112  {
  47.113      void *vaddr = xc_map_foreign_range(
  47.114 @@ -481,7 +481,7 @@ unsigned long xc_make_page_below_4G(
  47.115  {
  47.116      unsigned long new_mfn;
  47.117  
  47.118 -    if ( xc_domain_memory_decrease_reservation( 
  47.119 +    if ( xc_domain_memory_decrease_reservation(
  47.120          xc_handle, domid, 1, 0, &mfn) != 0 )
  47.121      {
  47.122          fprintf(stderr,"xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
    48.1 --- a/tools/libxc/xc_private.h	Sat Apr 15 19:25:09 2006 +0100
    48.2 +++ b/tools/libxc/xc_private.h	Sat Apr 15 19:25:21 2006 +0100
    48.3 @@ -57,7 +57,7 @@ static inline void safe_munlock(const vo
    48.4  }
    48.5  
    48.6  static inline int do_privcmd(int xc_handle,
    48.7 -                             unsigned int cmd, 
    48.8 +                             unsigned int cmd,
    48.9                               unsigned long data)
   48.10  {
   48.11      return ioctl(xc_handle, cmd, data);
   48.12 @@ -67,7 +67,7 @@ static inline int do_xen_hypercall(int x
   48.13                                     privcmd_hypercall_t *hypercall)
   48.14  {
   48.15      return do_privcmd(xc_handle,
   48.16 -                      IOCTL_PRIVCMD_HYPERCALL, 
   48.17 +                      IOCTL_PRIVCMD_HYPERCALL,
   48.18                        (unsigned long)hypercall);
   48.19  }
   48.20  
   48.21 @@ -78,7 +78,7 @@ static inline int do_xen_version(int xc_
   48.22      hypercall.op     = __HYPERVISOR_xen_version;
   48.23      hypercall.arg[0] = (unsigned long) cmd;
   48.24      hypercall.arg[1] = (unsigned long) dest;
   48.25 -    
   48.26 +
   48.27      return do_xen_hypercall(xc_handle, &hypercall);
   48.28  }
   48.29  
   48.30 @@ -121,13 +121,13 @@ typedef struct privcmd_mmap_entry {
   48.31      unsigned long va;
   48.32      unsigned long mfn;
   48.33      unsigned long npages;
   48.34 -} privcmd_mmap_entry_t; 
   48.35 +} privcmd_mmap_entry_t;
   48.36  
   48.37  typedef struct privcmd_mmap {
   48.38      int num;
   48.39      domid_t dom;
   48.40      privcmd_mmap_entry_t *entry;
   48.41 -} privcmd_mmap_t; 
   48.42 +} privcmd_mmap_t;
   48.43  */
   48.44  
   48.45  #endif /* __XC_PRIVATE_H__ */
    49.1 --- a/tools/libxc/xc_ptrace.c	Sat Apr 15 19:25:09 2006 +0100
    49.2 +++ b/tools/libxc/xc_ptrace.c	Sat Apr 15 19:25:21 2006 +0100
    49.3 @@ -46,7 +46,7 @@ static cpumap_t                 regs_val
    49.4  static vcpu_guest_context_t     ctxt[MAX_VIRT_CPUS];
    49.5  
    49.6  extern int ffsll(long long int);
    49.7 -#define FOREACH_CPU(cpumap, i)  for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) ) 
    49.8 +#define FOREACH_CPU(cpumap, i)  for ( cpumap = online_cpumap; (i = ffsll(cpumap)); cpumap &= ~(1 << (index - 1)) )
    49.9  
   49.10  
   49.11  static int
   49.12 @@ -58,22 +58,22 @@ fetch_regs(int xc_handle, int cpu, int *
   49.13      if (online)
   49.14          *online = 0;
   49.15      if ( !(regs_valid & (1 << cpu)) )
   49.16 -    { 
   49.17 -        retval = xc_vcpu_getcontext(xc_handle, current_domid, 
   49.18 -						cpu, &ctxt[cpu]);
   49.19 -        if ( retval ) 
   49.20 +    {
   49.21 +        retval = xc_vcpu_getcontext(xc_handle, current_domid,
   49.22 +                cpu, &ctxt[cpu]);
   49.23 +        if ( retval )
   49.24              goto done;
   49.25 -	regs_valid |= (1 << cpu);
   49.26 +        regs_valid |= (1 << cpu);
   49.27  
   49.28      }
   49.29 -	if ( online == NULL )
   49.30 -	    goto done;
   49.31 +    if ( online == NULL )
   49.32 +        goto done;
   49.33  
   49.34 -	retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
   49.35 -	*online = info.online;
   49.36 -    
   49.37 +    retval = xc_vcpu_getinfo(xc_handle, current_domid, cpu, &info);
   49.38 +    *online = info.online;
   49.39 +
   49.40   done:
   49.41 -    return retval;    
   49.42 +    return retval;
   49.43  }
   49.44  
   49.45  static struct thr_ev_handlers {
   49.46 @@ -81,8 +81,8 @@ static struct thr_ev_handlers {
   49.47      thr_ev_handler_t td_death;
   49.48  } handlers;
   49.49  
   49.50 -void 
   49.51 -xc_register_event_handler(thr_ev_handler_t h, 
   49.52 +void
   49.53 +xc_register_event_handler(thr_ev_handler_t h,
   49.54                            td_event_e e)
   49.55  {
   49.56      switch (e) {
   49.57 @@ -97,7 +97,7 @@ xc_register_event_handler(thr_ev_handler
   49.58      }
   49.59  }
   49.60  
   49.61 -static inline int 
   49.62 +static inline int
   49.63  paging_enabled(vcpu_guest_context_t *v)
   49.64  {
   49.65      unsigned long cr0 = v->ctrlreg[0];
   49.66 @@ -114,19 +114,19 @@ static int
   49.67  get_online_cpumap(int xc_handle, dom0_getdomaininfo_t *d, cpumap_t *cpumap)
   49.68  {
   49.69      int i, online, retval;
   49.70 -    
   49.71 +
   49.72      *cpumap = 0;
   49.73      for (i = 0; i <= d->max_vcpu_id; i++) {
   49.74          if ((retval = fetch_regs(xc_handle, i, &online)))
   49.75              return retval;
   49.76          if (online)
   49.77 -            *cpumap |= (1 << i);            
   49.78 +            *cpumap |= (1 << i);
   49.79      }
   49.80 -    
   49.81 +
   49.82      return 0;
   49.83  }
   49.84  
   49.85 -/* 
   49.86 +/*
   49.87   * Notify GDB of any vcpus that have come online or gone offline
   49.88   * update online_cpumap
   49.89   *
   49.90 @@ -137,7 +137,7 @@ online_vcpus_changed(cpumap_t cpumap)
   49.91  {
   49.92      cpumap_t changed_cpumap = cpumap ^ online_cpumap;
   49.93      int index;
   49.94 -    
   49.95 +
   49.96      while ( (index = ffsll(changed_cpumap)) ) {
   49.97          if ( cpumap & (1 << (index - 1)) )
   49.98          {
   49.99 @@ -149,7 +149,7 @@ online_vcpus_changed(cpumap_t cpumap)
  49.100          changed_cpumap &= ~(1 << (index - 1));
  49.101      }
  49.102      online_cpumap = cpumap;
  49.103 -    
  49.104 +
  49.105  }
  49.106  
  49.107  /* --------------------- */
  49.108 @@ -172,7 +172,7 @@ map_domain_va_32(
  49.109      static unsigned long  pde_phys[MAX_VIRT_CPUS];
  49.110      static uint32_t *pde_virt[MAX_VIRT_CPUS];
  49.111      static unsigned long  page_phys[MAX_VIRT_CPUS];
  49.112 -    static uint32_t *page_virt[MAX_VIRT_CPUS];    
  49.113 +    static uint32_t *page_virt[MAX_VIRT_CPUS];
  49.114      static int            prev_perm[MAX_VIRT_CPUS];
  49.115  
  49.116     if (ctxt[cpu].ctrlreg[3] == 0)
  49.117 @@ -221,7 +221,7 @@ map_domain_va_32(
  49.118              return NULL;
  49.119          }
  49.120          prev_perm[cpu] = perm;
  49.121 -    } 
  49.122 +    }
  49.123  
  49.124      return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
  49.125  }
  49.126 @@ -284,7 +284,7 @@ map_domain_va_64(
  49.127      if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
  49.128          return map_domain_va_32(xc_handle, cpu, guest_va, perm);
  49.129  
  49.130 -    l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE, 
  49.131 +    l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
  49.132              PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
  49.133      if ( l4 == NULL )
  49.134          return NULL;
  49.135 @@ -349,7 +349,7 @@ map_domain_va(
  49.136              mode = MODE_64;
  49.137          else if ( strstr(caps, "-x86_32p") )
  49.138              mode = MODE_PAE;
  49.139 -        else if ( strstr(caps, "-x86_32") ) 
  49.140 +        else if ( strstr(caps, "-x86_32") )
  49.141              mode = MODE_32;
  49.142      }
  49.143  
  49.144 @@ -374,7 +374,7 @@ map_domain_va(
  49.145      if (fetch_regs(xc_handle, cpu, NULL))
  49.146          return NULL;
  49.147  
  49.148 -    if (!paging_enabled(&ctxt[cpu])) { 
  49.149 +    if (!paging_enabled(&ctxt[cpu])) {
  49.150          static void * v;
  49.151          unsigned long page;
  49.152  
  49.153 @@ -383,9 +383,9 @@ map_domain_va(
  49.154  
  49.155          page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
  49.156  
  49.157 -        v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE, 
  49.158 +        v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE,
  49.159                  perm, page >> PAGE_SHIFT);
  49.160 -    
  49.161 +
  49.162          if ( v == NULL )
  49.163              return NULL;
  49.164  
  49.165 @@ -403,7 +403,7 @@ map_domain_va(
  49.166  
  49.167  int control_c_pressed_flag = 0;
  49.168  
  49.169 -static int 
  49.170 +static int
  49.171  __xc_waitdomain(
  49.172      int xc_handle,
  49.173      int domain,
  49.174 @@ -420,7 +420,7 @@ static int
  49.175  
  49.176      op.cmd = DOM0_GETDOMAININFO;
  49.177      op.u.getdomaininfo.domain = domain;
  49.178 -    
  49.179 +
  49.180   retry:
  49.181      retval = do_dom0_op(xc_handle, &op);
  49.182      if ( retval || (op.u.getdomaininfo.domain != domain) )
  49.183 @@ -429,7 +429,7 @@ static int
  49.184          goto done;
  49.185      }
  49.186      *status = op.u.getdomaininfo.flags;
  49.187 -    
  49.188 +
  49.189      if ( options & WNOHANG )
  49.190          goto done;
  49.191  
  49.192 @@ -472,16 +472,16 @@ xc_ptrace(
  49.193      void           *data = (char *)edata;
  49.194  
  49.195      cpu = (request != PTRACE_ATTACH) ? domid_tid : 0;
  49.196 -    
  49.197 +
  49.198      switch ( request )
  49.199 -    { 
  49.200 +    {
  49.201      case PTRACE_PEEKTEXT:
  49.202      case PTRACE_PEEKDATA:
  49.203          if (current_isfile)
  49.204 -            guest_va = (unsigned long *)map_domain_va_core(current_domid, 
  49.205 +            guest_va = (unsigned long *)map_domain_va_core(current_domid,
  49.206                                  cpu, addr, ctxt);
  49.207          else
  49.208 -            guest_va = (unsigned long *)map_domain_va(xc_handle, 
  49.209 +            guest_va = (unsigned long *)map_domain_va(xc_handle,
  49.210                                  cpu, addr, PROT_READ);
  49.211          if ( guest_va == NULL )
  49.212              goto out_error;
  49.213 @@ -492,26 +492,26 @@ xc_ptrace(
  49.214      case PTRACE_POKEDATA:
  49.215          /* XXX assume that all CPUs have the same address space */
  49.216          if (current_isfile)
  49.217 -            guest_va = (unsigned long *)map_domain_va_core(current_domid, 
  49.218 +            guest_va = (unsigned long *)map_domain_va_core(current_domid,
  49.219                                  cpu, addr, ctxt);
  49.220          else
  49.221 -            guest_va = (unsigned long *)map_domain_va(xc_handle, 
  49.222 +            guest_va = (unsigned long *)map_domain_va(xc_handle,
  49.223                                  cpu, addr, PROT_READ|PROT_WRITE);
  49.224 -        if ( guest_va == NULL ) 
  49.225 +        if ( guest_va == NULL )
  49.226              goto out_error;
  49.227          *guest_va = (unsigned long)data;
  49.228          break;
  49.229  
  49.230      case PTRACE_GETREGS:
  49.231 -        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL)) 
  49.232 +        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
  49.233              goto out_error;
  49.234 -        SET_PT_REGS(pt, ctxt[cpu].user_regs); 
  49.235 +        SET_PT_REGS(pt, ctxt[cpu].user_regs);
  49.236          memcpy(data, &pt, sizeof(struct gdb_regs));
  49.237          break;
  49.238  
  49.239      case PTRACE_GETFPREGS:
  49.240      case PTRACE_GETFPXREGS:
  49.241 -        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL)) 
  49.242 +        if (!current_isfile && fetch_regs(xc_handle, cpu, NULL))
  49.243                  goto out_error;
  49.244          memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
  49.245          break;
  49.246 @@ -520,7 +520,7 @@ xc_ptrace(
  49.247          if (current_isfile)
  49.248                  goto out_unspported; /* XXX not yet supported */
  49.249          SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
  49.250 -        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, 
  49.251 +        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
  49.252                                  &ctxt[cpu])))
  49.253              goto out_error_dom0;
  49.254          break;
  49.255 @@ -531,8 +531,8 @@ xc_ptrace(
  49.256          /*  XXX we can still have problems if the user switches threads
  49.257           *  during single-stepping - but that just seems retarded
  49.258           */
  49.259 -        ctxt[cpu].user_regs.eflags |= PSL_T; 
  49.260 -        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, 
  49.261 +        ctxt[cpu].user_regs.eflags |= PSL_T;
  49.262 +        if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu,
  49.263                                  &ctxt[cpu])))
  49.264              goto out_error_dom0;
  49.265          /* FALLTHROUGH */
  49.266 @@ -545,13 +545,13 @@ xc_ptrace(
  49.267          {
  49.268              FOREACH_CPU(cpumap, index) {
  49.269                  cpu = index - 1;
  49.270 -                if (fetch_regs(xc_handle, cpu, NULL)) 
  49.271 +                if (fetch_regs(xc_handle, cpu, NULL))
  49.272                      goto out_error;
  49.273                  /* Clear trace flag */
  49.274 -                if ( ctxt[cpu].user_regs.eflags & PSL_T ) 
  49.275 +                if ( ctxt[cpu].user_regs.eflags & PSL_T )
  49.276                  {
  49.277                      ctxt[cpu].user_regs.eflags &= ~PSL_T;
  49.278 -                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, 
  49.279 +                    if ((retval = xc_vcpu_setcontext(xc_handle, current_domid,
  49.280                                                  cpu, &ctxt[cpu])))
  49.281                          goto out_error_dom0;
  49.282                  }
  49.283 @@ -566,7 +566,7 @@ xc_ptrace(
  49.284                  goto out_error_dom0;
  49.285          }
  49.286          regs_valid = 0;
  49.287 -        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ? 
  49.288 +        if ((retval = xc_domain_unpause(xc_handle, current_domid > 0 ?
  49.289                                  current_domid : -current_domid)))
  49.290              goto out_error_dom0;
  49.291          break;
  49.292 @@ -627,7 +627,7 @@ xc_ptrace(
  49.293  
  49.294  }
  49.295  
  49.296 -int 
  49.297 +int
  49.298  xc_waitdomain(
  49.299      int xc_handle,
  49.300      int domain,
    50.1 --- a/tools/libxc/xc_ptrace.h	Sat Apr 15 19:25:09 2006 +0100
    50.2 +++ b/tools/libxc/xc_ptrace.h	Sat Apr 15 19:25:21 2006 +0100
    50.3 @@ -107,7 +107,7 @@ struct gdb_regs {
    50.4      long esi; /* 12 */
    50.5      long edi; /* 16 */
    50.6      long ebp; /* 20 */
    50.7 -    long eax; /* 24 */ 
    50.8 +    long eax; /* 24 */
    50.9      int  xds; /* 28 */
   50.10      int  xes; /* 32 */
   50.11      int  xfs; /* 36 */
   50.12 @@ -116,7 +116,7 @@ struct gdb_regs {
   50.13      long eip;    /* 48 */
   50.14      int  xcs;    /* 52 */
   50.15      long eflags; /* 56 */
   50.16 -    long esp;    /* 60 */     
   50.17 +    long esp;    /* 60 */
   50.18      int  xss;    /* 64 */
   50.19  };
   50.20  
   50.21 @@ -169,20 +169,20 @@ struct gdb_regs {
   50.22  typedef void (*thr_ev_handler_t)(long);
   50.23  
   50.24  void xc_register_event_handler(
   50.25 -    thr_ev_handler_t h, 
   50.26 +    thr_ev_handler_t h,
   50.27      td_event_e e);
   50.28  
   50.29  long xc_ptrace(
   50.30      int xc_handle,
   50.31 -    enum __ptrace_request request, 
   50.32 +    enum __ptrace_request request,
   50.33      uint32_t  domid,
   50.34 -    long addr, 
   50.35 +    long addr,
   50.36      long data);
   50.37  
   50.38  int xc_waitdomain(
   50.39      int xc_handle,
   50.40 -    int domain, 
   50.41 -    int *status, 
   50.42 +    int domain,
   50.43 +    int *status,
   50.44      int options);
   50.45  
   50.46  #endif /* XC_PTRACE */
    51.1 --- a/tools/libxc/xc_ptrace_core.c	Sat Apr 15 19:25:09 2006 +0100
    51.2 +++ b/tools/libxc/xc_ptrace_core.c	Sat Apr 15 19:25:21 2006 +0100
    51.3 @@ -39,7 +39,7 @@ map_domain_va_core(unsigned long domfd, 
    51.4      static unsigned long  page_phys[MAX_VIRT_CPUS];
    51.5      static unsigned long *page_virt[MAX_VIRT_CPUS];
    51.6  
    51.7 -    if (cr3[cpu] != cr3_phys[cpu]) 
    51.8 +    if (cr3[cpu] != cr3_phys[cpu])
    51.9      {
   51.10          cr3_phys[cpu] = cr3[cpu];
   51.11          if (cr3_virt[cpu])
   51.12 @@ -53,12 +53,12 @@ map_domain_va_core(unsigned long domfd, 
   51.13              return NULL;
   51.14          }
   51.15          cr3_virt[cpu] = v;
   51.16 -    } 
   51.17 +    }
   51.18      if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
   51.19          return NULL;
   51.20      if (ctxt[cpu].flags & VGCF_HVM_GUEST)
   51.21          pde = p2m_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
   51.22 -    if (pde != pde_phys[cpu]) 
   51.23 +    if (pde != pde_phys[cpu])
   51.24      {
   51.25          pde_phys[cpu] = pde;
   51.26          if (pde_virt[cpu])
   51.27 @@ -74,7 +74,7 @@ map_domain_va_core(unsigned long domfd, 
   51.28          return NULL;
   51.29      if (ctxt[cpu].flags & VGCF_HVM_GUEST)
   51.30          page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
   51.31 -    if (page != page_phys[cpu]) 
   51.32 +    if (page != page_phys[cpu])
   51.33      {
   51.34          page_phys[cpu] = page;
   51.35          if (page_virt[cpu])
   51.36 @@ -89,11 +89,11 @@ map_domain_va_core(unsigned long domfd, 
   51.37              return NULL;
   51.38          }
   51.39          page_virt[cpu] = v;
   51.40 -    } 
   51.41 +    }
   51.42      return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
   51.43  }
   51.44  
   51.45 -int 
   51.46 +int
   51.47  xc_waitdomain_core(
   51.48      int xc_handle,
   51.49      int domfd,
   51.50 @@ -122,7 +122,7 @@ xc_waitdomain_core(
   51.51          nr_vcpus = header.xch_nr_vcpus;
   51.52          pages_offset = header.xch_pages_offset;
   51.53  
   51.54 -        if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) != 
   51.55 +        if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
   51.56              sizeof(vcpu_guest_context_t)*nr_vcpus)
   51.57              return -1;
   51.58  
   51.59 @@ -134,7 +134,7 @@ xc_waitdomain_core(
   51.60              printf("Could not allocate p2m_array\n");
   51.61              return -1;
   51.62          }
   51.63 -        if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) != 
   51.64 +        if (read(domfd, p2m_array, sizeof(unsigned long)*nr_pages) !=
   51.65              sizeof(unsigned long)*nr_pages)
   51.66              return -1;
   51.67  
    52.1 --- a/tools/libxc/xc_sedf.c	Sat Apr 15 19:25:09 2006 +0100
    52.2 +++ b/tools/libxc/xc_sedf.c	Sat Apr 15 19:25:21 2006 +0100
    52.3 @@ -1,8 +1,8 @@
    52.4  /******************************************************************************
    52.5   * xc_sedf.c
    52.6 - * 
    52.7 + *
    52.8   * API for manipulating parameters of the Simple EDF scheduler.
    52.9 - * 
   52.10 + *
   52.11   * changes by Stephan Diestelhorst
   52.12   * based on code
   52.13   * by Mark Williamson, Copyright (c) 2004 Intel Research Cambridge.
   52.14 @@ -35,7 +35,7 @@ int xc_sedf_domain_get(int xc_handle, ui
   52.15      int ret;
   52.16      struct sedf_adjdom *p = &op.u.adjustdom.u.sedf;
   52.17  
   52.18 -    op.cmd = DOM0_ADJUSTDOM;    
   52.19 +    op.cmd = DOM0_ADJUSTDOM;
   52.20      op.u.adjustdom.domain = (domid_t)domid;
   52.21      op.u.adjustdom.sched_id = SCHED_SEDF;
   52.22      op.u.adjustdom.direction = SCHED_INFO_GET;
    53.1 --- a/tools/libxc/xc_tbuf.c	Sat Apr 15 19:25:09 2006 +0100
    53.2 +++ b/tools/libxc/xc_tbuf.c	Sat Apr 15 19:25:21 2006 +0100
    53.3 @@ -1,8 +1,8 @@
    53.4  /******************************************************************************
    53.5   * xc_tbuf.c
    53.6 - * 
    53.7 + *
    53.8   * API for manipulating and accessing trace buffer parameters
    53.9 - * 
   53.10 + *
   53.11   * Copyright (c) 2005, Rob Gardner
   53.12   */
   53.13  
   53.14 @@ -18,7 +18,7 @@ int xc_tbuf_enable(int xc_handle, int en
   53.15      op.u.tbufcontrol.op  = DOM0_TBUF_ENABLE;
   53.16    else
   53.17      op.u.tbufcontrol.op  = DOM0_TBUF_DISABLE;
   53.18 -  
   53.19 +
   53.20    return xc_dom0_op(xc_handle, &op);
   53.21  }
   53.22  
   53.23 @@ -30,10 +30,10 @@ int xc_tbuf_set_size(int xc_handle, uint
   53.24    op.interface_version = DOM0_INTERFACE_VERSION;
   53.25    op.u.tbufcontrol.op  = DOM0_TBUF_SET_SIZE;
   53.26    op.u.tbufcontrol.size = size;
   53.27 -  
   53.28 +
   53.29    return xc_dom0_op(xc_handle, &op);
   53.30  }
   53.31 -  
   53.32 +
   53.33  int xc_tbuf_get_size(int xc_handle, uint32_t *size)
   53.34  {
   53.35    int rc;
    54.1 --- a/tools/libxc/xenctrl.h	Sat Apr 15 19:25:09 2006 +0100
    54.2 +++ b/tools/libxc/xenctrl.h	Sat Apr 15 19:25:21 2006 +0100
    54.3 @@ -1,8 +1,8 @@
    54.4  /******************************************************************************
    54.5   * xenctrl.h
    54.6 - * 
    54.7 + *
    54.8   * A library for low-level access to the Xen control interfaces.
    54.9 - * 
   54.10 + *
   54.11   * Copyright (c) 2003-2004, K A Fraser.
   54.12   */
   54.13  
   54.14 @@ -30,7 +30,7 @@
   54.15  
   54.16  /*
   54.17   *  DEFINITIONS FOR CPU BARRIERS
   54.18 - */ 
   54.19 + */
   54.20  
   54.21  #if defined(__i386__)
   54.22  #define mb()  __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
   54.23 @@ -51,7 +51,7 @@
   54.24  
   54.25  /*
   54.26   *  INITIALIZATION FUNCTIONS
   54.27 - */ 
   54.28 + */
   54.29  
   54.30  /**
   54.31   * This function opens a handle to the hypervisor interface.  This function can
   54.32 @@ -96,20 +96,20 @@ typedef struct xc_core_header {
   54.33  
   54.34  long xc_ptrace_core(
   54.35      int xc_handle,
   54.36 -    enum __ptrace_request request, 
   54.37 -    uint32_t domid, 
   54.38 -    long addr, 
   54.39 +    enum __ptrace_request request,
   54.40 +    uint32_t domid,
   54.41 +    long addr,
   54.42      long data,
   54.43      vcpu_guest_context_t *ctxt);
   54.44  void * map_domain_va_core(
   54.45 -    unsigned long domfd, 
   54.46 -    int cpu, 
   54.47 +    unsigned long domfd,
   54.48 +    int cpu,
   54.49      void *guest_va,
   54.50      vcpu_guest_context_t *ctxt);
   54.51  int xc_waitdomain_core(
   54.52      int xc_handle,
   54.53 -    int domain, 
   54.54 -    int *status, 
   54.55 +    int domain,
   54.56 +    int *status,
   54.57      int options,
   54.58      vcpu_guest_context_t *ctxt);
   54.59  
   54.60 @@ -120,7 +120,7 @@ int xc_waitdomain_core(
   54.61  typedef struct {
   54.62      uint32_t      domid;
   54.63      uint32_t      ssidref;
   54.64 -    unsigned int  dying:1, crashed:1, shutdown:1, 
   54.65 +    unsigned int  dying:1, crashed:1, shutdown:1,
   54.66                    paused:1, blocked:1, running:1;
   54.67      unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
   54.68      unsigned long nr_pages;
   54.69 @@ -133,7 +133,7 @@ typedef struct {
   54.70  } xc_dominfo_t;
   54.71  
   54.72  typedef dom0_getdomaininfo_t xc_domaininfo_t;
   54.73 -int xc_domain_create(int xc_handle, 
   54.74 +int xc_domain_create(int xc_handle,
   54.75                       uint32_t ssidref,
   54.76                       xen_domain_handle_t handle,
   54.77                       uint32_t *pdomid);
   54.78 @@ -144,7 +144,7 @@ int xc_domain_create(int xc_handle,
   54.79   *  xc_domain_dumpcore_via_callback - produces a dump, using a specified
   54.80   *                                    callback function
   54.81   */
   54.82 -int xc_domain_dumpcore(int xc_handle, 
   54.83 +int xc_domain_dumpcore(int xc_handle,
   54.84                         uint32_t domid,
   54.85                         const char *corename);
   54.86  
   54.87 @@ -156,7 +156,7 @@ int xc_domain_dumpcore(int xc_handle,
   54.88   */
   54.89  typedef int (dumpcore_rtn_t)(void *arg, char *buffer, unsigned int length);
   54.90  
   54.91 -int xc_domain_dumpcore_via_callback(int xc_handle, 
   54.92 +int xc_domain_dumpcore_via_callback(int xc_handle,
   54.93                                      uint32_t domid,
   54.94                                      void *arg,
   54.95                                      dumpcore_rtn_t dump_rtn);
   54.96 @@ -170,7 +170,7 @@ int xc_domain_dumpcore_via_callback(int 
   54.97   * @return 0 on success, -1 on failure.
   54.98   */
   54.99  int xc_domain_max_vcpus(int xc_handle,
  54.100 -                        uint32_t domid, 
  54.101 +                        uint32_t domid,
  54.102                          unsigned int max);
  54.103  
  54.104  /**
  54.105 @@ -181,7 +181,7 @@ int xc_domain_max_vcpus(int xc_handle,
  54.106   * @parm domid the domain id to pause
  54.107   * @return 0 on success, -1 on failure.
  54.108   */
  54.109 -int xc_domain_pause(int xc_handle, 
  54.110 +int xc_domain_pause(int xc_handle,
  54.111                      uint32_t domid);
  54.112  /**
  54.113   * This function unpauses a domain.  The domain should have been previously
  54.114 @@ -191,7 +191,7 @@ int xc_domain_pause(int xc_handle,
  54.115   * @parm domid the domain id to unpause
  54.116   * return 0 on success, -1 on failure
  54.117   */
  54.118 -int xc_domain_unpause(int xc_handle, 
  54.119 +int xc_domain_unpause(int xc_handle,
  54.120                        uint32_t domid);
  54.121  
  54.122  /**
  54.123 @@ -203,7 +203,7 @@ int xc_domain_unpause(int xc_handle,
  54.124   * @parm domid the domain id to destroy
  54.125   * @return 0 on success, -1 on failure
  54.126   */
  54.127 -int xc_domain_destroy(int xc_handle, 
  54.128 +int xc_domain_destroy(int xc_handle,
  54.129                        uint32_t domid);
  54.130  
  54.131  /**
  54.132 @@ -217,7 +217,7 @@ int xc_domain_destroy(int xc_handle,
  54.133   * @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
  54.134   * @return 0 on success, -1 on failure
  54.135   */
  54.136 -int xc_domain_shutdown(int xc_handle, 
  54.137 +int xc_domain_shutdown(int xc_handle,
  54.138                         uint32_t domid,
  54.139                         int reason);
  54.140  
  54.141 @@ -242,7 +242,7 @@ int xc_vcpu_setaffinity(int xc_handle,
  54.142   * @return the number of domains enumerated or -1 on error
  54.143   */
  54.144  int xc_domain_getinfo(int xc_handle,
  54.145 -                      uint32_t first_domid, 
  54.146 +                      uint32_t first_domid,
  54.147                        unsigned int max_doms,
  54.148                        xc_dominfo_t *info);
  54.149  
  54.150 @@ -307,12 +307,12 @@ long long xc_domain_get_cpu_usage(int xc
  54.151                                    domid_t domid,
  54.152                                    int vcpu);
  54.153  
  54.154 -int xc_domain_sethandle(int xc_handle, uint32_t domid, 
  54.155 +int xc_domain_sethandle(int xc_handle, uint32_t domid,
  54.156                          xen_domain_handle_t handle);
  54.157  
  54.158  typedef dom0_shadow_control_stats_t xc_shadow_control_stats_t;
  54.159  int xc_shadow_control(int xc_handle,
  54.160 -                      uint32_t domid, 
  54.161 +                      uint32_t domid,
  54.162                        unsigned int sop,
  54.163                        unsigned long *dirty_bitmap,
  54.164                        unsigned long pages,
  54.165 @@ -386,7 +386,7 @@ int xc_physdev_pci_access_modify(int xc_
  54.166  
  54.167  int xc_readconsolering(int xc_handle,
  54.168                         char **pbuffer,
  54.169 -                       unsigned int *pnr_chars, 
  54.170 +                       unsigned int *pnr_chars,
  54.171                         int clear);
  54.172  
  54.173  typedef dom0_physinfo_t xc_physinfo_t;
  54.174 @@ -397,18 +397,18 @@ int xc_sched_id(int xc_handle,
  54.175                  int *sched_id);
  54.176  
  54.177  int xc_domain_setmaxmem(int xc_handle,
  54.178 -                        uint32_t domid, 
  54.179 +                        uint32_t domid,
  54.180                          unsigned int max_memkb);
  54.181  
  54.182  int xc_domain_memory_increase_reservation(int xc_handle,
  54.183 -                                          uint32_t domid, 
  54.184 +                                          uint32_t domid,
  54.185                                            unsigned long nr_extents,
  54.186                                            unsigned int extent_order,
  54.187                                            unsigned int address_bits,
  54.188                                            unsigned long *extent_start);
  54.189  
  54.190  int xc_domain_memory_decrease_reservation(int xc_handle,
  54.191 -                                          uint32_t domid, 
  54.192 +                                          uint32_t domid,
  54.193                                            unsigned long nr_extents,
  54.194                                            unsigned int extent_order,
  54.195                                            unsigned long *extent_start);
  54.196 @@ -443,7 +443,7 @@ int xc_domain_iomem_permission(int xc_ha
  54.197                                 unsigned long nr_mfns,
  54.198                                 uint8_t allow_access);
  54.199  
  54.200 -unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid, 
  54.201 +unsigned long xc_make_page_below_4G(int xc_handle, uint32_t domid,
  54.202                                      unsigned long mfn);
  54.203  
  54.204  typedef dom0_perfc_desc_t xc_perfc_desc_t;
  54.205 @@ -492,11 +492,11 @@ void *xc_map_foreign_batch(int xc_handle
  54.206  unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
  54.207                                             int vcpu, unsigned long long virt);
  54.208  
  54.209 -int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf, 
  54.210 +int xc_get_pfn_list(int xc_handle, uint32_t domid, unsigned long *pfn_buf,
  54.211                      unsigned long max_pfns);
  54.212  
  54.213  int xc_ia64_get_pfn_list(int xc_handle, uint32_t domid,
  54.214 -                         unsigned long *pfn_buf, 
  54.215 +                         unsigned long *pfn_buf,
  54.216                           unsigned int start_page, unsigned int nr_pages);
  54.217  
  54.218  int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
  54.219 @@ -551,7 +551,7 @@ int xc_tbuf_enable(int xc_handle, int en
  54.220  int xc_tbuf_set_size(int xc_handle, uint32_t size);
  54.221  
  54.222  /**
  54.223 - * This function retrieves the current size of the trace buffers. 
  54.224 + * This function retrieves the current size of the trace buffers.
  54.225   * Note that the size returned is in terms of bytes, not pages.
  54.226  
  54.227   * @parm xc_handle a handle to an open hypervisor interface
  54.228 @@ -577,7 +577,7 @@ struct xc_mmu {
  54.229  };
  54.230  typedef struct xc_mmu xc_mmu_t;
  54.231  xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
  54.232 -int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
  54.233 +int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
  54.234                     unsigned long long ptr, unsigned long long val);
  54.235  int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
  54.236  
    55.1 --- a/tools/libxc/xenguest.h	Sat Apr 15 19:25:09 2006 +0100
    55.2 +++ b/tools/libxc/xenguest.h	Sat Apr 15 19:25:21 2006 +0100
    55.3 @@ -1,8 +1,8 @@
    55.4  /******************************************************************************
    55.5   * xenguest.h
    55.6 - * 
    55.7 + *
    55.8   * A library for guest domain management in Xen.
    55.9 - * 
   55.10 + *
   55.11   * Copyright (c) 2003-2004, K A Fraser.
   55.12   */
   55.13  
   55.14 @@ -21,7 +21,7 @@
   55.15   * @parm dom the id of the domain
   55.16   * @return 0 on success, -1 on failure
   55.17   */
   55.18 -int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters, 
   55.19 +int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
   55.20                    uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
   55.21                    int (*suspend)(int domid));
   55.22  
   55.23 @@ -37,8 +37,8 @@ int xc_linux_save(int xc_handle, int io_
   55.24   * @parm store_mfn returned with the mfn of the store page
   55.25   * @return 0 on success, -1 on failure
   55.26   */
   55.27 -int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom, 
   55.28 -                     unsigned long nr_pfns, unsigned int store_evtchn, 
   55.29 +int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom,
   55.30 +                     unsigned long nr_pfns, unsigned int store_evtchn,
   55.31                       unsigned long *store_mfn, unsigned int console_evtchn,
   55.32                       unsigned long *console_mfn);
   55.33  
    56.1 --- a/tools/libxc/xg_private.c	Sat Apr 15 19:25:09 2006 +0100
    56.2 +++ b/tools/libxc/xg_private.c	Sat Apr 15 19:25:21 2006 +0100
    56.3 @@ -1,6 +1,6 @@
    56.4  /******************************************************************************
    56.5   * xg_private.c
    56.6 - * 
    56.7 + *
    56.8   * Helper functions for the rest of the library.
    56.9   */
   56.10  
    57.1 --- a/tools/libxc/xg_private.h	Sat Apr 15 19:25:09 2006 +0100
    57.2 +++ b/tools/libxc/xg_private.h	Sat Apr 15 19:25:21 2006 +0100
    57.3 @@ -11,7 +11,7 @@
    57.4  #include <sys/stat.h>
    57.5  
    57.6  #include "xenctrl.h"
    57.7 -#include "xenguest.h" 
    57.8 +#include "xenguest.h"
    57.9  
   57.10  #include <xen/linux/privcmd.h>
   57.11  #include <xen/memory.h>
   57.12 @@ -62,7 +62,7 @@ unsigned long csum_page (void * page);
   57.13  #define L2_PAGETABLE_ENTRIES_PAE  512
   57.14  #define L3_PAGETABLE_ENTRIES_PAE    4
   57.15  
   57.16 -#if defined(__i386__) 
   57.17 +#if defined(__i386__)
   57.18  #define L1_PAGETABLE_ENTRIES   1024
   57.19  #define L2_PAGETABLE_ENTRIES   1024
   57.20  #elif defined(__x86_64__)
   57.21 @@ -71,7 +71,7 @@ unsigned long csum_page (void * page);
   57.22  #define L3_PAGETABLE_ENTRIES    512
   57.23  #define L4_PAGETABLE_ENTRIES    512
   57.24  #endif
   57.25 - 
   57.26 +
   57.27  #define PAGE_SHIFT              XC_PAGE_SHIFT
   57.28  #define PAGE_SIZE               (1UL << PAGE_SHIFT)
   57.29  #define PAGE_MASK               (~(PAGE_SIZE-1))
   57.30 @@ -167,8 +167,8 @@ typedef struct mfn_mapper {
   57.31      int error;
   57.32      int max_queue_size;
   57.33      void * addr;
   57.34 -    privcmd_mmap_t ioctl; 
   57.35 -    
   57.36 +    privcmd_mmap_t ioctl;
   57.37 +
   57.38  } mfn_mapper_t;
   57.39  
   57.40  int xc_copy_to_domain_page(int xc_handle, uint32_t domid,
    58.1 --- a/tools/libxc/xg_save_restore.h	Sat Apr 15 19:25:09 2006 +0100
    58.2 +++ b/tools/libxc/xg_save_restore.h	Sat Apr 15 19:25:21 2006 +0100
    58.3 @@ -1,7 +1,7 @@
    58.4  /*
    58.5  ** xg_save_restore.h
    58.6 -** 
    58.7 -** Defintions and utilities for save / restore. 
    58.8 +**
   58.9 +** Definitions and utilities for save / restore.
   58.10  */
   58.11  
   58.12  #include "xc_private.h"
   58.13 @@ -29,8 +29,8 @@ while (0)
   58.14  
   58.15  
   58.16  /*
   58.17 -** We process save/restore/migrate in batches of pages; the below 
   58.18 -** determines how many pages we (at maximum) deal with in each batch. 
   58.19 +** We process save/restore/migrate in batches of pages; the below
   58.20 +** determines how many pages we (at maximum) deal with in each batch.
   58.21  */
   58.22  #define MAX_BATCH_SIZE 1024   /* up to 1024 pages (4MB) at a time */
   58.23  
   58.24 @@ -40,56 +40,56 @@ while (0)
   58.25  
   58.26  
   58.27  /*
   58.28 -** Determine various platform information required for save/restore, in 
   58.29 -** particular: 
   58.30 +** Determine various platform information required for save/restore, in
   58.31 +** particular:
   58.32  **
   58.33 -**    - the maximum MFN on this machine, used to compute the size of 
   58.34 -**      the M2P table; 
   58.35 -** 
   58.36 -**    - the starting virtual address of the the hypervisor; we use this 
   58.37 -**      to determine which parts of guest address space(s) do and don't 
   58.38 -**      require canonicalization during save/restore; and 
   58.39 -** 
   58.40 -**    - the number of page-table levels for save/ restore. This should 
   58.41 -**      be a property of the domain, but for the moment we just read it 
   58.42 +**    - the maximum MFN on this machine, used to compute the size of
   58.43 +**      the M2P table;
   58.44 +**
   58.45 +**    - the starting virtual address of the hypervisor; we use this
   58.46 +**      to determine which parts of guest address space(s) do and don't
   58.47 +**      require canonicalization during save/restore; and
   58.48 +**
   58.49 +**    - the number of page-table levels for save/ restore. This should
   58.50 +**      be a property of the domain, but for the moment we just read it
   58.51  **      from the hypervisor.
   58.52  **
   58.53 -** Returns 1 on success, 0 on failure. 
   58.54 +** Returns 1 on success, 0 on failure.
   58.55  */
   58.56 -static int get_platform_info(int xc_handle, uint32_t dom, 
   58.57 -                             /* OUT */ unsigned long *max_mfn,  
   58.58 -                             /* OUT */ unsigned long *hvirt_start, 
   58.59 +static int get_platform_info(int xc_handle, uint32_t dom,
   58.60 +                             /* OUT */ unsigned long *max_mfn,
   58.61 +                             /* OUT */ unsigned long *hvirt_start,
   58.62                               /* OUT */ unsigned int *pt_levels)
   58.63 -    
   58.64 -{ 
   58.65 +
   58.66 +{
   58.67      xen_capabilities_info_t xen_caps = "";
   58.68      xen_platform_parameters_t xen_params;
   58.69  
   58.70      if (xc_version(xc_handle, XENVER_platform_parameters, &xen_params) != 0)
   58.71          return 0;
   58.72 -    
   58.73 +
   58.74      if (xc_version(xc_handle, XENVER_capabilities, &xen_caps) != 0)
   58.75          return 0;
   58.76  
   58.77      *max_mfn = xc_memory_op(xc_handle, XENMEM_maximum_ram_page, NULL);
   58.78 -    
   58.79 +
   58.80      *hvirt_start = xen_params.virt_start;
   58.81  
   58.82      if (strstr(xen_caps, "xen-3.0-x86_64"))
   58.83          *pt_levels = 4;
   58.84      else if (strstr(xen_caps, "xen-3.0-x86_32p"))
   58.85 -        *pt_levels = 3; 
   58.86 +        *pt_levels = 3;
   58.87      else if (strstr(xen_caps, "xen-3.0-x86_32"))
   58.88 -        *pt_levels = 2; 
   58.89 -    else 
   58.90 -        return 0; 
   58.91 -    
   58.92 +        *pt_levels = 2;
   58.93 +    else
   58.94 +        return 0;
   58.95 +
   58.96      return 1;
   58.97 -} 
   58.98 +}
   58.99  
  58.100  
  58.101 -/* 
  58.102 -** Save/restore deal with the mfn_to_pfn (M2P) and pfn_to_mfn (P2M) tables. 
  58.103 +/*
  58.104 +** Save/restore deal with the mfn_to_pfn (M2P) and pfn_to_mfn (P2M) tables.
  58.105  ** The M2P simply holds the corresponding PFN, while the top bit of a P2M
  58.106  ** entry tell us whether or not the the PFN is currently mapped.
  58.107  */
  58.108 @@ -98,18 +98,18 @@ static int get_platform_info(int xc_hand
  58.109  #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
  58.110  
  58.111  
  58.112 -/* 
  58.113 -** The M2P is made up of some number of 'chunks' of at least 2MB in size. 
  58.114 -** The below definitions and utility function(s) deal with mapping the M2P 
  58.115 -** regarldess of the underlying machine memory size or architecture. 
  58.116 +/*
  58.117 +** The M2P is made up of some number of 'chunks' of at least 2MB in size.
  58.118 +** The below definitions and utility function(s) deal with mapping the M2P
   58.119 +** regardless of the underlying machine memory size or architecture.
  58.120  */
  58.121 -#define M2P_SHIFT       L2_PAGETABLE_SHIFT_PAE 
  58.122 -#define M2P_CHUNK_SIZE  (1 << M2P_SHIFT) 
  58.123 -#define M2P_SIZE(_m)    ROUNDUP(((_m) * sizeof(unsigned long)), M2P_SHIFT) 
  58.124 +#define M2P_SHIFT       L2_PAGETABLE_SHIFT_PAE
  58.125 +#define M2P_CHUNK_SIZE  (1 << M2P_SHIFT)
  58.126 +#define M2P_SIZE(_m)    ROUNDUP(((_m) * sizeof(unsigned long)), M2P_SHIFT)
  58.127  #define M2P_CHUNKS(_m)  (M2P_SIZE((_m)) >> M2P_SHIFT)
  58.128  
  58.129  /* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
  58.130 -#define P2M_SIZE        ROUNDUP((max_pfn * sizeof(unsigned long)), PAGE_SHIFT) 
  58.131 +#define P2M_SIZE        ROUNDUP((max_pfn * sizeof(unsigned long)), PAGE_SHIFT)
  58.132  
  58.133  /* Number of unsigned longs in a page */
  58.134  #define ulpp            (PAGE_SIZE/sizeof(unsigned long))
  58.135 @@ -127,12 +127,12 @@ static int get_platform_info(int xc_hand
  58.136  #define NR_SLACK_ENTRIES   ((8 * 1024 * 1024) / PAGE_SIZE)
  58.137  
  58.138  /* Is the given PFN within the 'slack' region at the top of the P2M? */
  58.139 -#define IS_REAL_PFN(_pfn)  ((max_pfn - (_pfn)) > NR_SLACK_ENTRIES) 
  58.140 +#define IS_REAL_PFN(_pfn)  ((max_pfn - (_pfn)) > NR_SLACK_ENTRIES)
  58.141  
  58.142  /* Returns TRUE if the PFN is currently mapped */
  58.143  #define is_mapped(pfn_type) (!((pfn_type) & 0x80000000UL))
  58.144  
  58.145 -#define INVALID_P2M_ENTRY   (~0UL) 
  58.146 +#define INVALID_P2M_ENTRY   (~0UL)
  58.147  
  58.148  
  58.149  
    59.1 --- a/tools/misc/xen-clone	Sat Apr 15 19:25:09 2006 +0100
    59.2 +++ b/tools/misc/xen-clone	Sat Apr 15 19:25:21 2006 +0100
    59.3 @@ -113,7 +113,7 @@ else
    59.4  
    59.5   # Turn linux into xenolinux then build it
    59.6   cd xenolinux-${LINUX_VER}-sparse
    59.7 - ./mkbuildtree ../../linux-${LINUX_VER}
    59.8 + bash ./mkbuildtree ../../linux-${LINUX_VER}
    59.9   cd ../..
   59.10   mv linux-${LINUX_VER} xenolinux-${LINUX_VER}
   59.11   cd xenolinux-${LINUX_VER}
    60.1 --- a/tools/python/xen/lowlevel/xs/xs.c	Sat Apr 15 19:25:09 2006 +0100
    60.2 +++ b/tools/python/xen/lowlevel/xs/xs.c	Sat Apr 15 19:25:21 2006 +0100
    60.3 @@ -589,7 +589,7 @@ static PyObject *xspy_transaction_end(Xs
    60.4  
    60.5  static PyObject *xspy_introduce_domain(XsHandle *self, PyObject *args)
    60.6  {
    60.7 -    domid_t dom;
    60.8 +    uint32_t dom;
    60.9      unsigned long page;
   60.10      unsigned int port;
   60.11  
   60.12 @@ -620,7 +620,7 @@ static PyObject *xspy_introduce_domain(X
   60.13  
   60.14  static PyObject *xspy_release_domain(XsHandle *self, PyObject *args)
   60.15  {
   60.16 -    domid_t dom;
   60.17 +    uint32_t dom;
   60.18  
   60.19      struct xs_handle *xh = xshandle(self);
   60.20      bool result = 0;
   60.21 @@ -677,7 +677,7 @@ static PyObject *xspy_close(XsHandle *se
   60.22  static PyObject *xspy_get_domain_path(XsHandle *self, PyObject *args)
   60.23  {
   60.24      struct xs_handle *xh = xshandle(self);
   60.25 -    int domid;
   60.26 +    uint32_t domid;
   60.27      char *xsval;
   60.28  
   60.29      if (!xh)
    61.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Sat Apr 15 19:25:09 2006 +0100
    61.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Sat Apr 15 19:25:21 2006 +0100
    61.3 @@ -53,7 +53,7 @@ def read_exact(fd, size, errmsg):
    61.4  
    61.5  
    61.6  
    61.7 -def save(fd, dominfo, live):
    61.8 +def save(fd, dominfo, live, dst):
    61.9      write_exact(fd, SIGNATURE, "could not write guest state file: signature")
   61.10  
   61.11      config = sxp.to_string(dominfo.sxpr())
   61.12 @@ -65,6 +65,8 @@ def save(fd, dominfo, live):
   61.13      dominfo.setName('migrating-' + domain_name)
   61.14  
   61.15      try:
   61.16 +        dominfo.migrateDevices(live, dst, 1, domain_name)
   61.17 +
   61.18          write_exact(fd, pack("!i", len(config)),
   61.19                      "could not write guest state file: config len")
   61.20          write_exact(fd, config, "could not write guest state file: config")
   61.21 @@ -85,7 +87,9 @@ def save(fd, dominfo, live):
   61.22                  log.debug("Suspending %d ...", dominfo.getDomid())
   61.23                  dominfo.shutdown('suspend')
   61.24                  dominfo.waitForShutdown()
   61.25 +                dominfo.migrateDevices(live, dst, 2, domain_name)
   61.26                  log.info("Domain %d suspended.", dominfo.getDomid())
   61.27 +                dominfo.migrateDevices(live, dst, 3, domain_name)
   61.28                  tochild.write("done\n")
   61.29                  tochild.flush()
   61.30                  log.debug('Written done')
    62.1 --- a/tools/python/xen/xend/XendDomain.py	Sat Apr 15 19:25:09 2006 +0100
    62.2 +++ b/tools/python/xen/xend/XendDomain.py	Sat Apr 15 19:25:21 2006 +0100
    62.3 @@ -405,6 +405,9 @@ class XendDomain:
    62.4          if dominfo.getDomid() == PRIV_DOMAIN:
    62.5              raise XendError("Cannot migrate privileged domain %i" % domid)
    62.6  
    62.7 +        """ The following call may raise a XendError exception """
    62.8 +        dominfo.testMigrateDevices(live, dst)
    62.9 +
   62.10          if port == 0:
   62.11              port = xroot.get_xend_relocation_port()
   62.12          try:
   62.13 @@ -414,8 +417,8 @@ class XendDomain:
   62.14              raise XendError("can't connect: %s" % err[1])
   62.15  
   62.16          sock.send("receive\n")
   62.17 -        sock.recv(80) 
   62.18 -        XendCheckpoint.save(sock.fileno(), dominfo, live)
   62.19 +        sock.recv(80)
   62.20 +        XendCheckpoint.save(sock.fileno(), dominfo, live, dst)
   62.21  
   62.22  
   62.23      def domain_save(self, domid, dst):
   62.24 @@ -435,7 +438,7 @@ class XendDomain:
   62.25              fd = os.open(dst, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
   62.26              try:
   62.27                  # For now we don't support 'live checkpoint' 
   62.28 -                return XendCheckpoint.save(fd, dominfo, False)
   62.29 +                return XendCheckpoint.save(fd, dominfo, False, dst)
   62.30              finally:
   62.31                  os.close(fd)
   62.32          except OSError, ex:
    63.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Sat Apr 15 19:25:09 2006 +0100
    63.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Sat Apr 15 19:25:21 2006 +0100
    63.3 @@ -1395,6 +1395,38 @@ class XendDomainInfo:
    63.4          if self.image:
    63.5              self.image.createDeviceModel()
    63.6  
    63.7 +    ## public:
    63.8 +
    63.9 +    def testMigrateDevices(self, live, dst):
   63.10 +        """ Notify all device about intention of migration
   63.11 +        @raise: XendError for a device that cannot be migrated
   63.12 +        """
   63.13 +        for (n, c) in self.info['device']:
   63.14 +            rc = self.migrateDevice(n, c, live, dst, 0)
   63.15 +            if rc != 0:
   63.16 +                raise XendError("Device of type '%s' refuses migration." % n)
   63.17 +
   63.18 +    def migrateDevices(self, live, dst, step, domName=''):
   63.19 +        """Notify the devices about migration
   63.20 +        """
   63.21 +        ctr = 0
   63.22 +        try:
   63.23 +            for (n, c) in self.info['device']:
   63.24 +                self.migrateDevice(n, c, live, dst, step, domName)
   63.25 +                ctr = ctr + 1
   63.26 +        except:
   63.27 +            for (n, c) in self.info['device']:
   63.28 +                if ctr == 0:
   63.29 +                    step = step - 1
   63.30 +                ctr = ctr - 1
   63.31 +                self.recoverMigrateDevice(n, c, live, dst, step, domName)
   63.32 +            raise
   63.33 +
   63.34 +    def migrateDevice(self, deviceClass, deviceConfig, live, dst, step, domName=''):
   63.35 +        return self.getDeviceController(deviceClass).migrate(deviceConfig, live, dst, step, domName)
   63.36 +
   63.37 +    def recoverMigrateDevice(self, deviceClass, deviceConfig, live, dst, step, domName=''):
   63.38 +        return self.getDeviceController(deviceClass).recover_migrate(deviceConfig, live, dst, step, domName)
   63.39  
   63.40      def waitForDevices(self):
   63.41          """Wait for this domain's configured devices to connect.
    64.1 --- a/tools/python/xen/xend/XendRoot.py	Sat Apr 15 19:25:09 2006 +0100
    64.2 +++ b/tools/python/xen/xend/XendRoot.py	Sat Apr 15 19:25:21 2006 +0100
    64.3 @@ -86,6 +86,9 @@ class XendRoot:
    64.4      server (deprecated)."""
    64.5      xend_unix_server_default = 'no'
    64.6  
    64.7 +    """Default external migration tool """
    64.8 +    external_migration_tool_default = ''
    64.9 +
   64.10      """Default path the unix-domain server listens at."""
   64.11      xend_unix_path_default = '/var/lib/xend/xend-socket'
   64.12  
   64.13 @@ -250,6 +253,9 @@ class XendRoot:
   64.14          else:
   64.15              return None
   64.16  
   64.17 +    def get_external_migration_tool(self):
   64.18 +        """@return the name of the tool to handle virtual TPM migration."""
   64.19 +        return self.get_config_value('external-migration-tool', self.external_migration_tool_default)
   64.20  
   64.21      def get_enable_dump(self):
   64.22          return self.get_config_bool('enable-dump', 'no')
    65.1 --- a/tools/python/xen/xend/server/DevController.py	Sat Apr 15 19:25:09 2006 +0100
    65.2 +++ b/tools/python/xen/xend/server/DevController.py	Sat Apr 15 19:25:21 2006 +0100
    65.3 @@ -267,6 +267,41 @@ class DevController:
    65.4  
    65.5          raise NotImplementedError()
    65.6  
    65.7 +    def migrate(self, deviceConfig, live, dst, step, domName):
    65.8 +        """ Migration of a device. The 'live' parameter indicates
    65.9 +            whether the device is live-migrated (live=1). 'dst' then gives
   65.10 +            the hostname of the machine to migrate to.
   65.11 +        This function is called for 4 steps:
   65.12 +        If step == 0: Check whether the device is ready to be migrated
   65.13 +                      or can at all be migrated; return a '-1' if
   65.14 +                      the device is NOT ready, a '0' otherwise. If it is
   65.15 +                      not ready ( = not possible to migrate this device),
   65.16 +                      migration will not take place.
   65.17 +           step == 1: Called immediately after step 0; migration
   65.18 +                      of the kernel has started;
   65.19 +           step == 2: Called after the suspend has been issued
   65.20 +                      to the domain and the domain is not scheduled anymore.
   65.21 +                      Synchronize with what was started in step 1, if necessary.
   65.22 +                      Now the device should initiate its transfer to the
   65.23 +                      given target. Since there might be more than just
   65.24 +                      one device initiating a migration, this step should
   65.25 +                      put the process performing the transfer into the
   65.26 +                      background and return immediately to achieve as much
   65.27 +                      concurrency as possible.
   65.28 +           step == 3: Synchronize with the migration of the device that
   65.29 +                      was initiated in step 2.
   65.30 +                      Make sure that the migration has finished and only
   65.31 +                      then return from the call.
   65.32 +        """
   65.33 +        return 0
   65.34 +
   65.35 +
   65.36 +    def recover_migrate(self, deviceConfig, live, dst, step, domName):
   65.37 +        """ Recover from device migration. The given step was the
   65.38 +            last one that was successfully executed.
   65.39 +        """
   65.40 +        return 0
   65.41 +
   65.42  
   65.43      def getDomid(self):
   65.44          """Stub to {@link XendDomainInfo.getDomid}, for use by our
    66.1 --- a/tools/python/xen/xend/server/tpmif.py	Sat Apr 15 19:25:09 2006 +0100
    66.2 +++ b/tools/python/xen/xend/server/tpmif.py	Sat Apr 15 19:25:21 2006 +0100
    66.3 @@ -23,9 +23,17 @@
    66.4  
    66.5  from xen.xend import sxp
    66.6  from xen.xend.XendLogging import log
    66.7 +from xen.xend.XendError import XendError
    66.8 +from xen.xend import XendRoot
    66.9  
   66.10  from xen.xend.server.DevController import DevController
   66.11  
   66.12 +import os
   66.13 +import re
   66.14 +
   66.15 +
   66.16 +xroot = XendRoot.instance()
   66.17 +
   66.18  
   66.19  class TPMifController(DevController):
   66.20      """TPM interface controller. Handles all TPM devices for a domain.
   66.21 @@ -61,3 +69,43 @@ class TPMifController(DevController):
   66.22              result.append(['instance', instance])
   66.23  
   66.24          return result
   66.25 +
   66.26 +    def migrate(self, deviceConfig, live, dst, step, domName):
   66.27 +        """@see DevController.migrate"""
   66.28 +        if live:
   66.29 +            tool = xroot.get_external_migration_tool()
   66.30 +            if tool != '':
   66.31 +                log.info("Request to live-migrate device to %s. step=%d.",
   66.32 +                         dst, step)
   66.33 +
   66.34 +                if step == 0:
   66.35 +                    """Assuming for now that everything is ok and migration
   66.36 +                       with the given tool can proceed.
   66.37 +                    """
   66.38 +                    return 0
   66.39 +                else:
   66.40 +                    fd = os.popen("%s -type vtpm -step %d -host %s -domname %s" %
   66.41 +                                  (tool, step, dst, domName),
   66.42 +                                  'r')
   66.43 +                    for line in fd.readlines():
   66.44 +                        mo = re.search('Error', line)
   66.45 +                        if mo:
   66.46 +                            raise XendError("vtpm: Fatal error in migration step %d." %
   66.47 +                                            step)
   66.48 +                    return 0
   66.49 +            else:
   66.50 +                log.debug("External migration tool not in configuration.")
   66.51 +                return -1
   66.52 +        return 0
   66.53 +
   66.54 +    def recover_migrate(self, deviceConfig, live, dst, step, domName):
   66.55 +        """@see DevController.recover_migrate"""
   66.56 +        if live:
   66.57 +            tool = xroot.get_external_migration_tool()
   66.58 +            if tool != '':
   66.59 +                log.info("Request to recover live-migrated device. last good step=%d.",
   66.60 +                         step)
   66.61 +                fd = os.popen("%s -type vtpm -step %d -host %s -domname %s -recover" %
   66.62 +                              (tool, step, dst, domName),
   66.63 +                              'r')
   66.64 +        return 0
    67.1 --- a/tools/python/xen/xm/create.py	Sat Apr 15 19:25:09 2006 +0100
    67.2 +++ b/tools/python/xen/xm/create.py	Sat Apr 15 19:25:21 2006 +0100
    67.3 @@ -158,7 +158,7 @@ gopts.var('cpu', val='CPU',
    67.4            use="CPU to run the VCPU0 on.")
    67.5  
    67.6  gopts.var('cpus', val='CPUS',
    67.7 -          fn=set_int, default=None,
    67.8 +          fn=set_value, default=None,
    67.9            use="CPUS to run the domain on.")
   67.10  
   67.11  gopts.var('pae', val='PAE',
    68.1 --- a/tools/xenmon/README	Sat Apr 15 19:25:09 2006 +0100
    68.2 +++ b/tools/xenmon/README	Sat Apr 15 19:25:21 2006 +0100
    68.3 @@ -84,6 +84,16 @@ Usage Notes and issues
    68.4     events cause a trace record to be emitted.
    68.5   - To exit xenmon, type 'q'
    68.6   - To cycle the display to other physical cpu's, type 'c'
    68.7 + - The first time xenmon is run, it attempts to allocate xen trace buffers
    68.8 +   using a default size. If you wish to use a non-default value for the
    68.9 +   trace buffer size, run the 'setsize' program (located in tools/xentrace)
   68.10 +   and specify the number of memory pages as a parameter. The default is 20.
   68.11 + - Not well tested with domains using more than 1 virtual cpu
   68.12 + - If you create a lot of domains, or repeatedly kill a domain and restart it,
   68.13 +   and the domain id's get to be bigger than NDOMAINS, then xenmon behaves badly.
   68.14 +   This is a bug that is due to xenbaked's treatment of domain id's vs. domain
   68.15 +   indices in a data array. Will be fixed in a future release; Workaround:
   68.16 +   Increase NDOMAINS in xenbaked and rebuild.
   68.17  
   68.18  Future Work
   68.19  -----------
    69.1 --- a/tools/xenmon/xenbaked.c	Sat Apr 15 19:25:09 2006 +0100
    69.2 +++ b/tools/xenmon/xenbaked.c	Sat Apr 15 19:25:21 2006 +0100
    69.3 @@ -7,6 +7,7 @@
    69.4   *
    69.5   * Copyright (C) 2004 by Intel Research Cambridge
    69.6   * Copyright (C) 2005 by Hewlett Packard, Palo Alto and Fort Collins
    69.7 + * Copyright (C) 2006 by Hewlett Packard Fort Collins
    69.8   *
    69.9   * Authors: Diwaker Gupta, diwaker.gupta@hp.com
   69.10   *          Rob Gardner, rob.gardner@hp.com
   69.11 @@ -42,6 +43,8 @@
   69.12  #include <xenctrl.h>
   69.13  #include <xen/xen.h>
   69.14  #include <string.h>
   69.15 +#include <sys/select.h>
   69.16 +#include <xen/linux/evtchn.h>
   69.17  
   69.18  #include "xc_private.h"
   69.19  typedef struct { int counter; } atomic_t;
   69.20 @@ -81,6 +84,7 @@ settings_t opts;
   69.21  
   69.22  int interrupted = 0; /* gets set if we get a SIGHUP */
   69.23  int rec_count = 0;
   69.24 +int wakeups = 0;
   69.25  time_t start_time;
   69.26  int dom0_flips = 0;
   69.27  
   69.28 @@ -88,8 +92,6 @@ int dom0_flips = 0;
   69.29  _new_qos_data **cpu_qos_data;
   69.30  
   69.31  
   69.32 -#define ID(X) ((X>NDOMAINS-1)?(NDOMAINS-1):X)
   69.33 -
   69.34  // array of currently running domains, indexed by cpu
   69.35  int *running = NULL;
   69.36  
   69.37 @@ -223,6 +225,9 @@ void dump_stats(void)
   69.38      printf("processed %d total records in %d seconds (%ld per second)\n",
   69.39              rec_count, (int)run_time, rec_count/run_time);
   69.40  
   69.41 +    printf("woke up %d times in %d seconds (%ld per second)\n", wakeups,
   69.42 +	   (int) run_time, wakeups/run_time);
   69.43 +
   69.44      check_gotten_sum();
   69.45  }
   69.46  
   69.47 @@ -243,6 +248,112 @@ void log_event(int event_id)
   69.48          stat_map[0].event_count++;	// other
   69.49  }
   69.50  
   69.51 +#define EVTCHN_DEV_NAME  "/dev/xen/evtchn"
   69.52 +#define EVTCHN_DEV_MAJOR 10
   69.53 +#define EVTCHN_DEV_MINOR 201
   69.54 +
   69.55 +int virq_port;
   69.56 +int eventchn_fd = -1;
   69.57 +
   69.58 +/* Returns the event channel handle. */
   69.59 +/* Stolen from xenstore code */
   69.60 +int eventchn_init(void)
   69.61 +{
   69.62 +  struct stat st;
   69.63 +  struct ioctl_evtchn_bind_virq bind;
   69.64 +  int rc;
   69.65 +  
   69.66 +  // to revert to old way:
   69.67 +  if (0)
   69.68 +    return -1;
   69.69 +  
   69.70 +  /* Make sure any existing device file links to correct device. */
   69.71 +  if ((lstat(EVTCHN_DEV_NAME, &st) != 0) || !S_ISCHR(st.st_mode) ||
   69.72 +      (st.st_rdev != makedev(EVTCHN_DEV_MAJOR, EVTCHN_DEV_MINOR)))
   69.73 +    (void)unlink(EVTCHN_DEV_NAME);
   69.74 +  
   69.75 + reopen:
   69.76 +  eventchn_fd = open(EVTCHN_DEV_NAME, O_NONBLOCK|O_RDWR);
   69.77 +  if (eventchn_fd == -1) {
   69.78 +    if ((errno == ENOENT) &&
   69.79 +	((mkdir("/dev/xen", 0755) == 0) || (errno == EEXIST)) &&
   69.80 +	(mknod(EVTCHN_DEV_NAME, S_IFCHR|0600,
   69.81 +	       makedev(EVTCHN_DEV_MAJOR, EVTCHN_DEV_MINOR)) == 0))
   69.82 +      goto reopen;
   69.83 +    return -errno;
   69.84 +  }
   69.85 +  
   69.86 +  if (eventchn_fd < 0)
   69.87 +    perror("Failed to open evtchn device");
   69.88 +  
   69.89 +  bind.virq = VIRQ_TBUF;
   69.90 +  rc = ioctl(eventchn_fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
   69.91 +  if (rc == -1)
   69.92 +    perror("Failed to bind to domain exception virq port");
   69.93 +  virq_port = rc;
   69.94 +  
   69.95 +  return eventchn_fd;
   69.96 +}
   69.97 +
   69.98 +void wait_for_event(void)
   69.99 +{
  69.100 +  int ret;
  69.101 +  fd_set inset;
  69.102 +  evtchn_port_t port;
  69.103 +  struct timeval tv;
  69.104 +  
  69.105 +  if (eventchn_fd < 0) {
  69.106 +    nanosleep(&opts.poll_sleep, NULL);
  69.107 +    return;
  69.108 +  }
  69.109 +
  69.110 +  FD_ZERO(&inset);
  69.111 +  FD_SET(eventchn_fd, &inset);
  69.112 +  tv.tv_sec = 1;
  69.113 +  tv.tv_usec = 0;
  69.114 +  // tv = millis_to_timespec(&opts.poll_sleep);
  69.115 +  ret = select(eventchn_fd+1, &inset, NULL, NULL, &tv);
  69.116 +  
  69.117 +  if ( (ret == 1) && FD_ISSET(eventchn_fd, &inset)) {
  69.118 +    if (read(eventchn_fd, &port, sizeof(port)) != sizeof(port))
  69.119 +      perror("Failed to read from event fd");
  69.120 +    
  69.121 +    //    if (port == virq_port)
  69.122 +    //      printf("got the event I was looking for\r\n");
  69.123 +    
  69.124 +    if (write(eventchn_fd, &port, sizeof(port)) != sizeof(port))
  69.125 +      perror("Failed to write to event fd");
  69.126 +  }
  69.127 +}
  69.128 +
  69.129 +void enable_tracing_or_die(int xc_handle) 
  69.130 +{
  69.131 +  int enable = 1;
  69.132 +  int tbsize = DEFAULT_TBUF_SIZE;
  69.133 +  
  69.134 +  if (xc_tbuf_enable(xc_handle, enable) != 0) {
  69.135 +    if (xc_tbuf_set_size(xc_handle, tbsize) != 0) {
  69.136 +      perror("set_size Hypercall failure");
  69.137 +      exit(1);
  69.138 +    }
  69.139 +    printf("Set default trace buffer allocation (%d pages)\n", tbsize);
  69.140 +    if (xc_tbuf_enable(xc_handle, enable) != 0) {
  69.141 +      perror("Could not enable trace buffers\n");
  69.142 +      exit(1);
  69.143 +    }
  69.144 +  }
  69.145 +  else
  69.146 +    printf("Tracing enabled\n");
  69.147 +}
  69.148 +
  69.149 +void disable_tracing(void)
  69.150 +{
  69.151 +  int enable = 0;
  69.152 +  int xc_handle = xc_interface_open();
  69.153 +    
  69.154 +  xc_tbuf_enable(xc_handle, enable);
  69.155 +  xc_interface_close(xc_handle);
  69.156 +}
  69.157  
  69.158  
  69.159  /**
  69.160 @@ -258,6 +369,17 @@ void get_tbufs(unsigned long *mfn, unsig
  69.161      int ret;
  69.162      dom0_op_t op;                        /* dom0 op we'll build             */
  69.163      int xc_handle = xc_interface_open(); /* for accessing control interface */
  69.164 +    unsigned int tbsize;
  69.165 +
  69.166 +    enable_tracing_or_die(xc_handle);
  69.167 +
  69.168 +    if (xc_tbuf_get_size(xc_handle, &tbsize) != 0) {
  69.169 +      perror("Failure to get tbuf info from Xen. Guess size is 0?");
  69.170 +      exit(1);
  69.171 +    }
  69.172 +    else
  69.173 +      printf("Current tbuf size: 0x%x\n", tbsize);
  69.174 +    
  69.175  
  69.176      op.cmd = DOM0_TBUFCONTROL;
  69.177      op.interface_version = DOM0_INTERFACE_VERSION;
  69.178 @@ -448,6 +570,11 @@ int monitor_tbufs(void)
  69.179      meta  = init_bufs_ptrs (tbufs_mapped, num, size);
  69.180      data  = init_rec_ptrs(meta, num);
  69.181  
  69.182 +    // Set up event channel for select()
  69.183 +    if (eventchn_init() < 0) {
  69.184 +      fprintf(stderr, "Failed to initialize event channel; Using POLL method\r\n");
  69.185 +    }
  69.186 +
  69.187      /* now, scan buffers for events */
  69.188      while ( !interrupted )
  69.189      {
  69.190 @@ -460,7 +587,8 @@ int monitor_tbufs(void)
  69.191                  meta[i]->cons++;
  69.192              }
  69.193  
  69.194 -        nanosleep(&opts.poll_sleep, NULL);
  69.195 +	wait_for_event();
  69.196 +	wakeups++;
  69.197      }
  69.198  
  69.199      /* cleanup */
  69.200 @@ -640,6 +768,7 @@ int main(int argc, char **argv)
  69.201  
  69.202      dump_stats();
  69.203      msync(new_qos, sizeof(_new_qos_data), MS_SYNC);
  69.204 +    disable_tracing();
  69.205  
  69.206      return ret;
  69.207  }
  69.208 @@ -737,7 +866,9 @@ void qos_update_thread(int cpu, int domi
  69.209          start = new_qos->domain_info[id].start_time;
  69.210          if (start > now) {		// wrapped around
  69.211              run_time = now + (~0ULL - start);
  69.212 -	    printf("warning: start > now\n");
  69.213 +	    // this could happen if there is nothing going on within a cpu;
  69.214 +	    // in this case the idle domain would run forever
  69.215 +	    //        printf("warning: start > now\n");
  69.216          }
  69.217          else
  69.218              run_time = now - start;
  69.219 @@ -746,11 +877,11 @@ void qos_update_thread(int cpu, int domi
  69.220          new_qos->domain_info[id].ns_oncpu_since_boot += run_time;
  69.221          new_qos->domain_info[id].start_time = now;
  69.222          new_qos->domain_info[id].ns_since_boot += time_since_update;
  69.223 -#if 1
  69.224 +
  69.225  	new_qos->qdata[n].ns_gotten[id] += run_time;
  69.226 -	if (domid == 0 && cpu == 1)
  69.227 -	  printf("adding run time for dom0 on cpu1\r\n");
  69.228 -#endif
  69.229 +	//	if (domid == 0 && cpu == 1)
  69.230 +	//	  printf("adding run time for dom0 on cpu1\r\n");
  69.231 +
  69.232      }
  69.233  
  69.234      new_qos->domain_info[id].runnable_at_last_update = domain_runnable(domid);
  69.235 @@ -916,13 +1047,13 @@ void qos_state_runnable(int cpu, int dom
  69.236  {
  69.237      int id = ID(domid);
  69.238  
  69.239 +    qos_update_thread_stats(cpu, domid, now);
  69.240 +
  69.241      if (domain_runnable(id))	// double call?
  69.242          return;
  69.243      new_qos->domain_info[id].runnable = 1;
  69.244      update_blocked_time(domid, now);
  69.245  
  69.246 -    qos_update_thread_stats(cpu, domid, now);
  69.247 -
  69.248      new_qos->domain_info[id].blocked_start_time = 0; /* invalidate */
  69.249      new_qos->domain_info[id].runnable_start_time = now;
  69.250      //  runnable_start_time[id] = now;
  69.251 @@ -951,7 +1082,7 @@ int domain_ok(int cpu, int domid, uint64
  69.252      if (domid == IDLE_DOMAIN_ID)
  69.253          domid = NDOMAINS-1;
  69.254      if (domid < 0 || domid >= NDOMAINS) {
  69.255 -        printf("bad domain id: %d\n", domid);
  69.256 +        printf("bad domain id: %d\r\n", domid);
  69.257          return 0;
  69.258      }
  69.259      if (new_qos->domain_info[domid].in_use == 0)
    70.1 --- a/tools/xenmon/xenbaked.h	Sat Apr 15 19:25:09 2006 +0100
    70.2 +++ b/tools/xenmon/xenbaked.h	Sat Apr 15 19:25:21 2006 +0100
    70.3 @@ -1,5 +1,5 @@
    70.4  /******************************************************************************
    70.5 - * tools/xenbaked.h
    70.6 + * TOOLS/xenbaked.h
    70.7   *
    70.8   * Header file for xenbaked
    70.9   *
   70.10 @@ -30,6 +30,7 @@
   70.11  #define million 1000000LL
   70.12  #define billion 1000000000LL
   70.13  
   70.14 +// caution: don't use QOS_ADD with negative numbers!
   70.15  #define QOS_ADD(N,A) ((N+A)<(NSAMPLES-1) ? (N+A) : A)
   70.16  #define QOS_INCR(N) ((N<(NSAMPLES-2)) ? (N+1) : 0)
   70.17  #define QOS_DECR(N) ((N==0) ? (NSAMPLES-1) : (N-1))
   70.18 @@ -43,6 +44,8 @@
   70.19  /* Number of data points to keep */
   70.20  #define NSAMPLES 100
   70.21  
   70.22 +#define ID(X) ((X>NDOMAINS-1)?(NDOMAINS-1):X)
   70.23 +#define DEFAULT_TBUF_SIZE 20
   70.24  
   70.25  // per domain stuff
   70.26  typedef struct 
    71.1 --- a/tools/xenmon/xenmon.py	Sat Apr 15 19:25:09 2006 +0100
    71.2 +++ b/tools/xenmon/xenmon.py	Sat Apr 15 19:25:21 2006 +0100
    71.3 @@ -5,7 +5,7 @@
    71.4  # There is a curses interface for live monitoring. XenMon also allows
    71.5  # logging to a file. For options, run python xenmon.py -h
    71.6  #
    71.7 -# Copyright (C) 2005 by Hewlett Packard, Palo Alto and Fort Collins
    71.8 +# Copyright (C) 2005,2006 by Hewlett Packard, Palo Alto and Fort Collins
    71.9  # Authors: Lucy Cherkasova, lucy.cherkasova@hp.com
   71.10  #          Rob Gardner, rob.gardner@hp.com
   71.11  #          Diwaker Gupta, diwaker.gupta@hp.com
   71.12 @@ -85,6 +85,33 @@ def setup_cmdline_parser():
   71.13      parser.add_option("--ms_per_sample", dest="mspersample",
   71.14              action="store", type="int", default=100,
   71.15              help = "determines how many ms worth of data goes in a sample")
   71.16 +    parser.add_option("--cpu", dest="cpu", action="store", type="int", default=0,
   71.17 +            help = "specifies which cpu to display data for")
   71.18 +
   71.19 +    parser.add_option("--allocated", dest="allocated", action="store_true",
   71.20 +                      default=False, help="Display allocated time for each domain")
   71.21 +    parser.add_option("--noallocated", dest="allocated", action="store_false",
   71.22 +                      default=False, help="Don't display allocated time for each domain")
   71.23 +
   71.24 +    parser.add_option("--blocked", dest="blocked", action="store_true",
   71.25 +                      default=True, help="Display blocked time for each domain")
   71.26 +    parser.add_option("--noblocked", dest="blocked", action="store_false",
   71.27 +                      default=True, help="Don't display blocked time for each domain")
   71.28 +
   71.29 +    parser.add_option("--waited", dest="waited", action="store_true",
   71.30 +                      default=True, help="Display waiting time for each domain")
   71.31 +    parser.add_option("--nowaited", dest="waited", action="store_false",
   71.32 +                      default=True, help="Don't display waiting time for each domain")
   71.33 +
   71.34 +    parser.add_option("--excount", dest="excount", action="store_true",
   71.35 +                      default=False, help="Display execution count for each domain")
   71.36 +    parser.add_option("--noexcount", dest="excount", action="store_false",
   71.37 +                      default=False, help="Don't display execution count for each domain")
   71.38 +    parser.add_option("--iocount", dest="iocount", action="store_true",
   71.39 +                      default=False, help="Display I/O count for each domain")
   71.40 +    parser.add_option("--noiocount", dest="iocount", action="store_false",
   71.41 +                      default=False, help="Don't display I/O count for each domain")
   71.42 +
   71.43      return parser
   71.44  
   71.45  # encapsulate information about a domain
   71.46 @@ -227,20 +254,18 @@ def display(scr, row, col, str, attr=0):
   71.47  
   71.48  
   71.49  # the live monitoring code
   71.50 -def show_livestats():
   71.51 -    cpu = 0          # cpu of interest to display data for
   71.52 +def show_livestats(cpu):
   71.53      ncpu = 1         # number of cpu's on this platform
   71.54      slen = 0         # size of shared data structure, incuding padding
   71.55 -    global dom_in_use
   71.56 +    cpu_1sec_usage = 0.0
   71.57 +    cpu_10sec_usage = 0.0
   71.58 +    heartbeat = 1
   71.59 +    global dom_in_use, options
   71.60      
   71.61      # mmap the (the first chunk of the) file
   71.62      shmf = open(SHM_FILE, "r+")
   71.63      shm = mmap.mmap(shmf.fileno(), QOS_DATA_SIZE)
   71.64  
   71.65 -    samples = []
   71.66 -    doms = []
   71.67 -    dom_in_use = []
   71.68 -
   71.69      # initialize curses
   71.70      stdscr = _c.initscr()
   71.71      _c.noecho()
   71.72 @@ -253,7 +278,8 @@ def show_livestats():
   71.73      # display in a loop
   71.74      while True:
   71.75  
   71.76 -        for cpuidx in range(0, ncpu):
   71.77 +        cpuidx = 0
   71.78 +        while cpuidx < ncpu:
   71.79  
   71.80              # calculate offset in mmap file to start from
   71.81              idx = cpuidx * slen
   71.82 @@ -261,6 +287,7 @@ def show_livestats():
   71.83  
   71.84              samples = []
   71.85              doms = []
   71.86 +            dom_in_use = []
   71.87  
   71.88              # read in data
   71.89              for i in range(0, NSAMPLES):
   71.90 @@ -279,6 +306,8 @@ def show_livestats():
   71.91  #		dom_in_use.append(in_use)
   71.92                  dom_in_use.append(dom[8])
   71.93                  idx += len
   71.94 +#            print "dom_in_use(cpu=%d): " % cpuidx, dom_in_use
   71.95 +
   71.96  
   71.97              len = struct.calcsize("4i")
   71.98              oldncpu = ncpu
   71.99 @@ -295,6 +324,8 @@ def show_livestats():
  71.100              if cpuidx == cpu:
  71.101                  break
  71.102  
  71.103 +            cpuidx = cpuidx + 1
  71.104 +
  71.105          # calculate starting and ending datapoints; never look at "next" since
  71.106          # it represents live data that may be in transition. 
  71.107          startat = next - 1
  71.108 @@ -312,13 +343,16 @@ def show_livestats():
  71.109          row = 0
  71.110          display(stdscr, row, 1, "CPU = %d" % cpu, _c.A_STANDOUT)
  71.111  
  71.112 -        display(stdscr, row, 10, "%sLast 10 seconds%sLast 1 second" % (6*' ', 30*' '), _c.A_BOLD)
  71.113 +        display(stdscr, row, 10, "%sLast 10 seconds (%3.2f%%)%sLast 1 second (%3.2f%%)" % (6*' ', cpu_10sec_usage, 30*' ', cpu_1sec_usage), _c.A_BOLD)
  71.114          row +=1
  71.115          display(stdscr, row, 1, "%s" % ((maxx-2)*'='))
  71.116  
  71.117          total_h1_cpu = 0
  71.118          total_h2_cpu = 0
  71.119  
  71.120 +        cpu_1sec_usage = 0.0
  71.121 +        cpu_10sec_usage = 0.0
  71.122 +
  71.123          for dom in range(0, NDOMAINS):
  71.124              if not dom_in_use[dom]:
  71.125                  continue
  71.126 @@ -332,92 +366,102 @@ def show_livestats():
  71.127                  display(stdscr, row, col, "%s" % time_scale(h2[dom][0][0]))
  71.128                  col += 12
  71.129                  display(stdscr, row, col, "%3.2f%%" % h2[dom][0][1])
  71.130 +                if dom != NDOMAINS - 1:
  71.131 +                    cpu_10sec_usage += h2[dom][0][1]
  71.132                  col += 12
  71.133                  display(stdscr, row, col, "%s/ex" % time_scale(h2[dom][0][2]))
  71.134                  col += 18
  71.135                  display(stdscr, row, col, "%s" % time_scale(h1[dom][0][0]))
  71.136                  col += 12
  71.137 -                display(stdscr, row, col, "%3.2f%%" % h1[dom][0][1])
  71.138 +                display(stdscr, row, col, "%3.2f%%" % h1[dom][0][1], _c.A_STANDOUT)
  71.139                  col += 12
  71.140                  display(stdscr, row, col, "%s/ex" % time_scale(h1[dom][0][2]))
  71.141                  col += 18
  71.142                  display(stdscr, row, col, "Gotten")
  71.143 +
  71.144 +                if dom != NDOMAINS - 1:
  71.145 +                    cpu_1sec_usage = cpu_1sec_usage + h1[dom][0][1]
  71.146      
  71.147                  # display allocated
  71.148 -                row += 1
  71.149 -                col = 2
  71.150 -                display(stdscr, row, col, "%d" % dom)
  71.151 -                col += 28
  71.152 -                display(stdscr, row, col, "%s/ex" % time_scale(h2[dom][1]))
  71.153 -                col += 42
  71.154 -                display(stdscr, row, col, "%s/ex" % time_scale(h1[dom][1]))
  71.155 -                col += 18
  71.156 -                display(stdscr, row, col, "Allocated")
  71.157 +                if options.allocated:
  71.158 +                    row += 1
  71.159 +                    col = 2
  71.160 +                    display(stdscr, row, col, "%d" % dom)
  71.161 +                    col += 28
  71.162 +                    display(stdscr, row, col, "%s/ex" % time_scale(h2[dom][1]))
  71.163 +                    col += 42
  71.164 +                    display(stdscr, row, col, "%s/ex" % time_scale(h1[dom][1]))
  71.165 +                    col += 18
  71.166 +                    display(stdscr, row, col, "Allocated")
  71.167  
  71.168                  # display blocked
  71.169 -                row += 1
  71.170 -                col = 2
  71.171 -                display(stdscr, row, col, "%d" % dom)
  71.172 -                col += 4
  71.173 -                display(stdscr, row, col, "%s" % time_scale(h2[dom][2][0]))
  71.174 -                col += 12
  71.175 -                display(stdscr, row, col, "%3.2f%%" % h2[dom][2][1])
  71.176 -                col += 12
  71.177 -                display(stdscr, row, col, "%s/io" % time_scale(h2[dom][2][2]))
  71.178 -                col += 18
  71.179 -                display(stdscr, row, col, "%s" % time_scale(h1[dom][2][0]))
  71.180 -                col += 12
  71.181 -                display(stdscr, row, col, "%3.2f%%" % h1[dom][2][1])
  71.182 -                col += 12
  71.183 -                display(stdscr, row, col, "%s/io" % time_scale(h1[dom][2][2]))
  71.184 -                col += 18
  71.185 -                display(stdscr, row, col, "Blocked")
  71.186 +                if options.blocked:
  71.187 +                    row += 1
  71.188 +                    col = 2
  71.189 +                    display(stdscr, row, col, "%d" % dom)
  71.190 +                    col += 4
  71.191 +                    display(stdscr, row, col, "%s" % time_scale(h2[dom][2][0]))
  71.192 +                    col += 12
  71.193 +                    display(stdscr, row, col, "%3.2f%%" % h2[dom][2][1])
  71.194 +                    col += 12
  71.195 +                    display(stdscr, row, col, "%s/io" % time_scale(h2[dom][2][2]))
  71.196 +                    col += 18
  71.197 +                    display(stdscr, row, col, "%s" % time_scale(h1[dom][2][0]))
  71.198 +                    col += 12
  71.199 +                    display(stdscr, row, col, "%3.2f%%" % h1[dom][2][1])
  71.200 +                    col += 12
  71.201 +                    display(stdscr, row, col, "%s/io" % time_scale(h1[dom][2][2]))
  71.202 +                    col += 18
  71.203 +                    display(stdscr, row, col, "Blocked")
  71.204  
  71.205                  # display waited
  71.206 -                row += 1
  71.207 -                col = 2
  71.208 -                display(stdscr, row, col, "%d" % dom)
  71.209 -                col += 4
  71.210 -                display(stdscr, row, col, "%s" % time_scale(h2[dom][3][0]))
  71.211 -                col += 12
  71.212 -                display(stdscr, row, col, "%3.2f%%" % h2[dom][3][1])
  71.213 -                col += 12
  71.214 -                display(stdscr, row, col, "%s/ex" % time_scale(h2[dom][3][2]))
  71.215 -                col += 18
  71.216 -                display(stdscr, row, col, "%s" % time_scale(h1[dom][3][0]))
  71.217 -                col += 12
  71.218 -                display(stdscr, row, col, "%3.2f%%" % h1[dom][3][1])
  71.219 -                col += 12
  71.220 -                display(stdscr, row, col, "%s/ex" % time_scale(h1[dom][3][2]))
  71.221 -                col += 18
  71.222 -                display(stdscr, row, col, "Waited")
  71.223 +                if options.waited:
  71.224 +                    row += 1
  71.225 +                    col = 2
  71.226 +                    display(stdscr, row, col, "%d" % dom)
  71.227 +                    col += 4
  71.228 +                    display(stdscr, row, col, "%s" % time_scale(h2[dom][3][0]))
  71.229 +                    col += 12
  71.230 +                    display(stdscr, row, col, "%3.2f%%" % h2[dom][3][1])
  71.231 +                    col += 12
  71.232 +                    display(stdscr, row, col, "%s/ex" % time_scale(h2[dom][3][2]))
  71.233 +                    col += 18
  71.234 +                    display(stdscr, row, col, "%s" % time_scale(h1[dom][3][0]))
  71.235 +                    col += 12
  71.236 +                    display(stdscr, row, col, "%3.2f%%" % h1[dom][3][1])
  71.237 +                    col += 12
  71.238 +                    display(stdscr, row, col, "%s/ex" % time_scale(h1[dom][3][2]))
  71.239 +                    col += 18
  71.240 +                    display(stdscr, row, col, "Waited")
  71.241  
  71.242                  # display ex count
  71.243 -                row += 1
  71.244 -                col = 2
  71.245 -                display(stdscr, row, col, "%d" % dom)
  71.246 -
  71.247 -                col += 28
  71.248 -                display(stdscr, row, col, "%d/s" % h2[dom][4])
  71.249 -                col += 42
  71.250 -                display(stdscr, row, col, "%d" % h1[dom][4])
  71.251 -                col += 18
  71.252 -                display(stdscr, row, col, "Execution count")
  71.253 +                if options.excount:
  71.254 +                    row += 1
  71.255 +                    col = 2
  71.256 +                    display(stdscr, row, col, "%d" % dom)
  71.257 +                    
  71.258 +                    col += 28
  71.259 +                    display(stdscr, row, col, "%d/s" % h2[dom][4])
  71.260 +                    col += 42
  71.261 +                    display(stdscr, row, col, "%d" % h1[dom][4])
  71.262 +                    col += 18
  71.263 +                    display(stdscr, row, col, "Execution count")
  71.264  
  71.265                  # display io count
  71.266 -                row += 1
  71.267 -                col = 2
  71.268 -                display(stdscr, row, col, "%d" % dom)
  71.269 -                col += 4
  71.270 -                display(stdscr, row, col, "%d/s" % h2[dom][5][0])
  71.271 -                col += 24
  71.272 -                display(stdscr, row, col, "%d/ex" % h2[dom][5][1])
  71.273 -                col += 18
  71.274 -                display(stdscr, row, col, "%d" % h1[dom][5][0])
  71.275 -                col += 24
  71.276 -                display(stdscr, row, col, "%3.2f/ex" % h1[dom][5][1])
  71.277 -                col += 18
  71.278 -                display(stdscr, row, col, "I/O Count")
  71.279 +                if options.iocount:
  71.280 +                    row += 1
  71.281 +                    col = 2
  71.282 +                    display(stdscr, row, col, "%d" % dom)
  71.283 +                    col += 4
  71.284 +                    display(stdscr, row, col, "%d/s" % h2[dom][5][0])
  71.285 +                    col += 24
  71.286 +                    display(stdscr, row, col, "%d/ex" % h2[dom][5][1])
  71.287 +                    col += 18
  71.288 +                    display(stdscr, row, col, "%d" % h1[dom][5][0])
  71.289 +                    col += 24
  71.290 +                    display(stdscr, row, col, "%3.2f/ex" % h1[dom][5][1])
  71.291 +                    col += 18
  71.292 +                    display(stdscr, row, col, "I/O Count")
  71.293  
  71.294              #row += 1
  71.295              #stdscr.hline(row, 1, '-', maxx - 2)
  71.296 @@ -426,6 +470,9 @@ def show_livestats():
  71.297  
  71.298  
  71.299          row += 1
  71.300 +        star = heartbeat * '*'
  71.301 +        heartbeat = 1 - heartbeat
  71.302 +        display(stdscr, row, 1, star)
  71.303          display(stdscr, row, 2, TOTALS % (total_h2_cpu, total_h1_cpu))
  71.304          row += 1
  71.305  #        display(stdscr, row, 2, 
  71.306 @@ -515,11 +562,11 @@ def writelog():
  71.307          outfiles[dom].delayed_write("# passed cpu dom cpu(tot) cpu(%) cpu/ex allocated/ex blocked(tot) blocked(%) blocked/io waited(tot) waited(%) waited/ex ex/s io(tot) io/ex\n")
  71.308  
  71.309      while options.duration == 0 or interval < (options.duration * 1000):
  71.310 -        for cpuidx in range(0, ncpu):
  71.311 +        cpuidx = 0
  71.312 +        while cpuidx < ncpu:
  71.313  
  71.314              idx = cpuidx * slen      # offset needed in mmap file
  71.315  
  71.316 -
  71.317              samples = []
  71.318              doms = []
  71.319              dom_in_use = []
  71.320 @@ -571,6 +618,7 @@ def writelog():
  71.321              curr = time.time()
  71.322              interval += (curr - last) * 1000
  71.323              last = curr
  71.324 +            cpuidx = cpuidx + 1
  71.325          time.sleep(options.interval / 1000.0)
  71.326  
  71.327      for dom in range(0, NDOMAINS):
  71.328 @@ -601,7 +649,7 @@ def main():
  71.329      
  71.330      start_xenbaked()
  71.331      if options.live:
  71.332 -        show_livestats()
  71.333 +        show_livestats(options.cpu)
  71.334      else:
  71.335          try:
  71.336              writelog()
    72.1 --- a/tools/xenstore/xenstored_core.c	Sat Apr 15 19:25:09 2006 +0100
    72.2 +++ b/tools/xenstore/xenstored_core.c	Sat Apr 15 19:25:21 2006 +0100
    72.3 @@ -77,6 +77,10 @@ static void check_store(void);
    72.4  	} while (0)
    72.5  
    72.6  
    72.7 +int quota_nb_entry_per_domain = 1000;
    72.8 +int quota_nb_watch_per_domain = 128;
    72.9 +int quota_max_entry_size = 2048; /* 2K */
   72.10 +
   72.11  #ifdef TESTING
   72.12  static bool failtest = false;
   72.13  
   72.14 @@ -455,6 +459,10 @@ static bool write_node(struct connection
   72.15  	data.dsize = 3*sizeof(uint32_t)
   72.16  		+ node->num_perms*sizeof(node->perms[0])
   72.17  		+ node->datalen + node->childlen;
   72.18 +
   72.19 +	if (data.dsize >= quota_max_entry_size)
   72.20 +		goto error;
   72.21 +
   72.22  	data.dptr = talloc_size(node, data.dsize);
   72.23  	((uint32_t *)data.dptr)[0] = node->num_perms;
   72.24  	((uint32_t *)data.dptr)[1] = node->datalen;
   72.25 @@ -470,10 +478,12 @@ static bool write_node(struct connection
   72.26  	/* TDB should set errno, but doesn't even set ecode AFAICT. */
   72.27  	if (tdb_store(tdb_context(conn), key, data, TDB_REPLACE) != 0) {
   72.28  		corrupt(conn, "Write of %s = %s failed", key, data);
   72.29 -		errno = ENOSPC;
   72.30 -		return false;
   72.31 +		goto error;
   72.32  	}
   72.33  	return true;
   72.34 + error:
   72.35 +	errno = ENOSPC;
   72.36 +	return false;
   72.37  }
   72.38  
   72.39  static enum xs_perm_type perm_for_conn(struct connection *conn,
   72.40 @@ -765,8 +775,11 @@ static void delete_node_single(struct co
   72.41  	key.dptr = (void *)node->name;
   72.42  	key.dsize = strlen(node->name);
   72.43  
   72.44 -	if (tdb_delete(tdb_context(conn), key) != 0)
   72.45 +	if (tdb_delete(tdb_context(conn), key) != 0) {
   72.46  		corrupt(conn, "Could not delete '%s'", node->name);
   72.47 +		return;
   72.48 +	}
   72.49 +	domain_entry_dec(conn);
   72.50  }
   72.51  
   72.52  /* Must not be / */
   72.53 @@ -788,7 +801,10 @@ static struct node *construct_node(struc
   72.54  		parent = construct_node(conn, parentname);
   72.55  	if (!parent)
   72.56  		return NULL;
   72.57 -	
   72.58 +
   72.59 +	if (domain_entry(conn) >= quota_nb_entry_per_domain)
   72.60 +		return NULL;
   72.61 +
   72.62  	/* Add child to parent. */
   72.63  	base = basename(name);
   72.64  	baselen = strlen(base) + 1;
   72.65 @@ -814,6 +830,7 @@ static struct node *construct_node(struc
   72.66  	node->children = node->data = NULL;
   72.67  	node->childlen = node->datalen = 0;
   72.68  	node->parent = parent;
   72.69 +	domain_entry_inc(conn);
   72.70  	return node;
   72.71  }
   72.72  
   72.73 @@ -848,8 +865,10 @@ static struct node *create_node(struct c
   72.74  	/* We write out the nodes down, setting destructor in case
   72.75  	 * something goes wrong. */
   72.76  	for (i = node; i; i = i->parent) {
   72.77 -		if (!write_node(conn, i))
   72.78 +		if (!write_node(conn, i)) {
   72.79 +			domain_entry_dec(conn);
   72.80  			return NULL;
   72.81 +		}
   72.82  		talloc_set_destructor(i, destroy_node);
   72.83  	}
   72.84  
   72.85 @@ -1706,6 +1725,9 @@ static void usage(void)
   72.86  "  --no-fork           to request that the daemon does not fork,\n"
   72.87  "  --output-pid        to request that the pid of the daemon is output,\n"
   72.88  "  --trace-file <file> giving the file for logging, and\n"
   72.89 +"  --entry-nb <nb>     limit the number of entries per domain,\n"
   72.90 +"  --entry-size <size> limit the size of entry per domain, and\n"
   72.91 +"  --entry-watch <nb>  limit the number of watches per domain,\n"
   72.92  "  --no-recovery       to request that no recovery should be attempted when\n"
   72.93  "                      the store is corrupted (debug only),\n"
   72.94  "  --preserve-local    to request that /local is preserved on start-up,\n"
   72.95 @@ -1715,14 +1737,17 @@ static void usage(void)
   72.96  
   72.97  static struct option options[] = {
   72.98  	{ "no-domain-init", 0, NULL, 'D' },
   72.99 +	{ "entry-nb", 1, NULL, 'E' },
  72.100  	{ "pid-file", 1, NULL, 'F' },
  72.101  	{ "help", 0, NULL, 'H' },
  72.102  	{ "no-fork", 0, NULL, 'N' },
  72.103  	{ "output-pid", 0, NULL, 'P' },
  72.104 +	{ "entry-size", 1, NULL, 'S' },
  72.105  	{ "trace-file", 1, NULL, 'T' },
  72.106  	{ "no-recovery", 0, NULL, 'R' },
  72.107  	{ "preserve-local", 0, NULL, 'L' },
  72.108  	{ "verbose", 0, NULL, 'V' },
  72.109 +	{ "watch-nb", 1, NULL, 'W' },
  72.110  	{ NULL, 0, NULL, 0 } };
  72.111  
  72.112  extern void dump_conn(struct connection *conn); 
  72.113 @@ -1737,12 +1762,15 @@ int main(int argc, char *argv[])
  72.114  	bool no_domain_init = false;
  72.115  	const char *pidfile = NULL;
  72.116  
  72.117 -	while ((opt = getopt_long(argc, argv, "DF:HNPT:RLV", options,
  72.118 +	while ((opt = getopt_long(argc, argv, "DE:F:HNPS:T:RLVW:", options,
  72.119  				  NULL)) != -1) {
  72.120  		switch (opt) {
  72.121  		case 'D':
  72.122  			no_domain_init = true;
  72.123  			break;
  72.124 +		case 'E':
  72.125 +			quota_nb_entry_per_domain = strtol(optarg, NULL, 10);
  72.126 +			break;
  72.127  		case 'F':
  72.128  			pidfile = optarg;
  72.129  			break;
  72.130 @@ -1761,12 +1789,18 @@ int main(int argc, char *argv[])
  72.131  		case 'L':
  72.132  			remove_local = false;
  72.133  			break;
  72.134 +		case 'S':
  72.135 +			quota_max_entry_size = strtol(optarg, NULL, 10);
  72.136 +			break;
  72.137  		case 'T':
  72.138  			tracefile = optarg;
  72.139  			break;
  72.140  		case 'V':
  72.141  			verbose = true;
  72.142  			break;
  72.143 +		case 'W':
  72.144 +			quota_nb_watch_per_domain = strtol(optarg, NULL, 10);
  72.145 +			break;
  72.146  		}
  72.147  	}
  72.148  	if (optind != argc)
    73.1 --- a/tools/xenstore/xenstored_domain.c	Sat Apr 15 19:25:09 2006 +0100
    73.2 +++ b/tools/xenstore/xenstored_domain.c	Sat Apr 15 19:25:21 2006 +0100
    73.3 @@ -74,6 +74,12 @@ struct domain
    73.4  
    73.5  	/* Have we noticed that this domain is shutdown? */
    73.6  	int shutdown;
    73.7 +
    73.8 +	/* number of entry from this domain in the store */
    73.9 +	int nbentry;
   73.10 +
   73.11 +	/* number of watch for this domain */
   73.12 +	int nbwatch;
   73.13  };
   73.14  
   73.15  static LIST_HEAD(domains);
   73.16 @@ -285,6 +291,8 @@ static struct domain *new_domain(void *c
   73.17  	domain->conn->id = domid;
   73.18  
   73.19  	domain->remote_port = port;
   73.20 +	domain->nbentry = 0;
   73.21 +	domain->nbwatch = 0;
   73.22  
   73.23  	return domain;
   73.24  }
   73.25 @@ -562,6 +570,50 @@ int domain_init(void)
   73.26  	return eventchn_fd;
   73.27  }
   73.28  
   73.29 +void domain_entry_inc(struct connection *conn)
   73.30 +{
   73.31 +	if (!conn || !conn->domain)
   73.32 +		return;
   73.33 +	conn->domain->nbentry++;
   73.34 +}
   73.35 +
   73.36 +void domain_entry_dec(struct connection *conn)
   73.37 +{
   73.38 +	if (!conn || !conn->domain)
   73.39 +		return;
   73.40 +	if (conn->domain->nbentry)
   73.41 +		conn->domain->nbentry--;
   73.42 +}
   73.43 +
   73.44 +int domain_entry(struct connection *conn)
   73.45 +{
   73.46 +	return (conn && conn->domain && conn->domain->domid)
   73.47 +		? conn->domain->nbentry
   73.48 +		: 0;
   73.49 +}
   73.50 +
   73.51 +void domain_watch_inc(struct connection *conn)
   73.52 +{
   73.53 +	if (!conn || !conn->domain)
   73.54 +		return;
   73.55 +	conn->domain->nbwatch++;
   73.56 +}
   73.57 +
   73.58 +void domain_watch_dec(struct connection *conn)
   73.59 +{
   73.60 +	if (!conn || !conn->domain)
   73.61 +		return;
   73.62 +	if (conn->domain->nbwatch)
   73.63 +		conn->domain->nbwatch--;
   73.64 +}
   73.65 +
   73.66 +int domain_watch(struct connection *conn)
   73.67 +{
   73.68 +	return (conn && conn->domain && conn->domain->domid)
   73.69 +		? conn->domain->nbwatch
   73.70 +		: 0;
   73.71 +}
   73.72 +
   73.73  /*
   73.74   * Local variables:
   73.75   *  c-file-style: "linux"
    74.1 --- a/tools/xenstore/xenstored_domain.h	Sat Apr 15 19:25:09 2006 +0100
    74.2 +++ b/tools/xenstore/xenstored_domain.h	Sat Apr 15 19:25:21 2006 +0100
    74.3 @@ -47,4 +47,12 @@ void restore_existing_connections(void);
    74.4  bool domain_can_read(struct connection *conn);
    74.5  bool domain_can_write(struct connection *conn);
    74.6  
    74.7 +/* Quota manipulation */
    74.8 +void domain_entry_inc(struct connection *conn);
    74.9 +void domain_entry_dec(struct connection *conn);
   74.10 +int domain_entry(struct connection *conn);
   74.11 +void domain_watch_inc(struct connection *conn);
   74.12 +void domain_watch_dec(struct connection *conn);
   74.13 +int domain_watch(struct connection *conn);
   74.14 +
   74.15  #endif /* _XENSTORED_DOMAIN_H */
    75.1 --- a/tools/xenstore/xenstored_watch.c	Sat Apr 15 19:25:09 2006 +0100
    75.2 +++ b/tools/xenstore/xenstored_watch.c	Sat Apr 15 19:25:21 2006 +0100
    75.3 @@ -32,6 +32,8 @@
    75.4  #include "xenstored_test.h"
    75.5  #include "xenstored_domain.h"
    75.6  
    75.7 +extern int quota_nb_watch_per_domain;
    75.8 +
    75.9  struct watch
   75.10  {
   75.11  	/* Watches on this connection */
   75.12 @@ -135,6 +137,11 @@ void do_watch(struct connection *conn, s
   75.13  		}
   75.14  	}
   75.15  
   75.16 +	if (domain_watch(conn) > quota_nb_watch_per_domain) {
   75.17 +		send_error(conn, E2BIG);
   75.18 +		return;
   75.19 +	}
   75.20 +
   75.21  	watch = talloc(conn, struct watch);
   75.22  	watch->node = talloc_strdup(watch, vec[0]);
   75.23  	watch->token = talloc_strdup(watch, vec[1]);
   75.24 @@ -145,6 +152,7 @@ void do_watch(struct connection *conn, s
   75.25  
   75.26  	INIT_LIST_HEAD(&watch->events);
   75.27  
   75.28 +	domain_watch_inc(conn);
   75.29  	list_add_tail(&watch->list, &conn->watches);
   75.30  	trace_create(watch, "watch");
   75.31  	talloc_set_destructor(watch, destroy_watch);
   75.32 @@ -169,6 +177,7 @@ void do_unwatch(struct connection *conn,
   75.33  		if (streq(watch->node, node) && streq(watch->token, vec[1])) {
   75.34  			list_del(&watch->list);
   75.35  			talloc_free(watch);
   75.36 +			domain_watch_dec(conn);
   75.37  			send_ack(conn, XS_UNWATCH);
   75.38  			return;
   75.39  		}
    76.1 --- a/xen/Makefile	Sat Apr 15 19:25:09 2006 +0100
    76.2 +++ b/xen/Makefile	Sat Apr 15 19:25:21 2006 +0100
    76.3 @@ -10,19 +10,22 @@ export BASEDIR := $(CURDIR)
    76.4  .PHONY: default
    76.5  default: build
    76.6  
    76.7 -ifeq ($(XEN_ROOT),)
    76.8 -
    76.9 -.PHONY: build install clean
   76.10 -build install clean:
   76.11 -	make -f Rules.mk $@
   76.12 +.PHONY: dist
   76.13 +dist: install
   76.14  
   76.15 -else
   76.16 +.PHONY: debug
   76.17 +debug:
   76.18 +	objdump -D -S $(TARGET)-syms > $(TARGET).s
   76.19  
   76.20 -.PHONY: build
   76.21 -build: $(TARGET).gz
   76.22 +.PHONY: build install clean cscope TAGS tags
   76.23 +build install clean cscope TAGS tags::
   76.24 +	make -f Rules.mk _$@
   76.25  
   76.26 -.PHONY: install
   76.27 -install: $(TARGET).gz
   76.28 +.PHONY: _build
   76.29 +_build: $(TARGET).gz
   76.30 +
   76.31 +.PHONY: _install
   76.32 +_install: $(TARGET).gz
   76.33  	[ -d $(DESTDIR)/boot ] || $(INSTALL_DIR) $(DESTDIR)/boot
   76.34  	$(INSTALL_DATA) $(TARGET).gz $(DESTDIR)/boot/$(notdir $(TARGET))-$(XEN_FULLVERSION).gz
   76.35  	ln -f -s $(notdir $(TARGET))-$(XEN_FULLVERSION).gz $(DESTDIR)/boot/$(notdir $(TARGET))-$(XEN_VERSION).$(XEN_SUBVERSION).gz
   76.36 @@ -35,8 +38,8 @@ install: $(TARGET).gz
   76.37  	$(INSTALL_DATA) include/public/io/*.h $(DESTDIR)/usr/include/xen/io
   76.38  	$(INSTALL_DATA) include/public/COPYING $(DESTDIR)/usr/include/xen
   76.39  
   76.40 -.PHONY: clean
   76.41 -clean:: delete-unfresh-files
   76.42 +.PHONY: _clean
   76.43 +_clean: delete-unfresh-files
   76.44  	$(MAKE) -C tools clean
   76.45  	$(MAKE) -f $(BASEDIR)/Rules.mk -C common clean
   76.46  	$(MAKE) -f $(BASEDIR)/Rules.mk -C drivers clean
   76.47 @@ -46,15 +49,6 @@ clean:: delete-unfresh-files
   76.48  	rm -f include/asm-*/asm-offsets.h
   76.49  	rm -f include/xen/acm_policy.h
   76.50  
   76.51 -endif
   76.52 -
   76.53 -.PHONY: dist
   76.54 -dist: install
   76.55 -
   76.56 -.PHONY: debug
   76.57 -debug:
   76.58 -	objdump -D -S $(TARGET)-syms > $(TARGET).s
   76.59 -
   76.60  $(TARGET).gz: $(TARGET)
   76.61  	gzip -f -9 < $< > $@.new
   76.62  	mv $@.new $@
   76.63 @@ -135,16 +129,16 @@ define all_sources
   76.64        find $(SUBDIRS) -name SCCS -prune -o -name '*.[chS]' -print )
   76.65  endef
   76.66  
   76.67 -.PHONY: TAGS
   76.68 -TAGS: 
   76.69 +.PHONY: _TAGS
   76.70 +_TAGS: 
   76.71  	$(all_sources) | etags -
   76.72  
   76.73 -.PHONY: tags
   76.74 -tags: 
   76.75 +.PHONY: _tags
   76.76 +_tags: 
   76.77  	$(all_sources) | xargs ctags
   76.78  
   76.79 -.PHONY: cscope
   76.80 -cscope: 
   76.81 +.PHONY: _cscope
   76.82 +_cscope:
   76.83  	$(all_sources) > cscope.files
   76.84  	cscope -k -b -q
   76.85  
    77.1 --- a/xen/arch/x86/Makefile	Sat Apr 15 19:25:09 2006 +0100
    77.2 +++ b/xen/arch/x86/Makefile	Sat Apr 15 19:25:21 2006 +0100
    77.3 @@ -76,6 +76,7 @@ boot/mkelf32: boot/mkelf32.c
    77.4  	$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
    77.5  
    77.6  shadow_guest32.o: shadow.c
    77.7 +shadow_guest32pae.o: shadow.c
    77.8  
    77.9  .PHONY: clean
   77.10  clean::
    78.1 --- a/xen/arch/x86/audit.c	Sat Apr 15 19:25:09 2006 +0100
    78.2 +++ b/xen/arch/x86/audit.c	Sat Apr 15 19:25:21 2006 +0100
    78.3 @@ -639,7 +639,7 @@ void _audit_domain(struct domain *d, int
    78.4      void scan_for_pfn_in_grant_table(struct domain *d, unsigned xmfn)
    78.5      {
    78.6          int i;
    78.7 -        active_grant_entry_t *act = d->grant_table->active;
    78.8 +        struct active_grant_entry *act = d->grant_table->active;
    78.9  
   78.10          spin_lock(&d->grant_table->lock);
   78.11  
    79.1 --- a/xen/arch/x86/hvm/intercept.c	Sat Apr 15 19:25:09 2006 +0100
    79.2 +++ b/xen/arch/x86/hvm/intercept.c	Sat Apr 15 19:25:21 2006 +0100
    79.3 @@ -208,8 +208,9 @@ int register_io_handler(unsigned long ad
    79.4  
    79.5  static void pit_cal_count(struct hvm_virpit *vpit)
    79.6  {
    79.7 -    u64 nsec_delta = (unsigned int)((NOW() - vpit->inject_point));
    79.8 +    u64 nsec_delta = (unsigned int)((NOW() - vpit->count_point));
    79.9  
   79.10 +    nsec_delta += vpit->count_advance;
   79.11      if (nsec_delta > vpit->period)
   79.12          HVM_DBG_LOG(DBG_LEVEL_1,
   79.13  	            "HVM_PIT: long time has passed from last injection!");
    80.1 --- a/xen/arch/x86/hvm/svm/intr.c	Sat Apr 15 19:25:09 2006 +0100
    80.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Sat Apr 15 19:25:21 2006 +0100
    80.3 @@ -79,7 +79,8 @@ interrupt_post_injection(struct vcpu * v
    80.4          } else {
    80.5              vpit->pending_intr_nr--;
    80.6          }
    80.7 -        vpit->inject_point = NOW();
    80.8 +        vpit->count_advance = 0;
    80.9 +        vpit->count_point = NOW();
   80.10  
   80.11          vpit->last_pit_gtime += vpit->period_cycles;
   80.12          svm_set_guest_time(v, vpit->last_pit_gtime);
    81.1 --- a/xen/arch/x86/hvm/svm/svm.c	Sat Apr 15 19:25:09 2006 +0100
    81.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Sat Apr 15 19:25:21 2006 +0100
    81.3 @@ -315,20 +315,30 @@ static inline int long_mode_do_msr_write
    81.4      {
    81.5      case MSR_EFER:
    81.6  #ifdef __x86_64__
    81.7 -        if ((msr_content & EFER_LME) ^ test_bit(SVM_CPU_STATE_LME_ENABLED,
    81.8 -                                                &vc->arch.hvm_svm.cpu_state))
    81.9 +        /* offending reserved bit will cause #GP */
   81.10 +        if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
   81.11          {
   81.12 -            if (test_bit(SVM_CPU_STATE_PG_ENABLED, &vc->arch.hvm_svm.cpu_state)
   81.13 -                    || !test_bit(SVM_CPU_STATE_PAE_ENABLED,
   81.14 -                                 &vc->arch.hvm_svm.cpu_state))
   81.15 +            printk("trying to set reserved bit in EFER\n");
   81.16 +            svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
   81.17 +            return 0;
   81.18 +        }
   81.19 +
   81.20 +        /* LME: 0 -> 1 */
   81.21 +        if ( msr_content & EFER_LME &&
   81.22 +             !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state) )
   81.23 +        {
   81.24 +            if ( svm_paging_enabled(vc) ||
   81.25 +                 !test_bit(SVM_CPU_STATE_PAE_ENABLED,
   81.26 +                           &vc->arch.hvm_svm.cpu_state) )
   81.27              {
   81.28 +                printk("trying to set LME bit when "
   81.29 +                       "in paging mode or PAE bit is not set\n");
   81.30                  svm_inject_exception(vmcb, TRAP_gp_fault, 1, 0);
   81.31 +                return 0;
   81.32              }
   81.33 +            set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
   81.34          }
   81.35  
   81.36 -        if (msr_content & EFER_LME)
   81.37 -            set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
   81.38 -
   81.39          /* We have already recorded that we want LME, so it will be set 
   81.40           * next time CR0 gets updated. So we clear that bit and continue.
   81.41           */
   81.42 @@ -669,6 +679,7 @@ static void svm_freeze_time(struct vcpu 
   81.43      
   81.44      if ( vpit->first_injected && !v->domain->arch.hvm_domain.guest_time ) {
   81.45          v->domain->arch.hvm_domain.guest_time = svm_get_guest_time(v);
   81.46 +        vpit->count_advance += (NOW() - vpit->count_point);
   81.47          stop_timer(&(vpit->pit_timer));
   81.48      }
   81.49  }
   81.50 @@ -757,7 +768,8 @@ void arch_svm_do_resume(struct vcpu *v)
   81.51          reset_stack_and_jump( svm_asm_do_resume );
   81.52      }
   81.53      else {
   81.54 -        printk("VCPU core pinned: %d to %d\n", v->arch.hvm_svm.launch_core, smp_processor_id() );
   81.55 +        printk("VCPU core pinned: %d to %d\n", 
   81.56 +                v->arch.hvm_svm.launch_core, smp_processor_id() );
   81.57          v->arch.hvm_svm.launch_core = smp_processor_id();
   81.58          svm_migrate_timers( v );
   81.59          svm_do_resume( v );
   81.60 @@ -922,6 +934,7 @@ static void svm_vmexit_do_cpuid(struct v
   81.61              clear_bit(X86_FEATURE_APIC, &edx);
   81.62  	    
   81.63  #if CONFIG_PAGING_LEVELS < 3
   81.64 +        clear_bit(X86_FEATURE_NX, &edx);
   81.65          clear_bit(X86_FEATURE_PAE, &edx);
   81.66          clear_bit(X86_FEATURE_PSE, &edx);
   81.67          clear_bit(X86_FEATURE_PSE36, &edx);
   81.68 @@ -929,12 +942,14 @@ static void svm_vmexit_do_cpuid(struct v
   81.69          if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
   81.70          {
   81.71              if ( !v->domain->arch.hvm_domain.pae_enabled )
   81.72 -                clear_bit(X86_FEATURE_PAE, &edx);
   81.73 +            {
   81.74 +               clear_bit(X86_FEATURE_PAE, &edx);
   81.75 +               clear_bit(X86_FEATURE_NX, &edx);
   81.76 +            }
   81.77              clear_bit(X86_FEATURE_PSE, &edx);
   81.78              clear_bit(X86_FEATURE_PSE36, &edx);
   81.79          }
   81.80 -#endif
   81.81 -	
   81.82 +#endif	
   81.83          /* Clear out reserved bits. */
   81.84          ecx &= ~SVM_VCPU_CPUID_L1_RESERVED; /* mask off reserved bits */
   81.85          clear_bit(X86_FEATURE_MWAIT & 31, &ecx);
   81.86 @@ -1312,8 +1327,7 @@ static int svm_set_cr0(unsigned long val
   81.87      unsigned long mfn;
   81.88      int paging_enabled;
   81.89      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   81.90 -    unsigned long crn;
   81.91 -
   81.92 +  
   81.93      ASSERT(vmcb);
   81.94  
   81.95      /* We don't want to lose PG.  ET is reserved and should be always be 1*/
   81.96 @@ -1358,37 +1372,39 @@ static int svm_set_cr0(unsigned long val
   81.97              set_bit(SVM_CPU_STATE_LMA_ENABLED,
   81.98                      &v->arch.hvm_svm.cpu_state);
   81.99              vmcb->efer |= (EFER_LMA | EFER_LME);
  81.100 -
  81.101 -#if CONFIG_PAGING_LEVELS >= 4 
  81.102 -            if (!shadow_set_guest_paging_levels(v->domain, 4)) 
  81.103 +            if (!shadow_set_guest_paging_levels(v->domain, PAGING_L4) )
  81.104              {
  81.105                  printk("Unsupported guest paging levels\n");
  81.106                  domain_crash_synchronous(); /* need to take a clean path */
  81.107              }
  81.108 -#endif
  81.109          }
  81.110          else
  81.111  #endif  /* __x86_64__ */
  81.112          {
  81.113  #if CONFIG_PAGING_LEVELS >= 3
  81.114 -            if (!shadow_set_guest_paging_levels(v->domain, 2))
  81.115 +            /* seems it's a 32-bit or 32-bit PAE guest */
  81.116 +            if ( test_bit(SVM_CPU_STATE_PAE_ENABLED,
  81.117 +                        &v->arch.hvm_svm.cpu_state) )
  81.118              {
  81.119 -                printk("Unsupported guest paging levels\n");
  81.120 -                domain_crash_synchronous(); /* need to take a clean path */
  81.121 +                /* The guest enables PAE first and then it enables PG, it is
  81.122 +                 * really a PAE guest */
  81.123 +                if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
  81.124 +                {
  81.125 +                    printk("Unsupported guest paging levels\n");
  81.126 +                    domain_crash_synchronous();
  81.127 +                }
  81.128 +            }
  81.129 +            else
  81.130 +            {
  81.131 +                if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L2) )
  81.132 +                {
  81.133 +                    printk("Unsupported guest paging levels\n");
  81.134 +                    domain_crash_synchronous(); /* need to take a clean path */
  81.135 +                }
  81.136              }
  81.137  #endif
  81.138          }
  81.139  
  81.140 -        /* update CR4's PAE if needed */
  81.141 -        crn = vmcb->cr4;
  81.142 -        if ((!(crn & X86_CR4_PAE)) 
  81.143 -                && test_bit(SVM_CPU_STATE_PAE_ENABLED, 
  81.144 -                    &v->arch.hvm_svm.cpu_state))
  81.145 -        {
  81.146 -            HVM_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n");
  81.147 -            vmcb->cr4 |= X86_CR4_PAE;
  81.148 -        }
  81.149 -
  81.150          /* Now arch.guest_table points to machine physical. */
  81.151          v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
  81.152          update_pagetables(v);
  81.153 @@ -1402,8 +1418,17 @@ static int svm_set_cr0(unsigned long val
  81.154          /* arch->shadow_table should hold the next CR3 for shadow */
  81.155          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx\n", 
  81.156                      v->arch.hvm_svm.cpu_cr3, mfn);
  81.157 +
  81.158 +        return 1;
  81.159      }
  81.160  
  81.161 +    if ( !((value & X86_CR0_PE) && (value & X86_CR0_PG)) && paging_enabled )
  81.162 +        if ( v->arch.hvm_svm.cpu_cr3 ) {
  81.163 +            put_page(mfn_to_page(get_mfn_from_gpfn(
  81.164 +                      v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
  81.165 +            v->arch.guest_table = mk_pagetable(0);
  81.166 +        }
  81.167 +
  81.168      /*
  81.169       * SVM implements paged real-mode and when we return to real-mode
  81.170       * we revert back to the physical mappings that the domain builder
  81.171 @@ -1415,6 +1440,14 @@ static int svm_set_cr0(unsigned long val
  81.172              return 0;
  81.173          }
  81.174  
  81.175 +        clear_all_shadow_status( v->domain );
  81.176 +        set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
  81.177 +        vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
  81.178 +    }
  81.179 +    else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
  81.180 +    {
  81.181 +        /* we should take care of this kind of situation */
  81.182 +        clear_all_shadow_status(v->domain);
  81.183          set_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
  81.184          vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
  81.185      }
  81.186 @@ -1438,15 +1471,21 @@ static void mov_from_cr(int cr, int gp, 
  81.187      {
  81.188      case 0:
  81.189          value = v->arch.hvm_svm.cpu_shadow_cr0;
  81.190 -        break;
  81.191 +        if (svm_dbg_on)
  81.192 +            printk("CR0 read =%lx \n", value );
  81.193 +          break;
  81.194      case 2:
  81.195          value = vmcb->cr2;
  81.196          break;
  81.197      case 3:
  81.198          value = (unsigned long) v->arch.hvm_svm.cpu_cr3;
  81.199 -        break;
  81.200 +        if (svm_dbg_on)
  81.201 +            printk("CR3 read =%lx \n", value );
  81.202 +          break;
  81.203      case 4:
  81.204          value = (unsigned long) v->arch.hvm_svm.cpu_shadow_cr4;
  81.205 +        if (svm_dbg_on)
  81.206 +           printk( "CR4 read=%lx\n", value );
  81.207          break;
  81.208      case 8:
  81.209  #if 0
  81.210 @@ -1466,6 +1505,12 @@ static void mov_from_cr(int cr, int gp, 
  81.211  }
  81.212  
  81.213  
  81.214 +static inline int svm_pgbit_test(struct vcpu *v)
  81.215 +{
  81.216 +   return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
  81.217 +}
  81.218 +
  81.219 +
  81.220  /*
  81.221   * Write to control registers
  81.222   */
  81.223 @@ -1486,12 +1531,15 @@ static int mov_to_cr(int gpreg, int cr, 
  81.224      switch (cr) 
  81.225      {
  81.226      case 0: 
  81.227 +        if (svm_dbg_on)
  81.228 +            printk("CR0 write =%lx \n", value );
  81.229          return svm_set_cr0(value);
  81.230  
  81.231      case 3: 
  81.232      {
  81.233          unsigned long old_base_mfn, mfn;
  81.234 -
  81.235 +        if (svm_dbg_on)
  81.236 +            printk("CR3 write =%lx \n", value );
  81.237          /* If paging is not enabled yet, simply copy the value to CR3. */
  81.238          if (!svm_paging_enabled(v)) {
  81.239              v->arch.hvm_svm.cpu_cr3 = value;
  81.240 @@ -1533,19 +1581,104 @@ static int mov_to_cr(int gpreg, int cr, 
  81.241              if (old_base_mfn)
  81.242                  put_page(mfn_to_page(old_base_mfn));
  81.243  
  81.244 +            /*
  81.245 +             * arch.shadow_table should now hold the next CR3 for shadow
  81.246 +             */
  81.247 +#if CONFIG_PAGING_LEVELS >= 3
  81.248 +            if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 )
  81.249 +                shadow_sync_all(v->domain);
  81.250 +#endif
  81.251 +            v->arch.hvm_svm.cpu_cr3 = value;
  81.252              update_pagetables(v);
  81.253 -            
  81.254 -            /* arch.shadow_table should now hold the next CR3 for shadow*/
  81.255 -            v->arch.hvm_svm.cpu_cr3 = value;
  81.256              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
  81.257              vmcb->cr3 = pagetable_get_paddr(v->arch.shadow_table);
  81.258          }
  81.259          break;
  81.260      }
  81.261  
  81.262 -    case 4:         
  81.263 -        /* CR4 */
  81.264 -        if (value & X86_CR4_PAE) {
  81.265 +    case 4: /* CR4 */
  81.266 +    {
  81.267 +        if (svm_dbg_on)
  81.268 +            printk( "write cr4=%lx, cr0=%lx\n", 
  81.269 +                     value,  v->arch.hvm_svm.cpu_shadow_cr0 );
  81.270 +        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
  81.271 +        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
  81.272 +        {
  81.273 +            set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
  81.274 +            if ( svm_pgbit_test(v) )
  81.275 +            {
  81.276 +                /* The guest is a 32-bit PAE guest. */
  81.277 +#if CONFIG_PAGING_LEVELS >= 4
  81.278 +                unsigned long mfn, old_base_mfn;
  81.279 +
  81.280 +                if( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
  81.281 +                {
  81.282 +                    printk("Unsupported guest paging levels\n");
  81.283 +                    domain_crash_synchronous(); /* need to take a clean path */
  81.284 +                }
  81.285 +
  81.286 +                if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
  81.287 +                                    v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)) ||
  81.288 +                     !get_page(mfn_to_page(mfn), v->domain) )
  81.289 +                {
  81.290 +                    printk("Invalid CR3 value = %lx", v->arch.hvm_svm.cpu_cr3);
  81.291 +                    domain_crash_synchronous(); /* need to take a clean path */
  81.292 +                }
  81.293 +
  81.294 +                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  81.295 +                if ( old_base_mfn )
  81.296 +                    put_page(mfn_to_page(old_base_mfn));
  81.297 +
  81.298 +                /*
  81.299 +                 * Now arch.guest_table points to machine physical.
  81.300 +                 */
  81.301 +
  81.302 +                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
  81.303 +                update_pagetables(v);
  81.304 +
  81.305 +                HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
  81.306 +                            (unsigned long) (mfn << PAGE_SHIFT));
  81.307 +
  81.308 +                vmcb->cr3 = pagetable_get_paddr(v->arch.shadow_table);
  81.309 +
  81.310 +                /*
  81.311 +                 * arch->shadow_table should hold the next CR3 for shadow
  81.312 +                 */
  81.313 +
  81.314 +                HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
  81.315 +                            v->arch.hvm_svm.cpu_cr3, mfn);
  81.316 +#endif
  81.317 +            }
  81.318 +            else
  81.319 +            {
  81.320 +                /*  The guest is a 64 bit or 32-bit PAE guest. */
  81.321 +#if CONFIG_PAGING_LEVELS >= 4
  81.322 +                if ( (v->domain->arch.ops != NULL) &&
  81.323 +                        v->domain->arch.ops->guest_paging_levels == PAGING_L2)
  81.324 +                {
  81.325 +                    /* Seems the guest first enables PAE without enabling PG,
  81.326 +                     * it must enable PG after that, and it is a 32-bit PAE
  81.327 +                     * guest */
  81.328 +
  81.329 +                    if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
  81.330 +                    {
  81.331 +                        printk("Unsupported guest paging levels\n");
  81.332 +                        domain_crash_synchronous();
  81.333 +                    }                   
  81.334 +                }
  81.335 +                else
  81.336 +                {
  81.337 +                    if ( !shadow_set_guest_paging_levels(v->domain,
  81.338 +                                                            PAGING_L4) )
  81.339 +                    {
  81.340 +                        printk("Unsupported guest paging levels\n");
  81.341 +                        domain_crash_synchronous();
  81.342 +                    }
  81.343 +                }
  81.344 +#endif
  81.345 +            }
  81.346 +        }
  81.347 +        else if (value & X86_CR4_PAE) {
  81.348              set_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
  81.349          } else {
  81.350              if (test_bit(SVM_CPU_STATE_LMA_ENABLED,
  81.351 @@ -1555,7 +1688,6 @@ static int mov_to_cr(int gpreg, int cr, 
  81.352              clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
  81.353          }
  81.354  
  81.355 -        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
  81.356          v->arch.hvm_svm.cpu_shadow_cr4 = value;
  81.357          vmcb->cr4 = value | SVM_CR4_HOST_MASK;
  81.358    
  81.359 @@ -1569,6 +1701,7 @@ static int mov_to_cr(int gpreg, int cr, 
  81.360              shadow_sync_all(v->domain);
  81.361          }
  81.362          break;
  81.363 +    }
  81.364  
  81.365      default:
  81.366          printk("invalid cr: %d\n", cr);
  81.367 @@ -1933,6 +2066,7 @@ static int svm_do_vmmcall_reset_to_realm
  81.368  
  81.369      vmcb->cr4 = SVM_CR4_HOST_MASK;
  81.370      v->arch.hvm_svm.cpu_shadow_cr4 = 0;
  81.371 +    clear_bit(SVM_CPU_STATE_PAE_ENABLED, &v->arch.hvm_svm.cpu_state);
  81.372  
  81.373      /* This will jump to ROMBIOS */
  81.374      vmcb->rip = 0xFFF0;
  81.375 @@ -1989,6 +2123,7 @@ static int svm_do_vmmcall_reset_to_realm
  81.376      vmcb->idtr.base = 0x00;
  81.377  
  81.378      vmcb->rax = 0;
  81.379 +    vmcb->rsp = 0;
  81.380  
  81.381      return 0;
  81.382  }
  81.383 @@ -2280,7 +2415,8 @@ void walk_shadow_and_guest_pt(unsigned l
  81.384      gpte.l1 = 0;
  81.385      __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ], sizeof(gpte) );
  81.386      printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
  81.387 -    __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ], sizeof(spte) );
  81.388 +    __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ], 
  81.389 +                      sizeof(spte) );
  81.390      printk( "S-PTE = %x, flags=%x\n", spte.l1, l1e_get_flags(spte));
  81.391  }
  81.392  #endif /* SVM_WALK_GUEST_PAGES */
  81.393 @@ -2314,18 +2450,29 @@ asmlinkage void svm_vmexit_handler(struc
  81.394      {
  81.395          if (svm_paging_enabled(v) && !mmio_space(gva_to_gpa(vmcb->exitinfo2)))
  81.396          {
  81.397 +            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, gpa=%llx\n", 
  81.398 +                    intercepts_counter,
  81.399 +                    exit_reasons[exit_reason], exit_reason, regs.cs,
  81.400 +		    (unsigned long long) regs.rip,
  81.401 +		    (unsigned long long) vmcb->exitinfo1,
  81.402 +		    (unsigned long long) vmcb->exitinfo2,
  81.403 +		    (unsigned long long) vmcb->exitintinfo.bytes,
  81.404 +            (unsigned long long) gva_to_gpa( vmcb->exitinfo2 ) );
  81.405 +        }
  81.406 +        else 
  81.407 +        {
  81.408              printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx\n", 
  81.409                      intercepts_counter,
  81.410                      exit_reasons[exit_reason], exit_reason, regs.cs,
  81.411  		    (unsigned long long) regs.rip,
  81.412  		    (unsigned long long) vmcb->exitinfo1,
  81.413  		    (unsigned long long) vmcb->exitinfo2,
  81.414 -		    (unsigned long long) vmcb->exitintinfo.bytes);
  81.415 +		    (unsigned long long) vmcb->exitintinfo.bytes );
  81.416          }
  81.417      } 
  81.418 -    else if (svm_dbg_on 
  81.419 -            && exit_reason != VMEXIT_IOIO 
  81.420 -            && exit_reason != VMEXIT_INTR) 
  81.421 +    else if ( svm_dbg_on 
  81.422 +              && exit_reason != VMEXIT_IOIO 
  81.423 +              && exit_reason != VMEXIT_INTR) 
  81.424      {
  81.425  
  81.426          if (exit_reasons[exit_reason])
  81.427 @@ -2350,7 +2497,9 @@ asmlinkage void svm_vmexit_handler(struc
  81.428      }
  81.429  
  81.430  #ifdef SVM_WALK_GUEST_PAGES
  81.431 -    if( exit_reason == VMEXIT_EXCEPTION_PF && ( ( vmcb->exitinfo2 == vmcb->rip )|| vmcb->exitintinfo.bytes) )
  81.432 +    if( exit_reason == VMEXIT_EXCEPTION_PF 
  81.433 +        && ( ( vmcb->exitinfo2 == vmcb->rip )
  81.434 +        || vmcb->exitintinfo.bytes) )
  81.435      {
  81.436         if (svm_paging_enabled(v) && !mmio_space(gva_to_gpa(vmcb->exitinfo2)))     
  81.437             walk_shadow_and_guest_pt( vmcb->exitinfo2 );
  81.438 @@ -2434,13 +2583,24 @@ asmlinkage void svm_vmexit_handler(struc
  81.439           */
  81.440          break;
  81.441  
  81.442 +    case VMEXIT_INIT:
  81.443 +        /*
  81.444 +         * Nothing to do, in fact we should never get to this point. 
  81.445 +         */
  81.446 +        break;
  81.447 +
  81.448 +    case VMEXIT_EXCEPTION_BP:
  81.449  #ifdef XEN_DEBUGGER
  81.450 -    case VMEXIT_EXCEPTION_BP:
  81.451          svm_debug_save_cpu_user_regs(&regs);
  81.452          pdb_handle_exception(3, &regs, 1);
  81.453          svm_debug_restore_cpu_user_regs(&regs);
  81.454 +#else
  81.455 +        if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
  81.456 +            domain_pause_for_debugger();
  81.457 +        else 
  81.458 +            svm_inject_exception(vmcb, TRAP_int3, 0, 0);
  81.459 +#endif
  81.460          break;
  81.461 -#endif
  81.462  
  81.463      case VMEXIT_EXCEPTION_NM:
  81.464          svm_do_no_device_fault(vmcb);
    82.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Sat Apr 15 19:25:09 2006 +0100
    82.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Sat Apr 15 19:25:21 2006 +0100
    82.3 @@ -257,7 +257,8 @@ static int construct_init_vmcb_guest(str
    82.4      /* CR3 is set in svm_final_setup_guest */
    82.5  
    82.6      __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) :); 
    82.7 -    arch_svm->cpu_shadow_cr4 = crn & ~(X86_CR4_PGE | X86_CR4_PSE);
    82.8 +    crn &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
    82.9 +    arch_svm->cpu_shadow_cr4 = crn;
   82.10      vmcb->cr4 = crn | SVM_CR4_HOST_MASK;
   82.11  
   82.12      vmcb->rsp = 0;
   82.13 @@ -484,6 +485,7 @@ void svm_do_resume(struct vcpu *v)
   82.14      if ( vpit->first_injected ) {
   82.15          if ( v->domain->arch.hvm_domain.guest_time ) {
   82.16              svm_set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
   82.17 +            vpit->count_point = NOW();
   82.18              v->domain->arch.hvm_domain.guest_time = 0;
   82.19          }
   82.20          pickup_deactive_ticks(vpit);
    83.1 --- a/xen/arch/x86/hvm/vmx/io.c	Sat Apr 15 19:25:09 2006 +0100
    83.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Sat Apr 15 19:25:21 2006 +0100
    83.3 @@ -84,7 +84,8 @@ interrupt_post_injection(struct vcpu * v
    83.4          } else {
    83.5              vpit->pending_intr_nr--;
    83.6          }
    83.7 -        vpit->inject_point = NOW();
    83.8 +        vpit->count_advance = 0;
    83.9 +        vpit->count_point = NOW();
   83.10  
   83.11          vpit->last_pit_gtime += vpit->period_cycles;
   83.12          set_guest_time(v, vpit->last_pit_gtime);
   83.13 @@ -208,6 +209,7 @@ void vmx_do_resume(struct vcpu *v)
   83.14      /* pick up the elapsed PIT ticks and re-enable pit_timer */
   83.15      if ( vpit->first_injected ) {
   83.16          if ( v->domain->arch.hvm_domain.guest_time ) {
   83.17 +            vpit->count_point = NOW();
   83.18              set_guest_time(v, v->domain->arch.hvm_domain.guest_time);
   83.19              v->domain->arch.hvm_domain.guest_time = 0;
   83.20          }
    84.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Sat Apr 15 19:25:09 2006 +0100
    84.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Sat Apr 15 19:25:21 2006 +0100
    84.3 @@ -362,6 +362,7 @@ static void vmx_freeze_time(struct vcpu 
    84.4      
    84.5      if ( vpit->first_injected && !v->domain->arch.hvm_domain.guest_time ) {
    84.6          v->domain->arch.hvm_domain.guest_time = get_guest_time(v);
    84.7 +        vpit->count_advance += (NOW() - vpit->count_point);
    84.8          stop_timer(&(vpit->pit_timer));
    84.9      }
   84.10  }
    85.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Sat Apr 15 19:25:09 2006 +0100
    85.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Sat Apr 15 19:25:21 2006 +0100
    85.3 @@ -94,6 +94,7 @@
    85.4  ENTRY(vmx_asm_vmexit_handler)
    85.5          /* selectors are restored/saved by VMX */
    85.6          HVM_SAVE_ALL_NOSEGREGS
    85.7 +        call vmx_trace_vmexit
    85.8          call vmx_vmexit_handler
    85.9          jmp vmx_asm_do_resume
   85.10  
   85.11 @@ -114,6 +115,7 @@ 1:
   85.12  /* vmx_restore_all_guest */
   85.13          call vmx_intr_assist
   85.14          call vmx_load_cr2
   85.15 +        call vmx_trace_vmentry
   85.16          .endif
   85.17          /* 
   85.18           * Check if we are going back to VMX-based VM
    86.1 --- a/xen/arch/x86/i8259.c	Sat Apr 15 19:25:09 2006 +0100
    86.2 +++ b/xen/arch/x86/i8259.c	Sat Apr 15 19:25:21 2006 +0100
    86.3 @@ -318,7 +318,7 @@ void __init init_8259A(int auto_eoi)
    86.4       * outb_p - this has to work on a wide range of PC hardware.
    86.5       */
    86.6      outb_p(0x11, 0x20);     /* ICW1: select 8259A-1 init */
    86.7 -    outb_p(0x20 + 0, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
    86.8 +    outb_p(FIRST_LEGACY_VECTOR + 0, 0x21); /* ICW2: 8259A-1 IR0-7 */
    86.9      outb_p(0x04, 0x21);     /* 8259A-1 (the master) has a slave on IR2 */
   86.10      if (auto_eoi)
   86.11          outb_p(0x03, 0x21); /* master does Auto EOI */
   86.12 @@ -326,7 +326,7 @@ void __init init_8259A(int auto_eoi)
   86.13          outb_p(0x01, 0x21); /* master expects normal EOI */
   86.14  
   86.15      outb_p(0x11, 0xA0);     /* ICW1: select 8259A-2 init */
   86.16 -    outb_p(0x20 + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
   86.17 +    outb_p(FIRST_LEGACY_VECTOR + 8, 0xA1); /* ICW2: 8259A-2 IR0-7 */
   86.18      outb_p(0x02, 0xA1);     /* 8259A-2 is a slave on master's IR2 */
   86.19      outb_p(0x01, 0xA1);     /* (slave's support for AEOI in flat mode
   86.20                                 is to be investigated) */
    87.1 --- a/xen/arch/x86/io_apic.c	Sat Apr 15 19:25:09 2006 +0100
    87.2 +++ b/xen/arch/x86/io_apic.c	Sat Apr 15 19:25:21 2006 +0100
    87.3 @@ -202,6 +202,18 @@ static void __level_IO_APIC_irq (unsigne
    87.4      __modify_IO_APIC_irq(irq, 0x00008000, 0);
    87.5  }
    87.6  
    87.7 +/* mask = 1, trigger = 0 */
    87.8 +static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
    87.9 +{
   87.10 +    __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
   87.11 +}
   87.12 +
   87.13 +/* mask = 0, trigger = 1 */
   87.14 +static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
   87.15 +{
   87.16 +    __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
   87.17 +}
   87.18 +
   87.19  static void mask_IO_APIC_irq (unsigned int irq)
   87.20  {
   87.21      unsigned long flags;
   87.22 @@ -657,11 +669,11 @@ static inline int IO_APIC_irq_trigger(in
   87.23  }
   87.24  
   87.25  /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
   87.26 -u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
   87.27 +u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
   87.28  
   87.29  int assign_irq_vector(int irq)
   87.30  {
   87.31 -    static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
   87.32 +    static int current_vector = FIRST_DYNAMIC_VECTOR, offset = 0;
   87.33  
   87.34      BUG_ON(irq >= NR_IRQ_VECTORS);
   87.35      if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
   87.36 @@ -677,11 +689,11 @@ next:
   87.37      if (current_vector == 0x80)
   87.38          goto next;
   87.39  
   87.40 -    if (current_vector >= FIRST_SYSTEM_VECTOR) {
   87.41 +    if (current_vector > LAST_DYNAMIC_VECTOR) {
   87.42          offset++;
   87.43          if (!(offset%8))
   87.44              return -ENOSPC;
   87.45 -        current_vector = FIRST_DEVICE_VECTOR + offset;
   87.46 +        current_vector = FIRST_DYNAMIC_VECTOR + offset;
   87.47      }
   87.48  
   87.49      vector_irq[current_vector] = irq;
   87.50 @@ -1321,11 +1333,26 @@ static unsigned int startup_level_ioapic
   87.51      return 0; /* don't check for pending */
   87.52  }
   87.53  
   87.54 +int ioapic_ack_new = 1;
   87.55 +static void setup_ioapic_ack(char *s)
   87.56 +{
   87.57 +    if ( !strcmp(s, "old") )
   87.58 +        ioapic_ack_new = 0;
   87.59 +    else if ( !strcmp(s, "new") )
   87.60 +        ioapic_ack_new = 1;
   87.61 +    else
   87.62 +        printk("Unknown ioapic_ack value specified: '%s'\n", s);
   87.63 +}
   87.64 +custom_param("ioapic_ack", setup_ioapic_ack);
   87.65 +
   87.66  static void mask_and_ack_level_ioapic_irq (unsigned int irq)
   87.67  {
   87.68      unsigned long v;
   87.69      int i;
   87.70  
   87.71 +    if ( ioapic_ack_new )
   87.72 +        return;
   87.73 +
   87.74      mask_IO_APIC_irq(irq);
   87.75  /*
   87.76   * It appears there is an erratum which affects at least version 0x11
   87.77 @@ -1363,7 +1390,47 @@ static void mask_and_ack_level_ioapic_ir
   87.78  
   87.79  static void end_level_ioapic_irq (unsigned int irq)
   87.80  {
   87.81 -    unmask_IO_APIC_irq(irq);
   87.82 +    unsigned long v;
   87.83 +    int i;
   87.84 +
   87.85 +    if ( !ioapic_ack_new )
   87.86 +    {
   87.87 +        unmask_IO_APIC_irq(irq);
   87.88 +        return;
   87.89 +    }
   87.90 +
   87.91 +/*
   87.92 + * It appears there is an erratum which affects at least version 0x11
   87.93 + * of I/O APIC (that's the 82093AA and cores integrated into various
   87.94 + * chipsets).  Under certain conditions a level-triggered interrupt is
   87.95 + * erroneously delivered as edge-triggered one but the respective IRR
   87.96 + * bit gets set nevertheless.  As a result the I/O unit expects an EOI
   87.97 + * message but it will never arrive and further interrupts are blocked
   87.98 + * from the source.  The exact reason is so far unknown, but the
   87.99 + * phenomenon was observed when two consecutive interrupt requests
  87.100 + * from a given source get delivered to the same CPU and the source is
  87.101 + * temporarily disabled in between.
  87.102 + *
  87.103 + * A workaround is to simulate an EOI message manually.  We achieve it
  87.104 + * by setting the trigger mode to edge and then to level when the edge
  87.105 + * trigger mode gets detected in the TMR of a local APIC for a
  87.106 + * level-triggered interrupt.  We mask the source for the time of the
  87.107 + * operation to prevent an edge-triggered interrupt escaping meanwhile.
  87.108 + * The idea is from Manfred Spraul.  --macro
  87.109 + */
  87.110 +    i = IO_APIC_VECTOR(irq);
  87.111 +
  87.112 +    v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
  87.113 +
  87.114 +    ack_APIC_irq();
  87.115 +
  87.116 +    if (!(v & (1 << (i & 0x1f)))) {
  87.117 +        atomic_inc(&irq_mis_count);
  87.118 +        spin_lock(&ioapic_lock);
  87.119 +        __mask_and_edge_IO_APIC_irq(irq);
  87.120 +        __unmask_and_level_IO_APIC_irq(irq);
  87.121 +        spin_unlock(&ioapic_lock);
  87.122 +    }
  87.123  }
  87.124  
  87.125  static unsigned int startup_edge_ioapic_vector(unsigned int vector)
  87.126 @@ -1695,6 +1762,7 @@ void __init setup_IO_APIC(void)
  87.127          io_apic_irqs = ~PIC_IRQS;
  87.128  
  87.129      printk("ENABLING IO-APIC IRQs\n");
  87.130 +    printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old");
  87.131  
  87.132      /*
  87.133       * Set up IO-APIC IRQ routing.
  87.134 @@ -1956,9 +2024,9 @@ int ioapic_guest_write(unsigned long phy
  87.135          return 0;
  87.136      }
  87.137  
  87.138 -    if ( old_rte.vector >= FIRST_DEVICE_VECTOR )
  87.139 +    if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
  87.140          old_irq = vector_irq[old_rte.vector];
  87.141 -    if ( new_rte.vector >= FIRST_DEVICE_VECTOR )
  87.142 +    if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
  87.143          new_irq = vector_irq[new_rte.vector];
  87.144  
  87.145      if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
    88.1 --- a/xen/arch/x86/irq.c	Sat Apr 15 19:25:09 2006 +0100
    88.2 +++ b/xen/arch/x86/irq.c	Sat Apr 15 19:25:21 2006 +0100
    88.3 @@ -148,48 +148,180 @@ typedef struct {
    88.4      u8 nr_guests;
    88.5      u8 in_flight;
    88.6      u8 shareable;
    88.7 +    u8 ack_type;
    88.8 +#define ACKTYPE_NONE   0     /* No final acknowledgement is required */
    88.9 +#define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
   88.10 +#define ACKTYPE_LAPIC_EOI  2 /* EOI on the CPU that was interrupted  */
   88.11 +    cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
   88.12      struct domain *guest[IRQ_MAX_GUESTS];
   88.13  } irq_guest_action_t;
   88.14  
   88.15 +/*
   88.16 + * Stack of interrupts awaiting EOI on each CPU. These must be popped in
   88.17 + * order, as only the current highest-priority pending irq can be EOIed.
   88.18 + */
   88.19 +static struct {
   88.20 +    u8 vector;
   88.21 +    u8 ready_to_end;
   88.22 +} pending_lapic_eoi[NR_CPUS][NR_VECTORS] __cacheline_aligned;
   88.23 +#define pending_lapic_eoi_sp(cpu) (pending_lapic_eoi[cpu][NR_VECTORS-1].vector)
   88.24 +
   88.25  static void __do_IRQ_guest(int vector)
   88.26  {
   88.27      unsigned int        irq = vector_to_irq(vector);
   88.28      irq_desc_t         *desc = &irq_desc[vector];
   88.29      irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
   88.30      struct domain      *d;
   88.31 -    int                 i;
   88.32 +    int                 i, sp, cpu = smp_processor_id();
   88.33 +
   88.34 +    if ( unlikely(action->nr_guests == 0) )
   88.35 +    {
   88.36 +        /* An interrupt may slip through while freeing a LAPIC_EOI irq. */
   88.37 +        ASSERT(action->ack_type == ACKTYPE_LAPIC_EOI);
   88.38 +        desc->handler->end(vector);
   88.39 +        return;
   88.40 +    }
   88.41 +
   88.42 +    if ( action->ack_type == ACKTYPE_LAPIC_EOI )
   88.43 +    {
   88.44 +        sp = pending_lapic_eoi_sp(cpu);
   88.45 +        ASSERT((sp == 0) || (pending_lapic_eoi[cpu][sp-1].vector < vector));
   88.46 +        ASSERT(sp < (NR_VECTORS-1));
   88.47 +        pending_lapic_eoi[cpu][sp].vector = vector;
   88.48 +        pending_lapic_eoi[cpu][sp].ready_to_end = 0;
   88.49 +        pending_lapic_eoi_sp(cpu) = sp+1;
   88.50 +        cpu_set(cpu, action->cpu_eoi_map);
   88.51 +    }
   88.52  
   88.53      for ( i = 0; i < action->nr_guests; i++ )
   88.54      {
   88.55          d = action->guest[i];
   88.56 -        if ( !test_and_set_bit(irq, &d->pirq_mask) )
   88.57 +        if ( (action->ack_type != ACKTYPE_NONE) &&
   88.58 +             !test_and_set_bit(irq, &d->pirq_mask) )
   88.59              action->in_flight++;
   88.60          send_guest_pirq(d, irq);
   88.61      }
   88.62  }
   88.63  
   88.64 +static void end_guest_irq(void *data)
   88.65 +{
   88.66 +    irq_desc_t         *desc = data;
   88.67 +    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
   88.68 +    unsigned long       flags;
   88.69 +    int                 vector, sp, cpu = smp_processor_id();
   88.70 +
   88.71 +    vector = desc - irq_desc;
   88.72 +
   88.73 +    spin_lock_irqsave(&desc->lock, flags);
   88.74 +
   88.75 +    if ( (desc->status & IRQ_GUEST) &&
   88.76 +         (action->in_flight == 0) &&
   88.77 +         test_and_clear_bit(cpu, &action->cpu_eoi_map) )
   88.78 +    {
   88.79 +        sp = pending_lapic_eoi_sp(cpu);
   88.80 +        do {
   88.81 +            ASSERT(sp > 0);
   88.82 +        } while ( pending_lapic_eoi[cpu][--sp].vector != vector );
   88.83 +        ASSERT(!pending_lapic_eoi[cpu][sp].ready_to_end);
   88.84 +        pending_lapic_eoi[cpu][sp].ready_to_end = 1;
   88.85 +    }
   88.86 +
   88.87 +    for ( ; ; )
   88.88 +    {
   88.89 +        sp = pending_lapic_eoi_sp(cpu);
   88.90 +        if ( (sp == 0) || !pending_lapic_eoi[cpu][sp-1].ready_to_end )
   88.91 +        {
   88.92 +            spin_unlock_irqrestore(&desc->lock, flags);    
   88.93 +            return;
   88.94 +        }
   88.95 +        if ( pending_lapic_eoi[cpu][sp-1].vector != vector )
   88.96 +        {
   88.97 +            spin_unlock(&desc->lock);
   88.98 +            vector = pending_lapic_eoi[cpu][sp-1].vector;
   88.99 +            desc = &irq_desc[vector];
  88.100 +            spin_lock(&desc->lock);
  88.101 +        }
  88.102 +        desc->handler->end(vector);
  88.103 +        pending_lapic_eoi_sp(cpu) = sp-1;
  88.104 +    }
  88.105 +}
  88.106 +
  88.107  int pirq_guest_unmask(struct domain *d)
  88.108  {
  88.109 -    irq_desc_t    *desc;
  88.110 -    unsigned int   pirq;
  88.111 -    shared_info_t *s = d->shared_info;
  88.112 +    irq_desc_t         *desc;
  88.113 +    irq_guest_action_t *action;
  88.114 +    cpumask_t           cpu_eoi_map = CPU_MASK_NONE;
  88.115 +    unsigned int        pirq, cpu = smp_processor_id();
  88.116 +    shared_info_t      *s = d->shared_info;
  88.117  
  88.118      for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
  88.119            pirq < NR_PIRQS;
  88.120            pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
  88.121      {
  88.122 -        desc = &irq_desc[irq_to_vector(pirq)];
  88.123 +        desc   = &irq_desc[irq_to_vector(pirq)];
  88.124 +        action = (irq_guest_action_t *)desc->action;
  88.125 +
  88.126          spin_lock_irq(&desc->lock);
  88.127          if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
  88.128 -             test_and_clear_bit(pirq, &d->pirq_mask) &&
  88.129 -             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
  88.130 -            desc->handler->end(irq_to_vector(pirq));
  88.131 +             test_and_clear_bit(pirq, &d->pirq_mask) )
  88.132 +        {
  88.133 +            ASSERT(action->ack_type != ACKTYPE_NONE);
  88.134 +            if ( --action->in_flight == 0 )
  88.135 +            {
  88.136 +                if ( action->ack_type == ACKTYPE_UNMASK )
  88.137 +                    desc->handler->end(irq_to_vector(pirq));
  88.138 +                cpu_eoi_map = action->cpu_eoi_map;
  88.139 +            }
  88.140 +        }
  88.141          spin_unlock_irq(&desc->lock);
  88.142 +
  88.143 +        if ( __test_and_clear_bit(cpu, &cpu_eoi_map) )
  88.144 +            end_guest_irq(desc);
  88.145 +
  88.146 +        if ( !cpus_empty(cpu_eoi_map) )
  88.147 +        {
  88.148 +            on_selected_cpus(cpu_eoi_map, end_guest_irq, desc, 1, 0);
  88.149 +            cpu_eoi_map = CPU_MASK_NONE;
  88.150 +        }
  88.151      }
  88.152  
  88.153      return 0;
  88.154  }
  88.155  
  88.156 +extern int ioapic_ack_new;
  88.157 +int pirq_acktype(int irq)
  88.158 +{
  88.159 +    irq_desc_t  *desc;
  88.160 +    unsigned int vector;
  88.161 +
  88.162 +    vector = irq_to_vector(irq);
  88.163 +    if ( vector == 0 )
  88.164 +        return ACKTYPE_NONE;
  88.165 +
  88.166 +    desc = &irq_desc[vector];
  88.167 +
  88.168 +    /*
  88.169 +     * Edge-triggered IO-APIC interrupts need no final acknowledgement:
  88.170 +     * we ACK early during interrupt processing.
  88.171 +     */
  88.172 +    if ( !strcmp(desc->handler->typename, "IO-APIC-edge") )
  88.173 +        return ACKTYPE_NONE;
  88.174 +
  88.175 +    /* Legacy PIC interrupts can be acknowledged from any CPU. */
  88.176 +    if ( !strcmp(desc->handler->typename, "XT-PIC") )
  88.177 +        return ACKTYPE_UNMASK;
  88.178 +
  88.179 +    /*
  88.180 +     * Level-triggered IO-APIC interrupts need to be acknowledged on the CPU
  88.181 +     * on which they were received. This is because we tickle the LAPIC to EOI.
  88.182 +     */
  88.183 +    if ( !strcmp(desc->handler->typename, "IO-APIC-level") )
  88.184 +        return ioapic_ack_new ? ACKTYPE_LAPIC_EOI : ACKTYPE_UNMASK;
  88.185 +
  88.186 +    BUG();
  88.187 +    return 0;
  88.188 +}
  88.189 +
  88.190  int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
  88.191  {
  88.192      unsigned int        vector;
  88.193 @@ -230,10 +362,12 @@ int pirq_guest_bind(struct vcpu *v, int 
  88.194              goto out;
  88.195          }
  88.196  
  88.197 -        action->nr_guests = 0;
  88.198 -        action->in_flight = 0;
  88.199 -        action->shareable = will_share;
  88.200 -        
  88.201 +        action->nr_guests   = 0;
  88.202 +        action->in_flight   = 0;
  88.203 +        action->shareable   = will_share;
  88.204 +        action->ack_type    = pirq_acktype(irq);
  88.205 +        action->cpu_eoi_map = CPU_MASK_NONE;
  88.206 +
  88.207          desc->depth = 0;
  88.208          desc->status |= IRQ_GUEST;
  88.209          desc->status &= ~IRQ_DISABLED;
  88.210 @@ -271,6 +405,7 @@ int pirq_guest_unbind(struct domain *d, 
  88.211      unsigned int        vector = irq_to_vector(irq);
  88.212      irq_desc_t         *desc = &irq_desc[vector];
  88.213      irq_guest_action_t *action;
  88.214 +    cpumask_t           cpu_eoi_map;
  88.215      unsigned long       flags;
  88.216      int                 i;
  88.217  
  88.218 @@ -280,28 +415,60 @@ int pirq_guest_unbind(struct domain *d, 
  88.219  
  88.220      action = (irq_guest_action_t *)desc->action;
  88.221  
  88.222 -    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
  88.223 -         (--action->in_flight == 0) )
  88.224 -        desc->handler->end(vector);
  88.225 +    i = 0;
  88.226 +    while ( action->guest[i] && (action->guest[i] != d) )
  88.227 +        i++;
  88.228 +    memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
  88.229 +    action->nr_guests--;
  88.230  
  88.231 -    if ( action->nr_guests == 1 )
  88.232 +    switch ( action->ack_type )
  88.233      {
  88.234 -        desc->action = NULL;
  88.235 -        xfree(action);
  88.236 -        desc->depth   = 1;
  88.237 -        desc->status |= IRQ_DISABLED;
  88.238 -        desc->status &= ~IRQ_GUEST;
  88.239 -        desc->handler->shutdown(vector);
  88.240 -    }
  88.241 -    else
  88.242 -    {
  88.243 -        i = 0;
  88.244 -        while ( action->guest[i] && (action->guest[i] != d) )
  88.245 -            i++;
  88.246 -        memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
  88.247 -        action->nr_guests--;
  88.248 +    case ACKTYPE_UNMASK:
  88.249 +        if ( test_and_clear_bit(irq, &d->pirq_mask) &&
  88.250 +             (--action->in_flight == 0) )
  88.251 +            desc->handler->end(vector);
  88.252 +        break;
  88.253 +    case ACKTYPE_LAPIC_EOI:
  88.254 +        if ( test_and_clear_bit(irq, &d->pirq_mask) )
  88.255 +            --action->in_flight;
  88.256 +        while ( action->in_flight == 0 )
  88.257 +        {
  88.258 +            /* We cannot release guest info until all pending ACKs are done. */
  88.259 +            cpu_eoi_map = action->cpu_eoi_map;
  88.260 +            if ( cpus_empty(cpu_eoi_map) )
  88.261 +                break;
  88.262 +
  88.263 +            /* We cannot hold the lock while interrupting other CPUs. */
  88.264 +            spin_unlock_irqrestore(&desc->lock, flags);    
  88.265 +            on_selected_cpus(cpu_eoi_map, end_guest_irq, desc, 1, 1);
  88.266 +            spin_lock_irqsave(&desc->lock, flags);
  88.267 +
  88.268 +            /* The world can change while we do not hold the lock. */
  88.269 +            if ( !(desc->status & IRQ_GUEST) )
  88.270 +                goto out;
  88.271 +            if ( (action->ack_type != ACKTYPE_LAPIC_EOI) ||
  88.272 +                 (action->nr_guests != 0) )
  88.273 +                break;
  88.274 +        }
  88.275 +        break;
  88.276      }
  88.277  
  88.278 +    BUG_ON(test_bit(irq, &d->pirq_mask));
  88.279 +
  88.280 +    if ( action->nr_guests != 0 )
  88.281 +        goto out;
  88.282 +
  88.283 +    BUG_ON(action->in_flight != 0);
  88.284 +    BUG_ON(!cpus_empty(action->cpu_eoi_map));
  88.285 +
  88.286 +    desc->action = NULL;
  88.287 +    xfree(action);
  88.288 +    desc->depth   = 1;
  88.289 +    desc->status |= IRQ_DISABLED;
  88.290 +    desc->status &= ~IRQ_GUEST;
  88.291 +    desc->handler->shutdown(vector);
  88.292 +
  88.293 + out:
  88.294      spin_unlock_irqrestore(&desc->lock, flags);    
  88.295      return 0;
  88.296  }
  88.297 @@ -373,3 +540,61 @@ static int __init setup_dump_irqs(void)
  88.298      return 0;
  88.299  }
  88.300  __initcall(setup_dump_irqs);
  88.301 +
  88.302 +static struct timer end_irq_timer[NR_CPUS];
  88.303 +
  88.304 +static void end_irq_timeout(void *unused)
  88.305 +{
  88.306 +    irq_desc_t         *desc;
  88.307 +    irq_guest_action_t *action;
  88.308 +    cpumask_t           cpu_eoi_map;
  88.309 +    unsigned int        cpu = smp_processor_id();
  88.310 +    int                 sp, vector, i;
  88.311 +
  88.312 +    local_irq_disable();
  88.313 +
  88.314 +    if ( (sp = pending_lapic_eoi_sp(cpu)) == 0 )
  88.315 +    {
  88.316 +        local_irq_enable();
  88.317 +        return;
  88.318 +    }
  88.319 +
  88.320 +    vector = pending_lapic_eoi[cpu][sp-1].vector;
  88.321 +    ASSERT(!pending_lapic_eoi[cpu][sp-1].ready_to_end);
  88.322 +
  88.323 +    desc = &irq_desc[vector];
  88.324 +    spin_lock(&desc->lock);
  88.325 +    action = (irq_guest_action_t *)desc->action;
  88.326 +    ASSERT(action->ack_type == ACKTYPE_LAPIC_EOI);
  88.327 +    ASSERT(desc->status & IRQ_GUEST);
  88.328 +    for ( i = 0; i < action->nr_guests; i++ )
  88.329 +        clear_bit(vector_to_irq(vector), &action->guest[i]->pirq_mask);
  88.330 +    action->in_flight = 0;
  88.331 +    cpu_eoi_map = action->cpu_eoi_map;
  88.332 +    spin_unlock(&desc->lock);
  88.333 +
  88.334 +    local_irq_enable();
  88.335 +
  88.336 +    if ( !cpus_empty(cpu_eoi_map) )
  88.337 +        on_selected_cpus(cpu_eoi_map, end_guest_irq, desc, 1, 0);
  88.338 +
  88.339 +    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
  88.340 +}
  88.341 +
  88.342 +static void __init __setup_irq_timeout(void *unused)
  88.343 +{
  88.344 +    int cpu = smp_processor_id();
  88.345 +    init_timer(&end_irq_timer[cpu], end_irq_timeout, NULL, cpu);
  88.346 +    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
  88.347 +}
  88.348 +
  88.349 +static int force_intack;
  88.350 +boolean_param("force_intack", force_intack);
  88.351 +
  88.352 +static int __init setup_irq_timeout(void)
  88.353 +{
  88.354 +    if ( force_intack )
  88.355 +        on_each_cpu(__setup_irq_timeout, NULL, 1, 1);
  88.356 +    return 0;
  88.357 +}
  88.358 +__initcall(setup_irq_timeout);
    89.1 --- a/xen/arch/x86/physdev.c	Sat Apr 15 19:25:09 2006 +0100
    89.2 +++ b/xen/arch/x86/physdev.c	Sat Apr 15 19:25:21 2006 +0100
    89.3 @@ -18,6 +18,9 @@ ioapic_guest_read(
    89.4  extern int
    89.5  ioapic_guest_write(
    89.6      unsigned long physbase, unsigned int reg, u32 pval);
    89.7 +extern int
    89.8 +pirq_acktype(
    89.9 +    int irq);
   89.10  
   89.11  /*
   89.12   * Demuxing hypercall.
   89.13 @@ -43,8 +46,7 @@ long do_physdev_op(GUEST_HANDLE(physdev_
   89.14          if ( (irq < 0) || (irq >= NR_IRQS) )
   89.15              break;
   89.16          op.u.irq_status_query.flags = 0;
   89.17 -        /* Edge-triggered interrupts don't need an explicit unmask downcall. */
   89.18 -        if ( !strstr(irq_desc[irq_to_vector(irq)].handler->typename, "edge") )
   89.19 +        if ( pirq_acktype(irq) != 0 )
   89.20              op.u.irq_status_query.flags |= PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY;
   89.21          ret = 0;
   89.22          break;
    90.1 --- a/xen/arch/x86/shadow.c	Sat Apr 15 19:25:09 2006 +0100
    90.2 +++ b/xen/arch/x86/shadow.c	Sat Apr 15 19:25:21 2006 +0100
    90.3 @@ -1531,14 +1531,10 @@ static void resync_pae_guest_l3(struct d
    90.4  
    90.5          idx = get_cr3_idxval(v);
    90.6          smfn = __shadow_status(
    90.7 -            d, ((unsigned long)(idx << PGT_score_shift) | entry->gpfn), PGT_l4_shadow);
    90.8 -
    90.9 -#ifndef NDEBUG
   90.10 +            d, ((unsigned long)(idx << PGT_pae_idx_shift) | entry->gpfn), PGT_l4_shadow);
   90.11 +
   90.12          if ( !smfn ) 
   90.13 -        {
   90.14 -            BUG();
   90.15 -        }
   90.16 -#endif
   90.17 +            continue;
   90.18  
   90.19          guest    = (pgentry_64_t *)map_domain_page(entry->gmfn);
   90.20          snapshot = (pgentry_64_t *)map_domain_page(entry->snapshot_mfn);
   90.21 @@ -1550,9 +1546,35 @@ static void resync_pae_guest_l3(struct d
   90.22              if ( entry_has_changed(
   90.23                      guest[index], snapshot[index], PAGE_FLAG_MASK) ) 
   90.24              {
   90.25 +                unsigned long gpfn;
   90.26 +
   90.27 +                /*
   90.28 +                 * Looks like it's no longer a page table. 
   90.29 +                 */
   90.30 +                if ( unlikely(entry_get_value(guest[index]) & PAE_PDPT_RESERVED) )
   90.31 +                {
   90.32 +                    if ( entry_get_flags(shadow_l3[i]) & _PAGE_PRESENT )
   90.33 +                        put_shadow_ref(entry_get_pfn(shadow_l3[i]));
   90.34 +
   90.35 +                    shadow_l3[i] = entry_empty();
   90.36 +                    continue;
   90.37 +                }
   90.38 +
   90.39 +                gpfn = entry_get_pfn(guest[index]);
   90.40 +
   90.41 +                if ( unlikely(gpfn != (gpfn & PGT_mfn_mask)) )
   90.42 +                {
   90.43 +                    if ( entry_get_flags(shadow_l3[i]) & _PAGE_PRESENT )
   90.44 +                        put_shadow_ref(entry_get_pfn(shadow_l3[i]));
   90.45 +
   90.46 +                    shadow_l3[i] = entry_empty();
   90.47 +                    continue;
   90.48 +                }
   90.49 +
   90.50                  validate_entry_change(d, &guest[index],
   90.51                                        &shadow_l3[i], PAGING_L3);
   90.52              }
   90.53 +
   90.54              if ( entry_get_value(guest[index]) != 0 )
   90.55                  max = i;
   90.56  
   90.57 @@ -1676,6 +1698,19 @@ static int resync_all(struct domain *d, 
   90.58                  {
   90.59                      int error;
   90.60  
   90.61 +#if CONFIG_PAGING_LEVELS == 4
   90.62 +                    unsigned long gpfn;
   90.63 +
   90.64 +                    gpfn = guest_l1e_get_paddr(guest1[i]) >> PAGE_SHIFT;
   90.65 +
   90.66 +                    if ( unlikely(gpfn != (gpfn & PGT_mfn_mask)) )
   90.67 +                    {
   90.68 +                        guest_l1_pgentry_t tmp_gl1e = guest_l1e_empty();
   90.69 +                        validate_pte_change(d, tmp_gl1e, sl1e_p);
   90.70 +                        continue;
   90.71 +                    }
   90.72 +#endif
   90.73 +
   90.74                      error = validate_pte_change(d, guest1[i], sl1e_p);
   90.75                      if ( error ==  -1 )
   90.76                          unshadow_l1 = 1;
   90.77 @@ -1698,6 +1733,7 @@ static int resync_all(struct domain *d, 
   90.78              perfc_incrc(resync_l1);
   90.79              perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
   90.80              perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
   90.81 +
   90.82              if ( d->arch.ops->guest_paging_levels >= PAGING_L3 &&
   90.83                   unshadow_l1 ) {
   90.84                  pgentry_64_t l2e = { 0 };
   90.85 @@ -1804,18 +1840,22 @@ static int resync_all(struct domain *d, 
   90.86              for ( i = min_shadow; i <= max_shadow; i++ )
   90.87              {
   90.88                  if ( (i < min_snapshot) || (i > max_snapshot) ||
   90.89 -                  entry_has_changed(
   90.90 -                      guest_pt[i], snapshot_pt[i], PAGE_FLAG_MASK) )
   90.91 +                    entry_has_changed(
   90.92 +                        guest_pt[i], snapshot_pt[i], PAGE_FLAG_MASK) )
   90.93                  {
   90.94 -
   90.95                      unsigned long gpfn;
   90.96  
   90.97                      gpfn = entry_get_pfn(guest_pt[i]);
   90.98                      /*
   90.99 -                     * Looks like it's longer a page table.
  90.100 +                     * Looks like it's no longer a page table.
  90.101                       */
  90.102                      if ( unlikely(gpfn != (gpfn & PGT_mfn_mask)) )
  90.103 +                    {
  90.104 +                        if ( entry_get_flags(shadow_pt[i]) & _PAGE_PRESENT )
  90.105 +                            put_shadow_ref(entry_get_pfn(shadow_pt[i]));
  90.106 +                         shadow_pt[i] = entry_empty(); 
  90.107                          continue;
  90.108 +                    }
  90.109  
  90.110                      need_flush |= validate_entry_change(
  90.111                          d, &guest_pt[i], &shadow_pt[i],
  90.112 @@ -1864,11 +1904,17 @@ static int resync_all(struct domain *d, 
  90.113                      unsigned long gpfn;
  90.114  
  90.115                      gpfn = l4e_get_pfn(new_root_e);
  90.116 +
  90.117                      /*
  90.118 -                     * Looks like it's longer a page table.
  90.119 +                     * Looks like it's no longer a page table.
  90.120                       */
  90.121                      if ( unlikely(gpfn != (gpfn & PGT_mfn_mask)) )
  90.122 +                    {
  90.123 +                        if ( l4e_get_flags(shadow4[i]) & _PAGE_PRESENT )
  90.124 +                            put_shadow_ref(l4e_get_pfn(shadow4[i]));
  90.125 +                        shadow4[i] = l4e_empty(); 
  90.126                          continue;
  90.127 +                    }
  90.128  
  90.129                      if ( d->arch.ops->guest_paging_levels == PAGING_L4 ) 
  90.130                      {
  90.131 @@ -2372,7 +2418,7 @@ static void shadow_update_pagetables(str
  90.132      if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 ) 
  90.133      {
  90.134          u32 index = get_cr3_idxval(v);
  90.135 -        gpfn = (index << PGT_score_shift) | gpfn;
  90.136 +        gpfn = ((unsigned long)index << PGT_pae_idx_shift) | gpfn;
  90.137      }
  90.138  #endif
  90.139  
  90.140 @@ -3233,8 +3279,35 @@ update_top_level_shadow(struct vcpu *v, 
  90.141      int i;
  90.142  
  90.143      for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
  90.144 +    {
  90.145 +        unsigned long gpfn;
  90.146 +
  90.147 +        /*
  90.148 +         * Looks like it's no longer a page table. 
  90.149 +         */
  90.150 +        if ( unlikely(entry_get_value(gple[index*4+i]) & PAE_PDPT_RESERVED) )
  90.151 +        {
  90.152 +            if ( entry_get_flags(sple[i]) & _PAGE_PRESENT )
  90.153 +                put_shadow_ref(entry_get_pfn(sple[i]));
  90.154 +
  90.155 +            sple[i] = entry_empty();
  90.156 +            continue;
  90.157 +        }
  90.158 +
  90.159 +        gpfn = entry_get_pfn(gple[index*4+i]);
  90.160 +
  90.161 +        if ( unlikely(gpfn != (gpfn & PGT_mfn_mask)) )
  90.162 +        {
  90.163 +            if ( entry_get_flags(sple[i]) & _PAGE_PRESENT )
  90.164 +                put_shadow_ref(entry_get_pfn(sple[i]));
  90.165 +
  90.166 +            sple[i] = entry_empty();
  90.167 +            continue;
  90.168 +        }
  90.169 +
  90.170          validate_entry_change(
  90.171              v->domain, &gple[index*4+i], &sple[i], PAGING_L3);
  90.172 +    }
  90.173  
  90.174      unmap_domain_page(sple);
  90.175  }
    91.1 --- a/xen/arch/x86/shadow32.c	Sat Apr 15 19:25:09 2006 +0100
    91.2 +++ b/xen/arch/x86/shadow32.c	Sat Apr 15 19:25:21 2006 +0100
    91.3 @@ -583,6 +583,13 @@ static void free_shadow_pages(struct dom
    91.4          {
    91.5              put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
    91.6              v->arch.shadow_table = mk_pagetable(0);
    91.7 +
    91.8 +            if ( shadow_mode_external(d) )
    91.9 +            {
   91.10 +                if ( v->arch.shadow_vtable )
   91.11 +                    unmap_domain_page_global(v->arch.shadow_vtable);
   91.12 +                v->arch.shadow_vtable = NULL;
   91.13 +            }
   91.14          }
   91.15  
   91.16          if ( v->arch.monitor_shadow_ref )
   91.17 @@ -2886,7 +2893,7 @@ int shadow_fault(unsigned long va, struc
   91.18      SH_VVLOG("shadow_fault( va=%lx, code=%lu )",
   91.19               va, (unsigned long)regs->error_code);
   91.20      perfc_incrc(shadow_fault_calls);
   91.21 -    
   91.22 +
   91.23      check_pagetable(v, "pre-sf");
   91.24  
   91.25      /*
   91.26 @@ -2917,7 +2924,16 @@ int shadow_fault(unsigned long va, struc
   91.27      // the mapping is in-sync, so the check of the PDE's present bit, above,
   91.28      // covers this access.
   91.29      //
   91.30 -    orig_gpte = gpte = linear_pg_table[l1_linear_offset(va)];
   91.31 +    if ( __copy_from_user(&gpte,
   91.32 +                          &linear_pg_table[l1_linear_offset(va)],
   91.33 +                          sizeof(gpte)) ) {
   91.34 +        printk("%s() failed, crashing domain %d "
   91.35 +               "due to an inaccessible linear page table (gpde=%" PRIpte "), va=%lx\n",
   91.36 +               __func__, d->domain_id, l2e_get_intpte(gpde), va);
   91.37 +        domain_crash_synchronous();
   91.38 +    }
   91.39 +    orig_gpte = gpte;
   91.40 +
   91.41      if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
   91.42      {
   91.43          SH_VVLOG("shadow_fault - EXIT: gpte not present (%" PRIpte ") (gpde %" PRIpte ")",
   91.44 @@ -2928,7 +2944,7 @@ int shadow_fault(unsigned long va, struc
   91.45      }
   91.46  
   91.47      /* Write fault? */
   91.48 -    if ( regs->error_code & 2 )  
   91.49 +    if ( regs->error_code & 2 )
   91.50      {
   91.51          int allow_writes = 0;
   91.52  
   91.53 @@ -2942,7 +2958,7 @@ int shadow_fault(unsigned long va, struc
   91.54              else
   91.55              {
   91.56                  /* Write fault on a read-only mapping. */
   91.57 -                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")", 
   91.58 +                SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%" PRIpte ")",
   91.59                           l1e_get_intpte(gpte));
   91.60                  perfc_incrc(shadow_fault_bail_ro_mapping);
   91.61                  goto fail;
   91.62 @@ -2955,10 +2971,10 @@ int shadow_fault(unsigned long va, struc
   91.63          }
   91.64  
   91.65          /* User access violation in guest? */
   91.66 -        if ( unlikely((regs->error_code & 4) && 
   91.67 +        if ( unlikely((regs->error_code & 4) &&
   91.68                        !(l1e_get_flags(gpte) & _PAGE_USER)))
   91.69          {
   91.70 -            SH_VVLOG("shadow_fault - EXIT: wr fault on super page (%" PRIpte ")", 
   91.71 +            SH_VVLOG("shadow_fault - EXIT: wr fault on super page (%" PRIpte ")",
   91.72                      l1e_get_intpte(gpte));
   91.73              goto fail;
   91.74  
   91.75 @@ -2980,7 +2996,7 @@ int shadow_fault(unsigned long va, struc
   91.76          /* Read-protection violation in guest? */
   91.77          if ( unlikely((regs->error_code & 1) ))
   91.78          {
   91.79 -            SH_VVLOG("shadow_fault - EXIT: read fault on super page (%" PRIpte ")", 
   91.80 +            SH_VVLOG("shadow_fault - EXIT: read fault on super page (%" PRIpte ")",
   91.81                      l1e_get_intpte(gpte));
   91.82              goto fail;
   91.83  
    92.1 --- a/xen/arch/x86/shadow_public.c	Sat Apr 15 19:25:09 2006 +0100
    92.2 +++ b/xen/arch/x86/shadow_public.c	Sat Apr 15 19:25:21 2006 +0100
    92.3 @@ -102,6 +102,15 @@ void free_shadow_pages(struct domain *d)
    92.4  
    92.5  int shadow_set_guest_paging_levels(struct domain *d, int levels)
    92.6  {
    92.7 +    struct vcpu *v = current;
    92.8 +
    92.9 +    /*
   92.10 +     * Need to wait for VCPU0 to complete the ongoing shadow ops.
   92.11 +     */
   92.12 +
   92.13 +    if ( v->vcpu_id )
   92.14 +        return 1;
   92.15 +
   92.16      shadow_lock(d);
   92.17  
   92.18      switch(levels) {
   92.19 @@ -692,7 +701,6 @@ void free_fake_shadow_l2(struct domain *
   92.20  void free_shadow_page(unsigned long smfn)
   92.21  {
   92.22      struct page_info *page = mfn_to_page(smfn);
   92.23 -
   92.24      unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
   92.25      struct domain *d = page_get_owner(mfn_to_page(gmfn));
   92.26      unsigned long gpfn = mfn_to_gmfn(d, gmfn);
   92.27 @@ -709,10 +717,9 @@ void free_shadow_page(unsigned long smfn
   92.28          if ( !mfn )
   92.29              gpfn |= (1UL << 63);
   92.30      }
   92.31 -    if (d->arch.ops->guest_paging_levels == PAGING_L3)
   92.32 -        if (type == PGT_l4_shadow ) {
   92.33 -            gpfn = ((unsigned long)page->tlbflush_timestamp << PGT_score_shift) | gpfn;
   92.34 -        }
   92.35 +    if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
   92.36 +        if ( type == PGT_l4_shadow ) 
   92.37 +            gpfn = ((unsigned long)page->tlbflush_timestamp << PGT_pae_idx_shift) | gpfn;
   92.38  #endif
   92.39  
   92.40      delete_shadow_status(d, gpfn, gmfn, type);
   92.41 @@ -743,9 +750,24 @@ void free_shadow_page(unsigned long smfn
   92.42  #if CONFIG_PAGING_LEVELS >= 3
   92.43      case PGT_l2_shadow:
   92.44      case PGT_l3_shadow:
   92.45 +        shadow_demote(d, gpfn, gmfn);
   92.46 +        free_shadow_tables(d, smfn, shadow_type_to_level(type));
   92.47 +        d->arch.shadow_page_count--;
   92.48 +        break;
   92.49 +
   92.50      case PGT_l4_shadow:
   92.51          gpfn = gpfn & PGT_mfn_mask;
   92.52 -        shadow_demote(d, gpfn, gmfn);
   92.53 +        if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
   92.54 +        {
   92.55 +            /*
   92.56 +             * Since a single PDPT page can have multiple PDPs, it's possible
   92.57 +             * that shadow_demote() has been already called for gmfn.
   92.58 +             */
   92.59 +            if ( mfn_is_page_table(gmfn) )
   92.60 +                shadow_demote(d, gpfn, gmfn);
   92.61 +        } else
   92.62 +            shadow_demote(d, gpfn, gmfn);
   92.63 +
   92.64          free_shadow_tables(d, smfn, shadow_type_to_level(type));
   92.65          d->arch.shadow_page_count--;
   92.66          break;
   92.67 @@ -898,6 +920,13 @@ void free_shadow_pages(struct domain *d)
   92.68          {
   92.69              put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
   92.70              v->arch.shadow_table = mk_pagetable(0);
   92.71 +
   92.72 +            if ( shadow_mode_external(d) )
   92.73 +            {
   92.74 +                if ( v->arch.shadow_vtable )
   92.75 +                    unmap_domain_page_global(v->arch.shadow_vtable);
   92.76 +                v->arch.shadow_vtable = NULL;
   92.77 +            }
   92.78          }
   92.79  
   92.80          if ( v->arch.monitor_shadow_ref )
   92.81 @@ -2034,7 +2063,16 @@ void shadow_sync_and_drop_references(
   92.82  
   92.83  void clear_all_shadow_status(struct domain *d)
   92.84  {
   92.85 +    struct vcpu *v = current;
   92.86 +
   92.87 +    /*
   92.88 +     * Don't clean up while other vcpus are working.
   92.89 +     */
   92.90 +    if ( v->vcpu_id )
   92.91 +        return;
   92.92 +
   92.93      shadow_lock(d);
   92.94 +
   92.95      free_shadow_pages(d);
   92.96      free_shadow_ht_entries(d);
   92.97      d->arch.shadow_ht = 
   92.98 @@ -2047,6 +2085,7 @@ void clear_all_shadow_status(struct doma
   92.99             shadow_ht_buckets * sizeof(struct shadow_status));
  92.100  
  92.101      free_out_of_sync_entries(d);
  92.102 +
  92.103      shadow_unlock(d);
  92.104  }
  92.105  
    93.1 --- a/xen/arch/x86/smp.c	Sat Apr 15 19:25:09 2006 +0100
    93.2 +++ b/xen/arch/x86/smp.c	Sat Apr 15 19:25:21 2006 +0100
    93.3 @@ -261,7 +261,7 @@ int smp_call_function(
    93.4      return on_selected_cpus(allbutself, func, info, retry, wait);
    93.5  }
    93.6  
    93.7 -extern int on_selected_cpus(
    93.8 +int on_selected_cpus(
    93.9      cpumask_t selected,
   93.10      void (*func) (void *info),
   93.11      void *info,
    94.1 --- a/xen/arch/x86/smpboot.c	Sat Apr 15 19:25:09 2006 +0100
    94.2 +++ b/xen/arch/x86/smpboot.c	Sat Apr 15 19:25:21 2006 +0100
    94.3 @@ -41,6 +41,7 @@
    94.4  #include <xen/irq.h>
    94.5  #include <xen/delay.h>
    94.6  #include <xen/softirq.h>
    94.7 +#include <xen/serial.h>
    94.8  #include <asm/current.h>
    94.9  #include <asm/mc146818rtc.h>
   94.10  #include <asm/desc.h>
   94.11 @@ -1231,12 +1232,25 @@ void __init smp_cpus_done(unsigned int m
   94.12  
   94.13  void __init smp_intr_init(void)
   94.14  {
   94.15 +	int irq, seridx;
   94.16 +
   94.17  	/*
   94.18  	 * IRQ0 must be given a fixed assignment and initialized,
   94.19  	 * because it's used before the IO-APIC is set up.
   94.20  	 */
   94.21 -	irq_vector[0] = FIRST_DEVICE_VECTOR;
   94.22 -	vector_irq[FIRST_DEVICE_VECTOR] = 0;
   94.23 +	irq_vector[0] = FIRST_HIPRIORITY_VECTOR;
   94.24 +	vector_irq[FIRST_HIPRIORITY_VECTOR] = 0;
   94.25 +
   94.26 +	/*
   94.27 +	 * Also ensure serial interrupts are high priority. We do not
   94.28 +	 * want them to be blocked by unacknowledged guest-bound interrupts.
   94.29 +	 */
   94.30 +	for (seridx = 0; seridx < 2; seridx++) {
   94.31 +		if ((irq = serial_irq(seridx)) < 0)
   94.32 +			continue;
   94.33 +		irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
   94.34 +		vector_irq[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
   94.35 +	}
   94.36  
   94.37  	/* IPI for event checking. */
   94.38  	set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
    95.1 --- a/xen/common/dom0_ops.c	Sat Apr 15 19:25:09 2006 +0100
    95.2 +++ b/xen/common/dom0_ops.c	Sat Apr 15 19:25:21 2006 +0100
    95.3 @@ -581,20 +581,31 @@ long do_dom0_op(GUEST_HANDLE(dom0_op_t) 
    95.4      case DOM0_SETDOMAINMAXMEM:
    95.5      {
    95.6          struct domain *d; 
    95.7 +        unsigned long new_max;
    95.8 +
    95.9          ret = -ESRCH;
   95.10          d = find_domain_by_id(op->u.setdomainmaxmem.domain);
   95.11 -        if ( d != NULL )
   95.12 +        if ( d == NULL )
   95.13 +            break;
   95.14 +
   95.15 +        ret = -EINVAL;
   95.16 +        new_max = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
   95.17 +
   95.18 +        spin_lock(&d->page_alloc_lock);
   95.19 +        if ( new_max >= d->tot_pages )
   95.20          {
   95.21 -            d->max_pages = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
   95.22 -            put_domain(d);
   95.23 +            d->max_pages = new_max;
   95.24              ret = 0;
   95.25          }
   95.26 +        spin_unlock(&d->page_alloc_lock);
   95.27 +
   95.28 +        put_domain(d);
   95.29      }
   95.30      break;
   95.31  
   95.32      case DOM0_SETDOMAINHANDLE:
   95.33      {
   95.34 -        struct domain *d; 
   95.35 +        struct domain *d;
   95.36          ret = -ESRCH;
   95.37          d = find_domain_by_id(op->u.setdomainhandle.domain);
   95.38          if ( d != NULL )
    96.1 --- a/xen/common/grant_table.c	Sat Apr 15 19:25:09 2006 +0100
    96.2 +++ b/xen/common/grant_table.c	Sat Apr 15 19:25:21 2006 +0100
    96.3 @@ -41,21 +41,21 @@
    96.4  
    96.5  static inline int
    96.6  get_maptrack_handle(
    96.7 -    grant_table_t *t)
    96.8 +    struct grant_table *t)
    96.9  {
   96.10      unsigned int h;
   96.11      if ( unlikely((h = t->maptrack_head) == (t->maptrack_limit - 1)) )
   96.12          return -1;
   96.13 -    t->maptrack_head = t->maptrack[h].ref_and_flags >> MAPTRACK_REF_SHIFT;
   96.14 +    t->maptrack_head = t->maptrack[h].ref;
   96.15      t->map_count++;
   96.16      return h;
   96.17  }
   96.18  
   96.19  static inline void
   96.20  put_maptrack_handle(
   96.21 -    grant_table_t *t, int handle)
   96.22 +    struct grant_table *t, int handle)
   96.23  {
   96.24 -    t->maptrack[handle].ref_and_flags = t->maptrack_head << MAPTRACK_REF_SHIFT;
   96.25 +    t->maptrack[handle].ref = t->maptrack_head;
   96.26      t->maptrack_head = handle;
   96.27      t->map_count--;
   96.28  }
   96.29 @@ -76,7 +76,7 @@ static void
   96.30      int            handle;
   96.31      unsigned long  frame = 0;
   96.32      int            rc = GNTST_okay;
   96.33 -    active_grant_entry_t *act;
   96.34 +    struct active_grant_entry *act;
   96.35  
   96.36      /* Entry details from @rd's shared grant table. */
   96.37      grant_entry_t *sha;
   96.38 @@ -123,9 +123,9 @@ static void
   96.39      /* Get a maptrack handle. */
   96.40      if ( unlikely((handle = get_maptrack_handle(ld->grant_table)) == -1) )
   96.41      {
   96.42 -        int              i;
   96.43 -        grant_mapping_t *new_mt;
   96.44 -        grant_table_t   *lgt = ld->grant_table;
   96.45 +        int                   i;
   96.46 +        struct grant_mapping *new_mt;
   96.47 +        struct grant_table   *lgt = ld->grant_table;
   96.48  
   96.49          if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
   96.50          {
   96.51 @@ -147,7 +147,7 @@ static void
   96.52  
   96.53          memcpy(new_mt, lgt->maptrack, PAGE_SIZE << lgt->maptrack_order);
   96.54          for ( i = lgt->maptrack_limit; i < (lgt->maptrack_limit << 1); i++ )
   96.55 -            new_mt[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
   96.56 +            new_mt[i].ref = i+1;
   96.57  
   96.58          free_xenheap_pages(lgt->maptrack, lgt->maptrack_order);
   96.59          lgt->maptrack          = new_mt;
   96.60 @@ -264,10 +264,9 @@ static void
   96.61  
   96.62      TRACE_1D(TRC_MEM_PAGE_GRANT_MAP, op->dom);
   96.63  
   96.64 -    ld->grant_table->maptrack[handle].domid         = op->dom;
   96.65 -    ld->grant_table->maptrack[handle].ref_and_flags =
   96.66 -        (op->ref << MAPTRACK_REF_SHIFT) |
   96.67 -        (op->flags & MAPTRACK_GNTMAP_MASK);
   96.68 +    ld->grant_table->maptrack[handle].domid = op->dom;
   96.69 +    ld->grant_table->maptrack[handle].ref   = op->ref;
   96.70 +    ld->grant_table->maptrack[handle].flags = op->flags;
   96.71  
   96.72      op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
   96.73      op->handle       = handle;
   96.74 @@ -326,9 +325,9 @@ static void
   96.75      domid_t          dom;
   96.76      grant_ref_t      ref;
   96.77      struct domain   *ld, *rd;
   96.78 -    active_grant_entry_t *act;
   96.79 +    struct active_grant_entry *act;
   96.80      grant_entry_t   *sha;
   96.81 -    grant_mapping_t *map;
   96.82 +    struct grant_mapping *map;
   96.83      u16              flags;
   96.84      s16              rc = 0;
   96.85      unsigned long    frame;
   96.86 @@ -340,7 +339,7 @@ static void
   96.87      map = &ld->grant_table->maptrack[op->handle];
   96.88  
   96.89      if ( unlikely(op->handle >= ld->grant_table->maptrack_limit) ||
   96.90 -         unlikely(!(map->ref_and_flags & MAPTRACK_GNTMAP_MASK)) )
   96.91 +         unlikely(!map->flags) )
   96.92      {
   96.93          DPRINTK("Bad handle (%d).\n", op->handle);
   96.94          op->status = GNTST_bad_handle;
   96.95 @@ -348,8 +347,8 @@ static void
   96.96      }
   96.97  
   96.98      dom   = map->domid;
   96.99 -    ref   = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  96.100 -    flags = map->ref_and_flags & MAPTRACK_GNTMAP_MASK;
  96.101 +    ref   = map->ref;
  96.102 +    flags = map->flags;
  96.103  
  96.104      if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
  96.105           unlikely(ld == rd) )
  96.106 @@ -380,7 +379,7 @@ static void
  96.107          if ( flags & GNTMAP_device_map )
  96.108          {
  96.109              ASSERT(act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask));
  96.110 -            map->ref_and_flags &= ~GNTMAP_device_map;
  96.111 +            map->flags &= ~GNTMAP_device_map;
  96.112              if ( flags & GNTMAP_readonly )
  96.113              {
  96.114                  act->pin -= GNTPIN_devr_inc;
  96.115 @@ -401,7 +400,7 @@ static void
  96.116              goto unmap_out;
  96.117  
  96.118          ASSERT(act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask));
  96.119 -        map->ref_and_flags &= ~GNTMAP_host_map;
  96.120 +        map->flags &= ~GNTMAP_host_map;
  96.121          if ( flags & GNTMAP_readonly )
  96.122          {
  96.123              act->pin -= GNTPIN_hstr_inc;
  96.124 @@ -414,9 +413,9 @@ static void
  96.125          }
  96.126      }
  96.127  
  96.128 -    if ( (map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
  96.129 +    if ( (map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
  96.130      {
  96.131 -        map->ref_and_flags = 0;
  96.132 +        map->flags = 0;
  96.133          put_maptrack_handle(ld->grant_table, op->handle);
  96.134      }
  96.135  
  96.136 @@ -534,12 +533,12 @@ static int
  96.137  gnttab_prepare_for_transfer(
  96.138      struct domain *rd, struct domain *ld, grant_ref_t ref)
  96.139  {
  96.140 -    grant_table_t *rgt;
  96.141 -    grant_entry_t *sha;
  96.142 -    domid_t        sdom;
  96.143 -    u16            sflags;
  96.144 -    u32            scombo, prev_scombo;
  96.145 -    int            retries = 0;
  96.146 +    struct grant_table *rgt;
  96.147 +    struct grant_entry *sha;
  96.148 +    domid_t             sdom;
  96.149 +    u16                 sflags;
  96.150 +    u32                 scombo, prev_scombo;
  96.151 +    int                 retries = 0;
  96.152  
  96.153      if ( unlikely((rgt = rd->grant_table) == NULL) ||
  96.154           unlikely(ref >= NR_GRANT_ENTRIES) )
  96.155 @@ -775,10 +774,11 @@ int
  96.156  grant_table_create(
  96.157      struct domain *d)
  96.158  {
  96.159 -    grant_table_t *t;
  96.160 -    int            i;
  96.161 +    struct grant_table *t;
  96.162 +    int                 i;
  96.163  
  96.164 -    if ( (t = xmalloc(grant_table_t)) == NULL )
  96.165 +    BUG_ON(MAPTRACK_MAX_ENTRIES < NR_GRANT_ENTRIES);
  96.166 +    if ( (t = xmalloc(struct grant_table)) == NULL )
  96.167          goto no_mem;
  96.168  
  96.169      /* Simple stuff. */
  96.170 @@ -786,19 +786,19 @@ grant_table_create(
  96.171      spin_lock_init(&t->lock);
  96.172  
  96.173      /* Active grant table. */
  96.174 -    if ( (t->active = xmalloc_array(active_grant_entry_t, NR_GRANT_ENTRIES))
  96.175 -         == NULL )
  96.176 +    t->active = xmalloc_array(struct active_grant_entry, NR_GRANT_ENTRIES);
  96.177 +    if ( t->active == NULL )
  96.178          goto no_mem;
  96.179 -    memset(t->active, 0, sizeof(active_grant_entry_t) * NR_GRANT_ENTRIES);
  96.180 +    memset(t->active, 0, sizeof(struct active_grant_entry) * NR_GRANT_ENTRIES);
  96.181  
  96.182      /* Tracking of mapped foreign frames table */
  96.183      if ( (t->maptrack = alloc_xenheap_page()) == NULL )
  96.184          goto no_mem;
  96.185      t->maptrack_order = 0;
  96.186 -    t->maptrack_limit = PAGE_SIZE / sizeof(grant_mapping_t);
  96.187 +    t->maptrack_limit = PAGE_SIZE / sizeof(struct grant_mapping);
  96.188      memset(t->maptrack, 0, PAGE_SIZE);
  96.189      for ( i = 0; i < t->maptrack_limit; i++ )
  96.190 -        t->maptrack[i].ref_and_flags = (i+1) << MAPTRACK_REF_SHIFT;
  96.191 +        t->maptrack[i].ref = i+1;
  96.192  
  96.193      /* Shared grant table. */
  96.194      t->shared = alloc_xenheap_pages(ORDER_GRANT_FRAMES);
  96.195 @@ -828,27 +828,26 @@ void
  96.196  gnttab_release_mappings(
  96.197      struct domain *d)
  96.198  {
  96.199 -    grant_table_t        *gt = d->grant_table;
  96.200 -    grant_mapping_t      *map;
  96.201 +    struct grant_table   *gt = d->grant_table;
  96.202 +    struct grant_mapping *map;
  96.203      grant_ref_t           ref;
  96.204      grant_handle_t        handle;
  96.205      struct domain        *rd;
  96.206 -    active_grant_entry_t *act;
  96.207 -    grant_entry_t        *sha;
  96.208 +    struct active_grant_entry *act;
  96.209 +    struct grant_entry   *sha;
  96.210  
  96.211      BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));
  96.212  
  96.213      for ( handle = 0; handle < gt->maptrack_limit; handle++ )
  96.214      {
  96.215          map = &gt->maptrack[handle];
  96.216 -        if ( !(map->ref_and_flags & (GNTMAP_device_map|GNTMAP_host_map)) )
  96.217 +        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
  96.218              continue;
  96.219  
  96.220 -        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
  96.221 +        ref = map->ref;
  96.222  
  96.223          DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
  96.224 -                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK,
  96.225 -                map->domid);
  96.226 +                handle, ref, map->flags, map->domid);
  96.227  
  96.228          rd = find_domain_by_id(map->domid);
  96.229          BUG_ON(rd == NULL);
  96.230 @@ -858,16 +857,16 @@ gnttab_release_mappings(
  96.231          act = &rd->grant_table->active[ref];
  96.232          sha = &rd->grant_table->shared[ref];
  96.233  
  96.234 -        if ( map->ref_and_flags & GNTMAP_readonly )
  96.235 +        if ( map->flags & GNTMAP_readonly )
  96.236          {
  96.237 -            if ( map->ref_and_flags & GNTMAP_device_map )
  96.238 +            if ( map->flags & GNTMAP_device_map )
  96.239              {
  96.240                  BUG_ON(!(act->pin & GNTPIN_devr_mask));
  96.241                  act->pin -= GNTPIN_devr_inc;
  96.242                  put_page(mfn_to_page(act->frame));
  96.243              }
  96.244  
  96.245 -            if ( map->ref_and_flags & GNTMAP_host_map )
  96.246 +            if ( map->flags & GNTMAP_host_map )
  96.247              {
  96.248                  BUG_ON(!(act->pin & GNTPIN_hstr_mask));
  96.249                  act->pin -= GNTPIN_hstr_inc;
  96.250 @@ -877,14 +876,14 @@ gnttab_release_mappings(
  96.251          }
  96.252          else
  96.253          {
  96.254 -            if ( map->ref_and_flags & GNTMAP_device_map )
  96.255 +            if ( map->flags & GNTMAP_device_map )
  96.256              {
  96.257                  BUG_ON(!(act->pin & GNTPIN_devw_mask));
  96.258                  act->pin -= GNTPIN_devw_inc;
  96.259                  put_page_and_type(mfn_to_page(act->frame));
  96.260              }
  96.261  
  96.262 -            if ( map->ref_and_flags & GNTMAP_host_map )
  96.263 +            if ( map->flags & GNTMAP_host_map )
  96.264              {
  96.265                  BUG_ON(!(act->pin & GNTPIN_hstw_mask));
  96.266                  act->pin -= GNTPIN_hstw_inc;
  96.267 @@ -903,7 +902,7 @@ gnttab_release_mappings(
  96.268  
  96.269          put_domain(rd);
  96.270  
  96.271 -        map->ref_and_flags = 0;
  96.272 +        map->flags = 0;
  96.273      }
  96.274  }
  96.275  
  96.276 @@ -912,7 +911,7 @@ void
  96.277  grant_table_destroy(
  96.278      struct domain *d)
  96.279  {
  96.280 -    grant_table_t *t = d->grant_table;
  96.281 +    struct grant_table *t = d->grant_table;
  96.282  
  96.283      if ( t == NULL )
  96.284          return;
    97.1 --- a/xen/drivers/char/console.c	Sat Apr 15 19:25:09 2006 +0100
    97.2 +++ b/xen/drivers/char/console.c	Sat Apr 15 19:25:21 2006 +0100
    97.3 @@ -65,11 +65,12 @@ spinlock_t console_lock = SPIN_LOCK_UNLO
    97.4  #define COLUMNS     80
    97.5  #define LINES       25
    97.6  #define ATTRIBUTE    7
    97.7 +#define VIDEO_SIZE  (COLUMNS * LINES * 2)
    97.8  
    97.9  /* Clear the screen and initialize VIDEO, XPOS and YPOS.  */
   97.10  static void cls(void)
   97.11  {
   97.12 -    memset(video, 0, COLUMNS * LINES * 2);
   97.13 +    memset(video, 0, VIDEO_SIZE);
   97.14      xpos = ypos = 0;
   97.15      outw(10+(1<<(5+8)), 0x3d4); /* cursor off */
   97.16  }
   97.17 @@ -107,9 +108,9 @@ static int detect_vga(void)
   97.18       * 
   97.19       * These checks are basically to detect headless server boxes.
   97.20       */
   97.21 -    return (detect_video(__va(0xA0000)) || 
   97.22 -            detect_video(__va(0xB0000)) || 
   97.23 -            detect_video(__va(0xB8000)));
   97.24 +    return (detect_video(ioremap(0xA0000, VIDEO_SIZE)) || 
   97.25 +            detect_video(ioremap(0xB0000, VIDEO_SIZE)) || 
   97.26 +            detect_video(ioremap(0xB8000, VIDEO_SIZE)));
   97.27  }
   97.28  
   97.29  /* This is actually code from vgaHWRestore in an old version of XFree86 :-) */
   97.30 @@ -143,7 +144,7 @@ static void init_vga(void)
   97.31          return;
   97.32      }
   97.33  
   97.34 -    video = __va(0xB8000);
   97.35 +    video = ioremap(0xB8000, VIDEO_SIZE);
   97.36  
   97.37      tmp = inb(0x3da);
   97.38      outb(0x00, 0x3c0);
   97.39 @@ -180,12 +181,10 @@ static void put_newline(void)
   97.40  
   97.41      if (ypos >= LINES)
   97.42      {
   97.43 -        static char zeroarr[2*COLUMNS] = { 0 };
   97.44          ypos = LINES-1;
   97.45 -        memcpy((char*)video, 
   97.46 -               (char*)video + 2*COLUMNS, (LINES-1)*2*COLUMNS);
   97.47 -        memcpy((char*)video + (LINES-1)*2*COLUMNS, 
   97.48 -               zeroarr, 2*COLUMNS);
   97.49 +        memmove((char*)video, 
   97.50 +                (char*)video + 2*COLUMNS, (LINES-1)*2*COLUMNS);
   97.51 +        memset((char*)video + (LINES-1)*2*COLUMNS, 0, 2*COLUMNS);
   97.52      }
   97.53  }
   97.54  
    98.1 --- a/xen/drivers/char/ns16550.c	Sat Apr 15 19:25:09 2006 +0100
    98.2 +++ b/xen/drivers/char/ns16550.c	Sat Apr 15 19:25:21 2006 +0100
    98.3 @@ -260,13 +260,20 @@ static void ns16550_endboot(struct seria
    98.4  #define ns16550_endboot NULL
    98.5  #endif
    98.6  
    98.7 +static int ns16550_irq(struct serial_port *port)
    98.8 +{
    98.9 +    struct ns16550 *uart = port->uart;
   98.10 +    return ((uart->irq > 0) ? uart->irq : -1);
   98.11 +}
   98.12 +
   98.13  static struct uart_driver ns16550_driver = {
   98.14      .init_preirq  = ns16550_init_preirq,
   98.15      .init_postirq = ns16550_init_postirq,
   98.16      .endboot      = ns16550_endboot,
   98.17      .tx_empty     = ns16550_tx_empty,
   98.18      .putc         = ns16550_putc,
   98.19 -    .getc         = ns16550_getc
   98.20 +    .getc         = ns16550_getc,
   98.21 +    .irq          = ns16550_irq
   98.22  };
   98.23  
   98.24  static int parse_parity_char(int c)
    99.1 --- a/xen/drivers/char/serial.c	Sat Apr 15 19:25:09 2006 +0100
    99.2 +++ b/xen/drivers/char/serial.c	Sat Apr 15 19:25:21 2006 +0100
    99.3 @@ -372,6 +372,15 @@ void serial_endboot(void)
    99.4              com[i].driver->endboot(&com[i]);
    99.5  }
    99.6  
    99.7 +int serial_irq(int idx)
    99.8 +{
    99.9 +    if ( (idx >= 0) && (idx < ARRAY_SIZE(com)) &&
   99.10 +         com[idx].driver && com[idx].driver->irq )
   99.11 +        return com[idx].driver->irq(&com[idx]);
   99.12 +
   99.13 +    return -1;
   99.14 +}
   99.15 +
   99.16  void serial_register_uart(int idx, struct uart_driver *driver, void *uart)
   99.17  {
   99.18      /* Store UART-specific info. */
   100.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Apr 15 19:25:09 2006 +0100
   100.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Sat Apr 15 19:25:21 2006 +0100
   100.3 @@ -61,8 +61,7 @@ extern unsigned int cpu_rev;
   100.4      CPU_BASED_MWAIT_EXITING | \
   100.5      CPU_BASED_MOV_DR_EXITING | \
   100.6      CPU_BASED_ACTIVATE_IO_BITMAP | \
   100.7 -    CPU_BASED_USE_TSC_OFFSETING  | \
   100.8 -    CPU_BASED_UNCOND_IO_EXITING \
   100.9 +    CPU_BASED_USE_TSC_OFFSETING  \
  100.10      )
  100.11  
  100.12  #define MONITOR_CPU_BASED_EXEC_CONTROLS_IA32E_MODE \
   101.1 --- a/xen/include/asm-x86/hvm/vpit.h	Sat Apr 15 19:25:09 2006 +0100
   101.2 +++ b/xen/include/asm-x86/hvm/vpit.h	Sat Apr 15 19:25:21 2006 +0100
   101.3 @@ -38,7 +38,8 @@
   101.4  struct hvm_virpit {
   101.5      /* for simulation of counter 0 in mode 2 */
   101.6      u64 period_cycles;          /* pit frequency in cpu cycles */
   101.7 -    s_time_t inject_point;      /* the time inject virt intr */
   101.8 +    s_time_t count_advance;     /* accumulated count advance since last fire */
   101.9 +    s_time_t count_point;        /* last point accumulating count advance */
  101.10      s_time_t scheduled;         /* scheduled timer interrupt */
  101.11      struct timer pit_timer;     /* periodic timer for mode 2*/
  101.12      unsigned int channel;       /* the pit channel, counter 0~2 */
   102.1 --- a/xen/include/asm-x86/irq.h	Sat Apr 15 19:25:09 2006 +0100
   102.2 +++ b/xen/include/asm-x86/irq.h	Sat Apr 15 19:25:21 2006 +0100
   102.3 @@ -11,8 +11,8 @@
   102.4  #define IO_APIC_IRQ(irq)    (((irq) >= 16) || ((1<<(irq)) & io_apic_irqs))
   102.5  #define IO_APIC_VECTOR(irq) (irq_vector[irq])
   102.6  
   102.7 -#define LEGACY_VECTOR(irq)          ((irq) + FIRST_EXTERNAL_VECTOR)
   102.8 -#define LEGACY_IRQ_FROM_VECTOR(vec) ((vec) - FIRST_EXTERNAL_VECTOR)
   102.9 +#define LEGACY_VECTOR(irq)          ((irq) + FIRST_LEGACY_VECTOR)
  102.10 +#define LEGACY_IRQ_FROM_VECTOR(vec) ((vec) - FIRST_LEGACY_VECTOR)
  102.11  
  102.12  #define irq_to_vector(irq)  \
  102.13      (IO_APIC_IRQ(irq) ? IO_APIC_VECTOR(irq) : LEGACY_VECTOR(irq))
   103.1 --- a/xen/include/asm-x86/mach-default/irq_vectors.h	Sat Apr 15 19:25:09 2006 +0100
   103.2 +++ b/xen/include/asm-x86/mach-default/irq_vectors.h	Sat Apr 15 19:25:21 2006 +0100
   103.3 @@ -1,96 +1,36 @@
   103.4 -/*
   103.5 - * This file should contain #defines for all of the interrupt vector
   103.6 - * numbers used by this architecture.
   103.7 - *
   103.8 - * In addition, there are some standard defines:
   103.9 - *
  103.10 - *	FIRST_EXTERNAL_VECTOR:
  103.11 - *		The first free place for external interrupts
  103.12 - *
  103.13 - *	SYSCALL_VECTOR:
  103.14 - *		The IRQ vector a syscall makes the user to kernel transition
  103.15 - *		under.
  103.16 - *
  103.17 - *	TIMER_IRQ:
  103.18 - *		The IRQ number the timer interrupt comes in at.
  103.19 - *
  103.20 - *	NR_IRQS:
  103.21 - *		The total number of interrupt vectors (including all the
  103.22 - *		architecture specific interrupts) needed.
  103.23 - *
  103.24 - */			
  103.25  #ifndef _ASM_IRQ_VECTORS_H
  103.26  #define _ASM_IRQ_VECTORS_H
  103.27  
  103.28 -/*
  103.29 - * IDT vectors usable for external interrupt sources start
  103.30 - * at 0x20:
  103.31 - */
  103.32 -#define FIRST_EXTERNAL_VECTOR	0x20
  103.33 -
  103.34 -#define HYPERCALL_VECTOR	0x82
  103.35 -
  103.36 -/*
  103.37 - * Vectors 0x20-0x2f are used for ISA interrupts.
  103.38 - */
  103.39 -
  103.40 -/*
  103.41 - * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
  103.42 - *
  103.43 - *  some of the following vectors are 'rare', they are merged
  103.44 - *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  103.45 - *  TLB, reschedule and local APIC vectors are performance-critical.
  103.46 - *
  103.47 - *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
  103.48 - */
  103.49 +/* Processor-initiated interrupts are all high priority. */
  103.50  #define SPURIOUS_APIC_VECTOR	0xff
  103.51  #define ERROR_APIC_VECTOR	0xfe
  103.52  #define INVALIDATE_TLB_VECTOR	0xfd
  103.53  #define EVENT_CHECK_VECTOR	0xfc
  103.54  #define CALL_FUNCTION_VECTOR	0xfb
  103.55 -
  103.56 -#define THERMAL_APIC_VECTOR	0xf0
  103.57 -/*
  103.58 - * Local APIC timer IRQ vector is on a different priority level,
  103.59 - * to work around the 'lost local interrupt if more than 2 IRQ
  103.60 - * sources per level' errata.
  103.61 - */
  103.62 -#define LOCAL_TIMER_VECTOR	0xef
  103.63 -
  103.64 -/*
  103.65 - * First APIC vector available to drivers: (vectors 0x30-0xee)
  103.66 - * we start at 0x31 to spread out vectors evenly between priority
  103.67 - * levels. (0x80 is the syscall vector)
  103.68 - */
  103.69 -#define FIRST_DEVICE_VECTOR	0x31
  103.70 -#define FIRST_SYSTEM_VECTOR	0xef
  103.71 -
  103.72 -#define TIMER_IRQ 0
  103.73 +#define THERMAL_APIC_VECTOR	0xfa
  103.74 +#define LOCAL_TIMER_VECTOR	0xf9
  103.75  
  103.76  /*
  103.77 - * 16 8259A IRQ's, 208 potential APIC interrupt sources.
  103.78 - * Right now the APIC is mostly only used for SMP.
  103.79 - * 256 vectors is an architectural limit. (we can have
  103.80 - * more than 256 devices theoretically, but they will
  103.81 - * have to use shared interrupts)
  103.82 - * Since vectors 0x00-0x1f are used/reserved for the CPU,
  103.83 - * the usable vector space is 0x20-0xff (224 vectors)
  103.84 + * High-priority dynamically-allocated vectors. For interrupts that
  103.85 + * must be higher priority than any guest-bound interrupt.
  103.86   */
  103.87 +#define FIRST_HIPRIORITY_VECTOR	0xf0
  103.88 +#define LAST_HIPRIORITY_VECTOR  0xf8
  103.89  
  103.90 -/*
  103.91 - * The maximum number of vectors supported by i386 processors
  103.92 - * is limited to 256. For processors other than i386, NR_VECTORS
  103.93 - * should be changed accordingly.
  103.94 - */
  103.95 +/* Legacy PIC uses vectors 0xe0-0xef. */
  103.96 +#define FIRST_LEGACY_VECTOR	0xe0
  103.97 +#define LAST_LEGACY_VECTOR      0xef
  103.98 +
  103.99 +#define HYPERCALL_VECTOR	0x82
 103.100 +
 103.101 +/* Dynamically-allocated vectors available to any driver. */
 103.102 +#define FIRST_DYNAMIC_VECTOR	0x20
 103.103 +#define LAST_DYNAMIC_VECTOR	0xdf
 103.104 +
 103.105  #define NR_VECTORS 256
 103.106  
 103.107 -#include "irq_vectors_limits.h"
 103.108 -
 103.109 -#define FPU_IRQ			13
 103.110 -
 103.111 -#define	FIRST_VM86_IRQ		3
 103.112 -#define LAST_VM86_IRQ		15
 103.113 -#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 103.114 -
 103.115 +/* Limited by number of trap vectors. */
 103.116 +#define NR_IRQS        NR_VECTORS
 103.117 +#define NR_IRQ_VECTORS NR_IRQS
 103.118  
 103.119  #endif /* _ASM_IRQ_VECTORS_H */
   104.1 --- a/xen/include/asm-x86/mach-default/irq_vectors_limits.h	Sat Apr 15 19:25:09 2006 +0100
   104.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   104.3 @@ -1,8 +0,0 @@
   104.4 -#ifndef _ASM_IRQ_VECTORS_LIMITS_H
   104.5 -#define _ASM_IRQ_VECTORS_LIMITS_H
   104.6 -
   104.7 -/* Limited by number of trap vectors. */
   104.8 -#define NR_IRQS        FIRST_SYSTEM_VECTOR
   104.9 -#define NR_IRQ_VECTORS NR_IRQS
  104.10 -
  104.11 -#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
   105.1 --- a/xen/include/asm-x86/mm.h	Sat Apr 15 19:25:09 2006 +0100
   105.2 +++ b/xen/include/asm-x86/mm.h	Sat Apr 15 19:25:21 2006 +0100
   105.3 @@ -103,11 +103,13 @@ struct page_info
   105.4  #define PGT_high_mfn_mask   (0xfffUL << PGT_high_mfn_shift)
   105.5  #define PGT_mfn_mask        (((1U<<23)-1) | PGT_high_mfn_mask)
   105.6  #define PGT_high_mfn_nx     (0x800UL << PGT_high_mfn_shift)
   105.7 +#define PGT_pae_idx_shift   PGT_high_mfn_shift
   105.8  #else
   105.9   /* 23-bit mfn mask for shadow types: good for up to 32GB RAM. */
  105.10  #define PGT_mfn_mask        ((1U<<23)-1)
  105.11   /* NX for PAE xen is not supported yet */
  105.12  #define PGT_high_mfn_nx     (1ULL << 63)
  105.13 +#define PGT_pae_idx_shift   23
  105.14  #endif
  105.15  
  105.16  #define PGT_score_shift     23
   106.1 --- a/xen/include/asm-x86/shadow_64.h	Sat Apr 15 19:25:09 2006 +0100
   106.2 +++ b/xen/include/asm-x86/shadow_64.h	Sat Apr 15 19:25:21 2006 +0100
   106.3 @@ -119,6 +119,8 @@ typedef struct { intpte_t lo; } pgentry_
   106.4  #define PAE_CR3_IDX_MASK    0x7f
   106.5  #define PAE_CR3_IDX_NO      128
   106.6  
   106.7 +#define PAE_PDPT_RESERVED   0x1e6 /* [8:5], [2,1] */
   106.8 +
   106.9  /******************************************************************************/
  106.10  static inline int  table_offset_64(unsigned long va, int level)
  106.11  {
   107.1 --- a/xen/include/public/xen.h	Sat Apr 15 19:25:09 2006 +0100
   107.2 +++ b/xen/include/public/xen.h	Sat Apr 15 19:25:21 2006 +0100
   107.3 @@ -286,7 +286,8 @@ typedef struct vcpu_time_info {
   107.4      uint64_t system_time;     /* Time, in nanosecs, since boot.    */
   107.5      /*
   107.6       * Current system time:
   107.7 -     *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
   107.8 +     *   system_time +
   107.9 +     *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
  107.10       * CPU frequency (Hz):
  107.11       *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
  107.12       */
   108.1 --- a/xen/include/xen/grant_table.h	Sat Apr 15 19:25:09 2006 +0100
   108.2 +++ b/xen/include/xen/grant_table.h	Sat Apr 15 19:25:21 2006 +0100
   108.3 @@ -29,11 +29,11 @@
   108.4  #include <asm/grant_table.h>
   108.5  
   108.6  /* Active grant entry - used for shadowing GTF_permit_access grants. */
   108.7 -typedef struct {
   108.8 +struct active_grant_entry {
   108.9      u32           pin;    /* Reference count information.  */
  108.10      domid_t       domid;  /* Domain being granted access.  */
  108.11      unsigned long frame;  /* Frame being granted.          */
  108.12 -} active_grant_entry_t;
  108.13 +};
  108.14  
  108.15   /* Count of writable host-CPU mappings. */
  108.16  #define GNTPIN_hstw_shift    (0)
  108.17 @@ -60,29 +60,30 @@ typedef struct {
  108.18   * Tracks a mapping of another domain's grant reference. Each domain has a
  108.19   * table of these, indexes into which are returned as a 'mapping handle'.
  108.20   */
  108.21 -typedef struct {
  108.22 -    u16      ref_and_flags; /* 0-4: GNTMAP_* ; 5-15: grant ref */
  108.23 +struct grant_mapping {
  108.24 +    u32      ref;           /* grant ref */
  108.25 +    u16      flags;         /* 0-4: GNTMAP_* ; 5-15: unused */
  108.26      domid_t  domid;         /* granting domain */
  108.27 -} grant_mapping_t;
  108.28 -#define MAPTRACK_GNTMAP_MASK  0x1f
  108.29 -#define MAPTRACK_REF_SHIFT    5
  108.30 -#define MAPTRACK_MAX_ENTRIES  (1 << (16 - MAPTRACK_REF_SHIFT))
  108.31 +};
  108.32 +
  108.33 +/* Fairly arbitrary. [POLICY] */
  108.34 +#define MAPTRACK_MAX_ENTRIES 16384
  108.35  
  108.36  /* Per-domain grant information. */
  108.37 -typedef struct {
  108.38 +struct grant_table {
  108.39      /* Shared grant table (see include/public/grant_table.h). */
  108.40 -    grant_entry_t        *shared;
  108.41 +    struct grant_entry   *shared;
  108.42      /* Active grant table. */
  108.43 -    active_grant_entry_t *active;
  108.44 +    struct active_grant_entry *active;
  108.45      /* Mapping tracking table. */
  108.46 -    grant_mapping_t      *maptrack;
  108.47 +    struct grant_mapping *maptrack;
  108.48      unsigned int          maptrack_head;
  108.49      unsigned int          maptrack_order;
  108.50      unsigned int          maptrack_limit;
  108.51      unsigned int          map_count;
  108.52      /* Lock protecting updates to active and shared grant tables. */
  108.53      spinlock_t            lock;
  108.54 -} grant_table_t;
  108.55 +};
  108.56  
  108.57  /* Create/destroy per-domain grant table context. */
  108.58  int grant_table_create(
   109.1 --- a/xen/include/xen/sched.h	Sat Apr 15 19:25:09 2006 +0100
   109.2 +++ b/xen/include/xen/sched.h	Sat Apr 15 19:25:21 2006 +0100
   109.3 @@ -125,7 +125,7 @@ struct domain
   109.4      struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
   109.5      spinlock_t       evtchn_lock;
   109.6  
   109.7 -    grant_table_t   *grant_table;
   109.8 +    struct grant_table *grant_table;
   109.9  
  109.10      /*
  109.11       * Interrupt to event-channel mappings. Updates should be protected by the 
   110.1 --- a/xen/include/xen/serial.h	Sat Apr 15 19:25:09 2006 +0100
   110.2 +++ b/xen/include/xen/serial.h	Sat Apr 15 19:25:21 2006 +0100
   110.3 @@ -57,6 +57,8 @@ struct uart_driver {
   110.4      void (*putc)(struct serial_port *, char);
   110.5      /* Get a character from the serial line: returns 0 if none available. */
   110.6      int  (*getc)(struct serial_port *, char *);
   110.7 +    /* Get IRQ number for this port's serial line: returns -1 if none. */
   110.8 +    int  (*irq)(struct serial_port *);
   110.9  };
  110.10  
  110.11  /* 'Serial handles' are composed from the following fields. */
  110.12 @@ -99,6 +101,9 @@ void serial_end_sync(int handle);
  110.13  /* Return number of bytes headroom in transmit buffer. */
  110.14  int serial_tx_space(int handle);
  110.15  
  110.16 +/* Return irq number for specified serial port (identified by index). */
  110.17 +int serial_irq(int idx);
  110.18 +
  110.19  /*
  110.20   * Initialisation and helper functions for uart drivers.
  110.21   */