ia64/xen-unstable

changeset 13956:9af0c7e4ff51

Merge with xenppc-unstable.hg
author kfraser@localhost.localdomain
date Wed Feb 14 19:01:35 2007 +0000 (2007-02-14)
parents aea80dbf6d96 58e71ae679d5
children 9529d667d042 f8030a569811
files linux-2.6-xen-sparse/include/asm-i386/a.out.h
      tools/examples/block
      tools/libxc/xenguest.h
      tools/libxc/xg_private.c
      xen/common/libelf/libelf-loader.c
      xen/include/asm-ia64/kexec.h
      xen/include/asm-ia64/linux/asm/percpu.h
      xen/include/asm-powerpc/kexec.h
      xen/include/asm-x86/kexec.h
      xen/include/asm-x86/x86_32/kexec.h
      xen/include/asm-x86/x86_64/kexec.h
      xen/include/public/arch-ia64.h
line diff
     1.1 --- a/.hgignore	Fri Feb 09 14:43:22 2007 -0600
     1.2 +++ b/.hgignore	Wed Feb 14 19:01:35 2007 +0000
     1.3 @@ -107,6 +107,7 @@
     1.4  ^tools/firmware/rombios/BIOS-bochs-[^/]*$
     1.5  ^tools/firmware/rombios/_rombios[^/]*_\.c$
     1.6  ^tools/firmware/rombios/rombios[^/]*\.s$
     1.7 +^tools/firmware/rombios/32bit/32bitbios_flat\.h$
     1.8  ^tools/firmware/vmxassist/gen$
     1.9  ^tools/firmware/vmxassist/offsets\.h$
    1.10  ^tools/firmware/vmxassist/vmxassist$
    1.11 @@ -137,6 +138,7 @@
    1.12  ^tools/misc/miniterm/miniterm$
    1.13  ^tools/misc/xc_shadow$
    1.14  ^tools/misc/xen_cpuperf$
    1.15 +^tools/misc/xen-detect$
    1.16  ^tools/misc/xenperf$
    1.17  ^tools/pygrub/build/.*$
    1.18  ^tools/python/build/.*$
     2.1 --- a/buildconfigs/linux-defconfig_xen0_ia64	Fri Feb 09 14:43:22 2007 -0600
     2.2 +++ b/buildconfigs/linux-defconfig_xen0_ia64	Wed Feb 14 19:01:35 2007 +0000
     2.3 @@ -1,7 +1,7 @@
     2.4  #
     2.5  # Automatically generated make config: don't edit
     2.6 -# Linux kernel version: 2.6.16.29-xen0
     2.7 -# Tue Nov 14 10:39:09 2006
     2.8 +# Linux kernel version: 2.6.18-xen0
     2.9 +# Mon Jan 29 10:16:18 2007
    2.10  #
    2.11  
    2.12  #
    2.13 @@ -21,14 +21,16 @@ CONFIG_SYSVIPC=y
    2.14  CONFIG_POSIX_MQUEUE=y
    2.15  CONFIG_BSD_PROCESS_ACCT=y
    2.16  # CONFIG_BSD_PROCESS_ACCT_V3 is not set
    2.17 -CONFIG_SYSCTL=y
    2.18 +# CONFIG_TASKSTATS is not set
    2.19  # CONFIG_AUDIT is not set
    2.20  CONFIG_IKCONFIG=y
    2.21  CONFIG_IKCONFIG_PROC=y
    2.22  # CONFIG_CPUSETS is not set
    2.23 +# CONFIG_RELAY is not set
    2.24  CONFIG_INITRAMFS_SOURCE=""
    2.25  CONFIG_CC_OPTIMIZE_FOR_SIZE=y
    2.26  # CONFIG_EMBEDDED is not set
    2.27 +CONFIG_SYSCTL=y
    2.28  CONFIG_KALLSYMS=y
    2.29  CONFIG_KALLSYMS_ALL=y
    2.30  CONFIG_KALLSYMS_EXTRA_PASS=y
    2.31 @@ -40,11 +42,9 @@ CONFIG_BASE_FULL=y
    2.32  CONFIG_FUTEX=y
    2.33  CONFIG_EPOLL=y
    2.34  CONFIG_SHMEM=y
    2.35 -CONFIG_CC_ALIGN_FUNCTIONS=0
    2.36 -CONFIG_CC_ALIGN_LABELS=0
    2.37 -CONFIG_CC_ALIGN_LOOPS=0
    2.38 -CONFIG_CC_ALIGN_JUMPS=0
    2.39  CONFIG_SLAB=y
    2.40 +CONFIG_VM_EVENT_COUNTERS=y
    2.41 +CONFIG_RT_MUTEXES=y
    2.42  # CONFIG_TINY_SHMEM is not set
    2.43  CONFIG_BASE_SMALL=0
    2.44  # CONFIG_SLOB is not set
    2.45 @@ -55,7 +55,6 @@ CONFIG_BASE_SMALL=0
    2.46  CONFIG_MODULES=y
    2.47  CONFIG_MODULE_UNLOAD=y
    2.48  # CONFIG_MODULE_FORCE_UNLOAD is not set
    2.49 -CONFIG_OBSOLETE_MODPARM=y
    2.50  CONFIG_MODVERSIONS=y
    2.51  CONFIG_MODULE_SRCVERSION_ALL=y
    2.52  CONFIG_KMOD=y
    2.53 @@ -64,6 +63,7 @@ CONFIG_STOP_MACHINE=y
    2.54  #
    2.55  # Block layer
    2.56  #
    2.57 +# CONFIG_BLK_DEV_IO_TRACE is not set
    2.58  
    2.59  #
    2.60  # IO Schedulers
    2.61 @@ -86,8 +86,10 @@ CONFIG_64BIT=y
    2.62  CONFIG_MMU=y
    2.63  CONFIG_SWIOTLB=y
    2.64  CONFIG_RWSEM_XCHGADD_ALGORITHM=y
    2.65 +CONFIG_GENERIC_FIND_NEXT_BIT=y
    2.66  CONFIG_GENERIC_CALIBRATE_DELAY=y
    2.67  CONFIG_TIME_INTERPOLATION=y
    2.68 +CONFIG_DMI=y
    2.69  CONFIG_EFI=y
    2.70  CONFIG_GENERIC_IOMAP=y
    2.71  CONFIG_XEN=y
    2.72 @@ -96,6 +98,7 @@ CONFIG_XEN_IA64_EXPOSE_P2M=y
    2.73  CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
    2.74  CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
    2.75  CONFIG_DMA_IS_DMA32=y
    2.76 +CONFIG_AUDIT_ARCH=y
    2.77  # CONFIG_IA64_GENERIC is not set
    2.78  CONFIG_IA64_DIG=y
    2.79  # CONFIG_IA64_HP_ZX1 is not set
    2.80 @@ -123,6 +126,7 @@ CONFIG_NR_CPUS=16
    2.81  CONFIG_HOTPLUG_CPU=y
    2.82  CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
    2.83  # CONFIG_SCHED_SMT is not set
    2.84 +# CONFIG_PERMIT_BSP_REMOVE is not set
    2.85  # CONFIG_PREEMPT is not set
    2.86  CONFIG_SELECT_MEMORY_MODEL=y
    2.87  CONFIG_FLATMEM_MANUAL=y
    2.88 @@ -132,6 +136,7 @@ CONFIG_FLATMEM=y
    2.89  CONFIG_FLAT_NODE_MEM_MAP=y
    2.90  # CONFIG_SPARSEMEM_STATIC is not set
    2.91  CONFIG_SPLIT_PTLOCK_CPUS=4
    2.92 +CONFIG_RESOURCES_64BIT=y
    2.93  CONFIG_ARCH_SELECT_MEMORY_MODEL=y
    2.94  CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
    2.95  CONFIG_ARCH_FLATMEM_ENABLE=y
    2.96 @@ -163,6 +168,7 @@ CONFIG_PM_LEGACY=y
    2.97  CONFIG_ACPI=y
    2.98  CONFIG_ACPI_BUTTON=y
    2.99  CONFIG_ACPI_FAN=y
   2.100 +# CONFIG_ACPI_DOCK is not set
   2.101  CONFIG_ACPI_PROCESSOR=y
   2.102  CONFIG_ACPI_HOTPLUG_CPU=y
   2.103  CONFIG_ACPI_THERMAL=y
   2.104 @@ -185,7 +191,7 @@ CONFIG_PCI=y
   2.105  CONFIG_PCI_DOMAINS=y
   2.106  CONFIG_XEN_PCIDEV_FRONTEND=y
   2.107  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
   2.108 -CONFIG_PCI_LEGACY_PROC=y
   2.109 +# CONFIG_PCIEPORTBUS is not set
   2.110  # CONFIG_PCI_DEBUG is not set
   2.111  
   2.112  #
   2.113 @@ -215,6 +221,8 @@ CONFIG_NET=y
   2.114  CONFIG_PACKET=y
   2.115  # CONFIG_PACKET_MMAP is not set
   2.116  CONFIG_UNIX=y
   2.117 +CONFIG_XFRM=y
   2.118 +# CONFIG_XFRM_USER is not set
   2.119  # CONFIG_NET_KEY is not set
   2.120  CONFIG_INET=y
   2.121  CONFIG_IP_MULTICAST=y
   2.122 @@ -229,7 +237,10 @@ CONFIG_SYN_COOKIES=y
   2.123  # CONFIG_INET_AH is not set
   2.124  # CONFIG_INET_ESP is not set
   2.125  # CONFIG_INET_IPCOMP is not set
   2.126 +# CONFIG_INET_XFRM_TUNNEL is not set
   2.127  # CONFIG_INET_TUNNEL is not set
   2.128 +CONFIG_INET_XFRM_MODE_TRANSPORT=y
   2.129 +CONFIG_INET_XFRM_MODE_TUNNEL=y
   2.130  CONFIG_INET_DIAG=y
   2.131  CONFIG_INET_TCP_DIAG=y
   2.132  # CONFIG_TCP_CONG_ADVANCED is not set
   2.133 @@ -240,6 +251,9 @@ CONFIG_TCP_CONG_BIC=y
   2.134  #
   2.135  # CONFIG_IP_VS is not set
   2.136  # CONFIG_IPV6 is not set
   2.137 +# CONFIG_INET6_XFRM_TUNNEL is not set
   2.138 +# CONFIG_INET6_TUNNEL is not set
   2.139 +# CONFIG_NETWORK_SECMARK is not set
   2.140  CONFIG_NETFILTER=y
   2.141  # CONFIG_NETFILTER_DEBUG is not set
   2.142  CONFIG_BRIDGE_NETFILTER=y
   2.143 @@ -280,12 +294,12 @@ CONFIG_BRIDGE_NETFILTER=y
   2.144  CONFIG_BRIDGE=y
   2.145  # CONFIG_VLAN_8021Q is not set
   2.146  # CONFIG_DECNET is not set
   2.147 +CONFIG_LLC=y
   2.148  # CONFIG_LLC2 is not set
   2.149  # CONFIG_IPX is not set
   2.150  # CONFIG_ATALK is not set
   2.151  # CONFIG_X25 is not set
   2.152  # CONFIG_LAPB is not set
   2.153 -# CONFIG_NET_DIVERT is not set
   2.154  # CONFIG_ECONET is not set
   2.155  # CONFIG_WAN_ROUTER is not set
   2.156  
   2.157 @@ -314,6 +328,7 @@ CONFIG_STANDALONE=y
   2.158  CONFIG_PREVENT_FIRMWARE_BUILD=y
   2.159  CONFIG_FW_LOADER=y
   2.160  # CONFIG_DEBUG_DRIVER is not set
   2.161 +# CONFIG_SYS_HYPERVISOR is not set
   2.162  
   2.163  #
   2.164  # Connector - unified userspace <-> kernelspace linker
   2.165 @@ -352,6 +367,7 @@ CONFIG_BLK_DEV_NBD=m
   2.166  CONFIG_BLK_DEV_RAM=y
   2.167  CONFIG_BLK_DEV_RAM_COUNT=16
   2.168  CONFIG_BLK_DEV_RAM_SIZE=4096
   2.169 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
   2.170  CONFIG_BLK_DEV_INITRD=y
   2.171  # CONFIG_CDROM_PKTCDVD is not set
   2.172  # CONFIG_ATA_OVER_ETH is not set
   2.173 @@ -463,6 +479,7 @@ CONFIG_SCSI_SAS_ATTRS=y
   2.174  # CONFIG_MEGARAID_LEGACY is not set
   2.175  # CONFIG_MEGARAID_SAS is not set
   2.176  # CONFIG_SCSI_SATA is not set
   2.177 +# CONFIG_SCSI_HPTIOP is not set
   2.178  # CONFIG_SCSI_DMX3191D is not set
   2.179  # CONFIG_SCSI_FUTURE_DOMAIN is not set
   2.180  # CONFIG_SCSI_IPS is not set
   2.181 @@ -472,10 +489,8 @@ CONFIG_SCSI_SYM53C8XX_2=y
   2.182  CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
   2.183  CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
   2.184  CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
   2.185 -# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
   2.186 +CONFIG_SCSI_SYM53C8XX_MMIO=y
   2.187  # CONFIG_SCSI_IPR is not set
   2.188 -CONFIG_SCSI_QLOGIC_FC=y
   2.189 -# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
   2.190  CONFIG_SCSI_QLOGIC_1280=y
   2.191  # CONFIG_SCSI_QLA_FC is not set
   2.192  # CONFIG_SCSI_LPFC is not set
   2.193 @@ -488,7 +503,13 @@ CONFIG_SCSI_QLOGIC_1280=y
   2.194  #
   2.195  CONFIG_MD=y
   2.196  # CONFIG_BLK_DEV_MD is not set
   2.197 -# CONFIG_BLK_DEV_DM is not set
   2.198 +CONFIG_BLK_DEV_DM=y
   2.199 +CONFIG_DM_CRYPT=m
   2.200 +CONFIG_DM_SNAPSHOT=m
   2.201 +CONFIG_DM_MIRROR=m
   2.202 +CONFIG_DM_ZERO=m
   2.203 +CONFIG_DM_MULTIPATH=m
   2.204 +CONFIG_DM_MULTIPATH_EMC=m
   2.205  
   2.206  #
   2.207  # Fusion MPT device support
   2.208 @@ -607,6 +628,7 @@ CONFIG_TIGON3=y
   2.209  # CONFIG_CHELSIO_T1 is not set
   2.210  # CONFIG_IXGB is not set
   2.211  # CONFIG_S2IO is not set
   2.212 +# CONFIG_MYRI10GE is not set
   2.213  
   2.214  #
   2.215  # Token Ring devices
   2.216 @@ -709,6 +731,7 @@ CONFIG_GAMEPORT=y
   2.217  CONFIG_VT=y
   2.218  CONFIG_VT_CONSOLE=y
   2.219  CONFIG_HW_CONSOLE=y
   2.220 +# CONFIG_VT_HW_CONSOLE_BINDING is not set
   2.221  CONFIG_SERIAL_NONSTANDARD=y
   2.222  # CONFIG_COMPUTONE is not set
   2.223  # CONFIG_ROCKETPORT is not set
   2.224 @@ -722,6 +745,7 @@ CONFIG_SERIAL_NONSTANDARD=y
   2.225  # CONFIG_N_HDLC is not set
   2.226  # CONFIG_SPECIALIX is not set
   2.227  # CONFIG_SX is not set
   2.228 +# CONFIG_RIO is not set
   2.229  # CONFIG_STALDRV is not set
   2.230  
   2.231  #
   2.232 @@ -755,6 +779,8 @@ CONFIG_EFI_RTC=y
   2.233  # Ftape, the floppy tape device driver
   2.234  #
   2.235  CONFIG_AGP=y
   2.236 +# CONFIG_AGP_SIS is not set
   2.237 +# CONFIG_AGP_VIA is not set
   2.238  CONFIG_AGP_I460=y
   2.239  CONFIG_DRM=y
   2.240  # CONFIG_DRM_TDFX is not set
   2.241 @@ -799,10 +825,10 @@ CONFIG_I2C_ALGOPCF=y
   2.242  # CONFIG_I2C_I810 is not set
   2.243  # CONFIG_I2C_PIIX4 is not set
   2.244  # CONFIG_I2C_NFORCE2 is not set
   2.245 +# CONFIG_I2C_OCORES is not set
   2.246  # CONFIG_I2C_PARPORT_LIGHT is not set
   2.247  # CONFIG_I2C_PROSAVAGE is not set
   2.248  # CONFIG_I2C_SAVAGE4 is not set
   2.249 -# CONFIG_SCx200_ACB is not set
   2.250  # CONFIG_I2C_SIS5595 is not set
   2.251  # CONFIG_I2C_SIS630 is not set
   2.252  # CONFIG_I2C_SIS96X is not set
   2.253 @@ -821,9 +847,7 @@ CONFIG_I2C_ALGOPCF=y
   2.254  # CONFIG_SENSORS_PCF8574 is not set
   2.255  # CONFIG_SENSORS_PCA9539 is not set
   2.256  # CONFIG_SENSORS_PCF8591 is not set
   2.257 -# CONFIG_SENSORS_RTC8564 is not set
   2.258  # CONFIG_SENSORS_MAX6875 is not set
   2.259 -# CONFIG_RTC_X1205_I2C is not set
   2.260  # CONFIG_I2C_DEBUG_CORE is not set
   2.261  # CONFIG_I2C_DEBUG_ALGO is not set
   2.262  # CONFIG_I2C_DEBUG_BUS is not set
   2.263 @@ -838,13 +862,13 @@ CONFIG_I2C_ALGOPCF=y
   2.264  #
   2.265  # Dallas's 1-wire bus
   2.266  #
   2.267 -# CONFIG_W1 is not set
   2.268  
   2.269  #
   2.270  # Hardware Monitoring support
   2.271  #
   2.272  CONFIG_HWMON=y
   2.273  # CONFIG_HWMON_VID is not set
   2.274 +# CONFIG_SENSORS_ABITUGURU is not set
   2.275  # CONFIG_SENSORS_ADM1021 is not set
   2.276  # CONFIG_SENSORS_ADM1025 is not set
   2.277  # CONFIG_SENSORS_ADM1026 is not set
   2.278 @@ -873,10 +897,12 @@ CONFIG_HWMON=y
   2.279  # CONFIG_SENSORS_PC87360 is not set
   2.280  # CONFIG_SENSORS_SIS5595 is not set
   2.281  # CONFIG_SENSORS_SMSC47M1 is not set
   2.282 +# CONFIG_SENSORS_SMSC47M192 is not set
   2.283  # CONFIG_SENSORS_SMSC47B397 is not set
   2.284  # CONFIG_SENSORS_VIA686A is not set
   2.285  # CONFIG_SENSORS_VT8231 is not set
   2.286  # CONFIG_SENSORS_W83781D is not set
   2.287 +# CONFIG_SENSORS_W83791D is not set
   2.288  # CONFIG_SENSORS_W83792D is not set
   2.289  # CONFIG_SENSORS_W83L785TS is not set
   2.290  # CONFIG_SENSORS_W83627HF is not set
   2.291 @@ -888,24 +914,25 @@ CONFIG_HWMON=y
   2.292  #
   2.293  
   2.294  #
   2.295 -# Multimedia Capabilities Port drivers
   2.296 -#
   2.297 -
   2.298 -#
   2.299  # Multimedia devices
   2.300  #
   2.301  CONFIG_VIDEO_DEV=y
   2.302 +CONFIG_VIDEO_V4L1=y
   2.303 +CONFIG_VIDEO_V4L1_COMPAT=y
   2.304 +CONFIG_VIDEO_V4L2=y
   2.305  
   2.306  #
   2.307 -# Video For Linux
   2.308 +# Video Capture Adapters
   2.309  #
   2.310  
   2.311  #
   2.312 -# Video Adapters
   2.313 +# Video Capture Adapters
   2.314  #
   2.315  # CONFIG_VIDEO_ADV_DEBUG is not set
   2.316 +# CONFIG_VIDEO_VIVI is not set
   2.317  # CONFIG_VIDEO_BT848 is not set
   2.318  # CONFIG_VIDEO_CPIA is not set
   2.319 +# CONFIG_VIDEO_CPIA2 is not set
   2.320  # CONFIG_VIDEO_SAA5246A is not set
   2.321  # CONFIG_VIDEO_SAA5249 is not set
   2.322  # CONFIG_TUNER_3036 is not set
   2.323 @@ -917,10 +944,40 @@ CONFIG_VIDEO_DEV=y
   2.324  # CONFIG_VIDEO_HEXIUM_ORION is not set
   2.325  # CONFIG_VIDEO_HEXIUM_GEMINI is not set
   2.326  # CONFIG_VIDEO_CX88 is not set
   2.327 +
   2.328 +#
   2.329 +# Encoders and Decoders
   2.330 +#
   2.331 +# CONFIG_VIDEO_MSP3400 is not set
   2.332 +# CONFIG_VIDEO_CS53L32A is not set
   2.333 +# CONFIG_VIDEO_TLV320AIC23B is not set
   2.334 +# CONFIG_VIDEO_WM8775 is not set
   2.335 +# CONFIG_VIDEO_WM8739 is not set
   2.336 +# CONFIG_VIDEO_CX2341X is not set
   2.337 +# CONFIG_VIDEO_CX25840 is not set
   2.338 +# CONFIG_VIDEO_SAA711X is not set
   2.339 +# CONFIG_VIDEO_SAA7127 is not set
   2.340 +# CONFIG_VIDEO_UPD64031A is not set
   2.341 +# CONFIG_VIDEO_UPD64083 is not set
   2.342 +
   2.343 +#
   2.344 +# V4L USB devices
   2.345 +#
   2.346 +# CONFIG_VIDEO_PVRUSB2 is not set
   2.347  # CONFIG_VIDEO_EM28XX is not set
   2.348 +# CONFIG_USB_VICAM is not set
   2.349 +# CONFIG_USB_IBMCAM is not set
   2.350 +# CONFIG_USB_KONICAWC is not set
   2.351 +# CONFIG_USB_QUICKCAM_MESSENGER is not set
   2.352 +# CONFIG_USB_ET61X251 is not set
   2.353  # CONFIG_VIDEO_OVCAMCHIP is not set
   2.354 -# CONFIG_VIDEO_AUDIO_DECODER is not set
   2.355 -# CONFIG_VIDEO_DECODER is not set
   2.356 +# CONFIG_USB_W9968CF is not set
   2.357 +# CONFIG_USB_OV511 is not set
   2.358 +# CONFIG_USB_SE401 is not set
   2.359 +# CONFIG_USB_SN9C102 is not set
   2.360 +# CONFIG_USB_STV680 is not set
   2.361 +# CONFIG_USB_ZC0301 is not set
   2.362 +# CONFIG_USB_PWC is not set
   2.363  
   2.364  #
   2.365  # Radio Adapters
   2.366 @@ -928,20 +985,24 @@ CONFIG_VIDEO_DEV=y
   2.367  # CONFIG_RADIO_GEMTEK_PCI is not set
   2.368  # CONFIG_RADIO_MAXIRADIO is not set
   2.369  # CONFIG_RADIO_MAESTRO is not set
   2.370 +# CONFIG_USB_DSBR is not set
   2.371  
   2.372  #
   2.373  # Digital Video Broadcasting Devices
   2.374  #
   2.375  # CONFIG_DVB is not set
   2.376 +# CONFIG_USB_DABUSB is not set
   2.377  
   2.378  #
   2.379  # Graphics support
   2.380  #
   2.381 +CONFIG_FIRMWARE_EDID=y
   2.382  CONFIG_FB=y
   2.383  CONFIG_FB_CFB_FILLRECT=y
   2.384  CONFIG_FB_CFB_COPYAREA=y
   2.385  CONFIG_FB_CFB_IMAGEBLIT=y
   2.386  # CONFIG_FB_MACMODES is not set
   2.387 +# CONFIG_FB_BACKLIGHT is not set
   2.388  CONFIG_FB_MODE_HELPERS=y
   2.389  # CONFIG_FB_TILEBLITTING is not set
   2.390  # CONFIG_FB_CIRRUS is not set
   2.391 @@ -953,7 +1014,6 @@ CONFIG_FB_MODE_HELPERS=y
   2.392  # CONFIG_FB_NVIDIA is not set
   2.393  # CONFIG_FB_RIVA is not set
   2.394  # CONFIG_FB_MATROX is not set
   2.395 -# CONFIG_FB_RADEON_OLD is not set
   2.396  CONFIG_FB_RADEON=y
   2.397  CONFIG_FB_RADEON_I2C=y
   2.398  CONFIG_FB_RADEON_DEBUG=y
   2.399 @@ -972,6 +1032,7 @@ CONFIG_FB_RADEON_DEBUG=y
   2.400  # Console display driver support
   2.401  #
   2.402  CONFIG_VGA_CONSOLE=y
   2.403 +# CONFIG_VGACON_SOFT_SCROLLBACK is not set
   2.404  CONFIG_DUMMY_CONSOLE=y
   2.405  # CONFIG_FRAMEBUFFER_CONSOLE is not set
   2.406  
   2.407 @@ -1002,9 +1063,11 @@ CONFIG_SND_SEQ_DUMMY=y
   2.408  CONFIG_SND_OSSEMUL=y
   2.409  CONFIG_SND_MIXER_OSS=y
   2.410  CONFIG_SND_PCM_OSS=y
   2.411 +CONFIG_SND_PCM_OSS_PLUGINS=y
   2.412  CONFIG_SND_SEQUENCER_OSS=y
   2.413  # CONFIG_SND_DYNAMIC_MINORS is not set
   2.414  CONFIG_SND_SUPPORT_OLD_API=y
   2.415 +CONFIG_SND_VERBOSE_PROCFS=y
   2.416  # CONFIG_SND_VERBOSE_PRINTK is not set
   2.417  # CONFIG_SND_DEBUG is not set
   2.418  
   2.419 @@ -1025,6 +1088,7 @@ CONFIG_SND_MPU401=y
   2.420  # PCI devices
   2.421  #
   2.422  # CONFIG_SND_AD1889 is not set
   2.423 +# CONFIG_SND_ALS300 is not set
   2.424  # CONFIG_SND_ALI5451 is not set
   2.425  CONFIG_SND_ATIIXP=y
   2.426  # CONFIG_SND_ATIIXP_MODEM is not set
   2.427 @@ -1037,6 +1101,18 @@ CONFIG_SND_ATIIXP=y
   2.428  # CONFIG_SND_CMIPCI is not set
   2.429  # CONFIG_SND_CS4281 is not set
   2.430  # CONFIG_SND_CS46XX is not set
   2.431 +# CONFIG_SND_DARLA20 is not set
   2.432 +# CONFIG_SND_GINA20 is not set
   2.433 +# CONFIG_SND_LAYLA20 is not set
   2.434 +# CONFIG_SND_DARLA24 is not set
   2.435 +# CONFIG_SND_GINA24 is not set
   2.436 +# CONFIG_SND_LAYLA24 is not set
   2.437 +# CONFIG_SND_MONA is not set
   2.438 +# CONFIG_SND_MIA is not set
   2.439 +# CONFIG_SND_ECHO3G is not set
   2.440 +# CONFIG_SND_INDIGO is not set
   2.441 +# CONFIG_SND_INDIGOIO is not set
   2.442 +# CONFIG_SND_INDIGODJ is not set
   2.443  # CONFIG_SND_EMU10K1 is not set
   2.444  # CONFIG_SND_EMU10K1X is not set
   2.445  # CONFIG_SND_ENS1370 is not set
   2.446 @@ -1057,6 +1133,7 @@ CONFIG_SND_FM801=y
   2.447  # CONFIG_SND_MIXART is not set
   2.448  # CONFIG_SND_NM256 is not set
   2.449  # CONFIG_SND_PCXHR is not set
   2.450 +# CONFIG_SND_RIPTIDE is not set
   2.451  # CONFIG_SND_RME32 is not set
   2.452  # CONFIG_SND_RME96 is not set
   2.453  # CONFIG_SND_RME9652 is not set
   2.454 @@ -1076,12 +1153,14 @@ CONFIG_SND_FM801=y
   2.455  # Open Sound System
   2.456  #
   2.457  CONFIG_SOUND_PRIME=y
   2.458 -# CONFIG_OBSOLETE_OSS_DRIVER is not set
   2.459 -# CONFIG_SOUND_FUSION is not set
   2.460 +# CONFIG_OSS_OBSOLETE_DRIVER is not set
   2.461 +# CONFIG_SOUND_BT878 is not set
   2.462 +# CONFIG_SOUND_ES1371 is not set
   2.463  # CONFIG_SOUND_ICH is not set
   2.464  # CONFIG_SOUND_TRIDENT is not set
   2.465  # CONFIG_SOUND_MSNDCLAS is not set
   2.466  # CONFIG_SOUND_MSNDPIN is not set
   2.467 +# CONFIG_SOUND_VIA82CXXX is not set
   2.468  # CONFIG_SOUND_TVMIXER is not set
   2.469  
   2.470  #
   2.471 @@ -1089,6 +1168,7 @@ CONFIG_SOUND_PRIME=y
   2.472  #
   2.473  CONFIG_USB_ARCH_HAS_HCD=y
   2.474  CONFIG_USB_ARCH_HAS_OHCI=y
   2.475 +CONFIG_USB_ARCH_HAS_EHCI=y
   2.476  CONFIG_USB=y
   2.477  # CONFIG_USB_DEBUG is not set
   2.478  
   2.479 @@ -1107,6 +1187,7 @@ CONFIG_USB_BANDWIDTH=y
   2.480  CONFIG_USB_EHCI_HCD=y
   2.481  # CONFIG_USB_EHCI_SPLIT_ISO is not set
   2.482  # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
   2.483 +# CONFIG_USB_EHCI_TT_NEWSCHED is not set
   2.484  # CONFIG_USB_ISP116X_HCD is not set
   2.485  CONFIG_USB_OHCI_HCD=y
   2.486  # CONFIG_USB_OHCI_BIG_ENDIAN is not set
   2.487 @@ -1117,7 +1198,6 @@ CONFIG_USB_UHCI_HCD=y
   2.488  #
   2.489  # USB Device Class drivers
   2.490  #
   2.491 -# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
   2.492  # CONFIG_USB_ACM is not set
   2.493  # CONFIG_USB_PRINTER is not set
   2.494  
   2.495 @@ -1154,9 +1234,7 @@ CONFIG_USB_HIDDEV=y
   2.496  # CONFIG_USB_ACECAD is not set
   2.497  # CONFIG_USB_KBTAB is not set
   2.498  # CONFIG_USB_POWERMATE is not set
   2.499 -# CONFIG_USB_MTOUCH is not set
   2.500 -# CONFIG_USB_ITMTOUCH is not set
   2.501 -# CONFIG_USB_EGALAX is not set
   2.502 +# CONFIG_USB_TOUCHSCREEN is not set
   2.503  # CONFIG_USB_YEALINK is not set
   2.504  # CONFIG_USB_XPAD is not set
   2.505  # CONFIG_USB_ATI_REMOTE is not set
   2.506 @@ -1171,21 +1249,6 @@ CONFIG_USB_HIDDEV=y
   2.507  # CONFIG_USB_MICROTEK is not set
   2.508  
   2.509  #
   2.510 -# USB Multimedia devices
   2.511 -#
   2.512 -# CONFIG_USB_DABUSB is not set
   2.513 -# CONFIG_USB_VICAM is not set
   2.514 -# CONFIG_USB_DSBR is not set
   2.515 -# CONFIG_USB_ET61X251 is not set
   2.516 -# CONFIG_USB_IBMCAM is not set
   2.517 -# CONFIG_USB_KONICAWC is not set
   2.518 -# CONFIG_USB_OV511 is not set
   2.519 -# CONFIG_USB_SE401 is not set
   2.520 -# CONFIG_USB_SN9C102 is not set
   2.521 -# CONFIG_USB_STV680 is not set
   2.522 -# CONFIG_USB_PWC is not set
   2.523 -
   2.524 -#
   2.525  # USB Network Adapters
   2.526  #
   2.527  # CONFIG_USB_CATC is not set
   2.528 @@ -1214,10 +1277,12 @@ CONFIG_USB_MON=y
   2.529  # CONFIG_USB_LEGOTOWER is not set
   2.530  # CONFIG_USB_LCD is not set
   2.531  # CONFIG_USB_LED is not set
   2.532 +# CONFIG_USB_CYPRESS_CY7C63 is not set
   2.533  # CONFIG_USB_CYTHERM is not set
   2.534  # CONFIG_USB_PHIDGETKIT is not set
   2.535  # CONFIG_USB_PHIDGETSERVO is not set
   2.536  # CONFIG_USB_IDMOUSE is not set
   2.537 +# CONFIG_USB_APPLEDISPLAY is not set
   2.538  # CONFIG_USB_SISUSBVGA is not set
   2.539  # CONFIG_USB_LD is not set
   2.540  # CONFIG_USB_TEST is not set
   2.541 @@ -1237,6 +1302,19 @@ CONFIG_USB_MON=y
   2.542  # CONFIG_MMC is not set
   2.543  
   2.544  #
   2.545 +# LED devices
   2.546 +#
   2.547 +# CONFIG_NEW_LEDS is not set
   2.548 +
   2.549 +#
   2.550 +# LED drivers
   2.551 +#
   2.552 +
   2.553 +#
   2.554 +# LED Triggers
   2.555 +#
   2.556 +
   2.557 +#
   2.558  # InfiniBand support
   2.559  #
   2.560  # CONFIG_INFINIBAND is not set
   2.561 @@ -1246,6 +1324,24 @@ CONFIG_USB_MON=y
   2.562  #
   2.563  
   2.564  #
   2.565 +# Real Time Clock
   2.566 +#
   2.567 +# CONFIG_RTC_CLASS is not set
   2.568 +
   2.569 +#
   2.570 +# DMA Engine support
   2.571 +#
   2.572 +# CONFIG_DMA_ENGINE is not set
   2.573 +
   2.574 +#
   2.575 +# DMA Clients
   2.576 +#
   2.577 +
   2.578 +#
   2.579 +# DMA Devices
   2.580 +#
   2.581 +
   2.582 +#
   2.583  # File systems
   2.584  #
   2.585  CONFIG_EXT2_FS=y
   2.586 @@ -1269,7 +1365,6 @@ CONFIG_REISERFS_FS_SECURITY=y
   2.587  # CONFIG_JFS_FS is not set
   2.588  CONFIG_FS_POSIX_ACL=y
   2.589  CONFIG_XFS_FS=y
   2.590 -CONFIG_XFS_EXPORT=y
   2.591  # CONFIG_XFS_QUOTA is not set
   2.592  # CONFIG_XFS_SECURITY is not set
   2.593  # CONFIG_XFS_POSIX_ACL is not set
   2.594 @@ -1278,6 +1373,7 @@ CONFIG_XFS_EXPORT=y
   2.595  # CONFIG_MINIX_FS is not set
   2.596  # CONFIG_ROMFS_FS is not set
   2.597  CONFIG_INOTIFY=y
   2.598 +CONFIG_INOTIFY_USER=y
   2.599  # CONFIG_QUOTA is not set
   2.600  CONFIG_DNOTIFY=y
   2.601  CONFIG_AUTOFS_FS=y
   2.602 @@ -1312,7 +1408,6 @@ CONFIG_SYSFS=y
   2.603  CONFIG_TMPFS=y
   2.604  # CONFIG_HUGETLB_PAGE is not set
   2.605  CONFIG_RAMFS=y
   2.606 -# CONFIG_RELAYFS_FS is not set
   2.607  # CONFIG_CONFIGFS_FS is not set
   2.608  
   2.609  #
   2.610 @@ -1358,7 +1453,9 @@ CONFIG_SMB_NLS_DEFAULT=y
   2.611  CONFIG_SMB_NLS_REMOTE="cp437"
   2.612  CONFIG_CIFS=y
   2.613  # CONFIG_CIFS_STATS is not set
   2.614 +# CONFIG_CIFS_WEAK_PW_HASH is not set
   2.615  # CONFIG_CIFS_XATTR is not set
   2.616 +# CONFIG_CIFS_DEBUG2 is not set
   2.617  # CONFIG_CIFS_EXPERIMENTAL is not set
   2.618  # CONFIG_NCP_FS is not set
   2.619  # CONFIG_CODA_FS is not set
   2.620 @@ -1437,9 +1534,11 @@ CONFIG_NLS_UTF8=y
   2.621  # CONFIG_CRC16 is not set
   2.622  CONFIG_CRC32=y
   2.623  # CONFIG_LIBCRC32C is not set
   2.624 +CONFIG_PLIST=y
   2.625  CONFIG_GENERIC_HARDIRQS=y
   2.626  CONFIG_GENERIC_IRQ_PROBE=y
   2.627  CONFIG_GENERIC_PENDING_IRQ=y
   2.628 +CONFIG_IRQ_PER_CPU=y
   2.629  
   2.630  #
   2.631  # Instrumentation Support
   2.632 @@ -1452,14 +1551,19 @@ CONFIG_GENERIC_PENDING_IRQ=y
   2.633  #
   2.634  # CONFIG_PRINTK_TIME is not set
   2.635  CONFIG_MAGIC_SYSRQ=y
   2.636 +CONFIG_UNUSED_SYMBOLS=y
   2.637  CONFIG_DEBUG_KERNEL=y
   2.638  CONFIG_LOG_BUF_SHIFT=20
   2.639  CONFIG_DETECT_SOFTLOCKUP=y
   2.640  # CONFIG_SCHEDSTATS is not set
   2.641  # CONFIG_DEBUG_SLAB is not set
   2.642 -CONFIG_DEBUG_MUTEXES=y
   2.643 +# CONFIG_DEBUG_RT_MUTEXES is not set
   2.644 +# CONFIG_RT_MUTEX_TESTER is not set
   2.645  # CONFIG_DEBUG_SPINLOCK is not set
   2.646 +CONFIG_DEBUG_MUTEXES=y
   2.647 +# CONFIG_DEBUG_RWSEMS is not set
   2.648  # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
   2.649 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
   2.650  # CONFIG_DEBUG_KOBJECT is not set
   2.651  # CONFIG_DEBUG_INFO is not set
   2.652  # CONFIG_DEBUG_FS is not set
   2.653 @@ -1513,7 +1617,6 @@ CONFIG_CRYPTO_DES=y
   2.654  #
   2.655  # CONFIG_XEN_UTIL is not set
   2.656  CONFIG_XEN_BALLOON=y
   2.657 -# CONFIG_XEN_DEVMEM is not set
   2.658  CONFIG_XEN_REBOOT=y
   2.659  # CONFIG_XEN_SMPBOOT is not set
   2.660  CONFIG_XEN_INTERFACE_VERSION=0x00030203
   2.661 @@ -1539,6 +1642,7 @@ CONFIG_XEN_PCIDEV_BACKEND_SLOT=y
   2.662  CONFIG_XEN_TPMDEV_BACKEND=m
   2.663  CONFIG_XEN_BLKDEV_FRONTEND=y
   2.664  CONFIG_XEN_NETDEV_FRONTEND=y
   2.665 +# CONFIG_XEN_FRAMEBUFFER is not set
   2.666  # CONFIG_XEN_SCRUB_PAGES is not set
   2.667  CONFIG_XEN_DISABLE_SERIAL=y
   2.668  CONFIG_XEN_SYSFS=y
   2.669 @@ -1547,3 +1651,4 @@ CONFIG_XEN_COMPAT_030002_AND_LATER=y
   2.670  CONFIG_XEN_COMPAT_030002=y
   2.671  CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
   2.672  CONFIG_NO_IDLE_HZ=y
   2.673 +CONFIG_XEN_DEVMEM=y
     3.1 --- a/buildconfigs/linux-defconfig_xenU_ia64	Fri Feb 09 14:43:22 2007 -0600
     3.2 +++ b/buildconfigs/linux-defconfig_xenU_ia64	Wed Feb 14 19:01:35 2007 +0000
     3.3 @@ -1,7 +1,7 @@
     3.4  #
     3.5  # Automatically generated make config: don't edit
     3.6 -# Linux kernel version: 2.6.16.29-xenU
     3.7 -# Wed Oct  4 12:54:26 2006
     3.8 +# Linux kernel version: 2.6.18-xenU
     3.9 +# Mon Jan 29 10:26:51 2007
    3.10  #
    3.11  
    3.12  #
    3.13 @@ -21,13 +21,15 @@ CONFIG_SYSVIPC=y
    3.14  # CONFIG_POSIX_MQUEUE is not set
    3.15  CONFIG_BSD_PROCESS_ACCT=y
    3.16  # CONFIG_BSD_PROCESS_ACCT_V3 is not set
    3.17 -CONFIG_SYSCTL=y
    3.18 +# CONFIG_TASKSTATS is not set
    3.19  # CONFIG_AUDIT is not set
    3.20  # CONFIG_IKCONFIG is not set
    3.21  # CONFIG_CPUSETS is not set
    3.22 +# CONFIG_RELAY is not set
    3.23  CONFIG_INITRAMFS_SOURCE=""
    3.24  CONFIG_CC_OPTIMIZE_FOR_SIZE=y
    3.25  # CONFIG_EMBEDDED is not set
    3.26 +CONFIG_SYSCTL=y
    3.27  CONFIG_KALLSYMS=y
    3.28  # CONFIG_KALLSYMS_ALL is not set
    3.29  # CONFIG_KALLSYMS_EXTRA_PASS is not set
    3.30 @@ -39,11 +41,9 @@ CONFIG_BASE_FULL=y
    3.31  CONFIG_FUTEX=y
    3.32  CONFIG_EPOLL=y
    3.33  CONFIG_SHMEM=y
    3.34 -CONFIG_CC_ALIGN_FUNCTIONS=0
    3.35 -CONFIG_CC_ALIGN_LABELS=0
    3.36 -CONFIG_CC_ALIGN_LOOPS=0
    3.37 -CONFIG_CC_ALIGN_JUMPS=0
    3.38  CONFIG_SLAB=y
    3.39 +CONFIG_VM_EVENT_COUNTERS=y
    3.40 +CONFIG_RT_MUTEXES=y
    3.41  # CONFIG_TINY_SHMEM is not set
    3.42  CONFIG_BASE_SMALL=0
    3.43  # CONFIG_SLOB is not set
    3.44 @@ -53,7 +53,6 @@ CONFIG_BASE_SMALL=0
    3.45  #
    3.46  CONFIG_MODULES=y
    3.47  # CONFIG_MODULE_UNLOAD is not set
    3.48 -CONFIG_OBSOLETE_MODPARM=y
    3.49  # CONFIG_MODVERSIONS is not set
    3.50  # CONFIG_MODULE_SRCVERSION_ALL is not set
    3.51  # CONFIG_KMOD is not set
    3.52 @@ -61,6 +60,7 @@ CONFIG_OBSOLETE_MODPARM=y
    3.53  #
    3.54  # Block layer
    3.55  #
    3.56 +# CONFIG_BLK_DEV_IO_TRACE is not set
    3.57  
    3.58  #
    3.59  # IO Schedulers
    3.60 @@ -83,8 +83,10 @@ CONFIG_64BIT=y
    3.61  CONFIG_MMU=y
    3.62  CONFIG_SWIOTLB=y
    3.63  CONFIG_RWSEM_XCHGADD_ALGORITHM=y
    3.64 +CONFIG_GENERIC_FIND_NEXT_BIT=y
    3.65  CONFIG_GENERIC_CALIBRATE_DELAY=y
    3.66  CONFIG_TIME_INTERPOLATION=y
    3.67 +CONFIG_DMI=y
    3.68  CONFIG_EFI=y
    3.69  CONFIG_GENERIC_IOMAP=y
    3.70  CONFIG_XEN=y
    3.71 @@ -93,6 +95,7 @@ CONFIG_XEN_IA64_EXPOSE_P2M=y
    3.72  CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
    3.73  CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
    3.74  CONFIG_DMA_IS_DMA32=y
    3.75 +CONFIG_AUDIT_ARCH=y
    3.76  # CONFIG_IA64_GENERIC is not set
    3.77  CONFIG_IA64_DIG=y
    3.78  # CONFIG_IA64_HP_ZX1 is not set
    3.79 @@ -117,9 +120,10 @@ CONFIG_IOSAPIC=y
    3.80  CONFIG_FORCE_MAX_ZONEORDER=11
    3.81  CONFIG_SMP=y
    3.82  CONFIG_NR_CPUS=16
    3.83 -# CONFIG_HOTPLUG_CPU is not set
    3.84 +CONFIG_HOTPLUG_CPU=y
    3.85  CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
    3.86  # CONFIG_SCHED_SMT is not set
    3.87 +# CONFIG_PERMIT_BSP_REMOVE is not set
    3.88  # CONFIG_PREEMPT is not set
    3.89  CONFIG_SELECT_MEMORY_MODEL=y
    3.90  CONFIG_FLATMEM_MANUAL=y
    3.91 @@ -129,6 +133,7 @@ CONFIG_FLATMEM=y
    3.92  CONFIG_FLAT_NODE_MEM_MAP=y
    3.93  # CONFIG_SPARSEMEM_STATIC is not set
    3.94  CONFIG_SPLIT_PTLOCK_CPUS=4
    3.95 +CONFIG_RESOURCES_64BIT=y
    3.96  CONFIG_ARCH_SELECT_MEMORY_MODEL=y
    3.97  CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
    3.98  CONFIG_ARCH_FLATMEM_ENABLE=y
    3.99 @@ -161,7 +166,9 @@ CONFIG_PM_LEGACY=y
   3.100  CONFIG_ACPI=y
   3.101  CONFIG_ACPI_BUTTON=y
   3.102  CONFIG_ACPI_FAN=y
   3.103 +# CONFIG_ACPI_DOCK is not set
   3.104  CONFIG_ACPI_PROCESSOR=y
   3.105 +CONFIG_ACPI_HOTPLUG_CPU=y
   3.106  CONFIG_ACPI_THERMAL=y
   3.107  CONFIG_ACPI_BLACKLIST_YEAR=0
   3.108  # CONFIG_ACPI_DEBUG is not set
   3.109 @@ -182,7 +189,7 @@ CONFIG_PCI=y
   3.110  CONFIG_PCI_DOMAINS=y
   3.111  CONFIG_XEN_PCIDEV_FRONTEND=y
   3.112  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
   3.113 -CONFIG_PCI_LEGACY_PROC=y
   3.114 +# CONFIG_PCIEPORTBUS is not set
   3.115  # CONFIG_PCI_DEBUG is not set
   3.116  
   3.117  #
   3.118 @@ -212,6 +219,8 @@ CONFIG_NET=y
   3.119  CONFIG_PACKET=y
   3.120  # CONFIG_PACKET_MMAP is not set
   3.121  CONFIG_UNIX=y
   3.122 +CONFIG_XFRM=y
   3.123 +# CONFIG_XFRM_USER is not set
   3.124  # CONFIG_NET_KEY is not set
   3.125  CONFIG_INET=y
   3.126  CONFIG_IP_MULTICAST=y
   3.127 @@ -226,7 +235,10 @@ CONFIG_IP_FIB_HASH=y
   3.128  # CONFIG_INET_AH is not set
   3.129  # CONFIG_INET_ESP is not set
   3.130  # CONFIG_INET_IPCOMP is not set
   3.131 +# CONFIG_INET_XFRM_TUNNEL is not set
   3.132  # CONFIG_INET_TUNNEL is not set
   3.133 +CONFIG_INET_XFRM_MODE_TRANSPORT=y
   3.134 +CONFIG_INET_XFRM_MODE_TUNNEL=y
   3.135  CONFIG_INET_DIAG=y
   3.136  CONFIG_INET_TCP_DIAG=y
   3.137  # CONFIG_TCP_CONG_ADVANCED is not set
   3.138 @@ -237,6 +249,9 @@ CONFIG_TCP_CONG_BIC=y
   3.139  #
   3.140  # CONFIG_IP_VS is not set
   3.141  # CONFIG_IPV6 is not set
   3.142 +# CONFIG_INET6_XFRM_TUNNEL is not set
   3.143 +# CONFIG_INET6_TUNNEL is not set
   3.144 +# CONFIG_NETWORK_SECMARK is not set
   3.145  CONFIG_NETFILTER=y
   3.146  # CONFIG_NETFILTER_DEBUG is not set
   3.147  
   3.148 @@ -276,7 +291,6 @@ CONFIG_NETFILTER=y
   3.149  # CONFIG_ATALK is not set
   3.150  # CONFIG_X25 is not set
   3.151  # CONFIG_LAPB is not set
   3.152 -# CONFIG_NET_DIVERT is not set
   3.153  # CONFIG_ECONET is not set
   3.154  # CONFIG_WAN_ROUTER is not set
   3.155  
   3.156 @@ -305,6 +319,7 @@ CONFIG_STANDALONE=y
   3.157  CONFIG_PREVENT_FIRMWARE_BUILD=y
   3.158  # CONFIG_FW_LOADER is not set
   3.159  # CONFIG_DEBUG_DRIVER is not set
   3.160 +# CONFIG_SYS_HYPERVISOR is not set
   3.161  
   3.162  #
   3.163  # Connector - unified userspace <-> kernelspace linker
   3.164 @@ -342,6 +357,7 @@ CONFIG_BLK_DEV_LOOP=y
   3.165  CONFIG_BLK_DEV_RAM=y
   3.166  CONFIG_BLK_DEV_RAM_COUNT=16
   3.167  CONFIG_BLK_DEV_RAM_SIZE=4096
   3.168 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
   3.169  CONFIG_BLK_DEV_INITRD=y
   3.170  # CONFIG_CDROM_PKTCDVD is not set
   3.171  # CONFIG_ATA_OVER_ETH is not set
   3.172 @@ -399,6 +415,7 @@ CONFIG_SCSI_SAS_ATTRS=y
   3.173  # CONFIG_MEGARAID_LEGACY is not set
   3.174  # CONFIG_MEGARAID_SAS is not set
   3.175  # CONFIG_SCSI_SATA is not set
   3.176 +# CONFIG_SCSI_HPTIOP is not set
   3.177  # CONFIG_SCSI_DMX3191D is not set
   3.178  # CONFIG_SCSI_FUTURE_DOMAIN is not set
   3.179  # CONFIG_SCSI_IPS is not set
   3.180 @@ -408,9 +425,8 @@ CONFIG_SCSI_SYM53C8XX_2=y
   3.181  CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
   3.182  CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
   3.183  CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
   3.184 -# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
   3.185 +CONFIG_SCSI_SYM53C8XX_MMIO=y
   3.186  # CONFIG_SCSI_IPR is not set
   3.187 -# CONFIG_SCSI_QLOGIC_FC is not set
   3.188  CONFIG_SCSI_QLOGIC_1280=y
   3.189  # CONFIG_SCSI_QLA_FC is not set
   3.190  # CONFIG_SCSI_LPFC is not set
   3.191 @@ -532,6 +548,7 @@ CONFIG_TIGON3=y
   3.192  # CONFIG_CHELSIO_T1 is not set
   3.193  # CONFIG_IXGB is not set
   3.194  # CONFIG_S2IO is not set
   3.195 +# CONFIG_MYRI10GE is not set
   3.196  
   3.197  #
   3.198  # Token Ring devices
   3.199 @@ -609,6 +626,7 @@ CONFIG_SERIO=y
   3.200  CONFIG_VT=y
   3.201  CONFIG_VT_CONSOLE=y
   3.202  CONFIG_HW_CONSOLE=y
   3.203 +# CONFIG_VT_HW_CONSOLE_BINDING is not set
   3.204  # CONFIG_SERIAL_NONSTANDARD is not set
   3.205  
   3.206  #
   3.207 @@ -616,7 +634,7 @@ CONFIG_HW_CONSOLE=y
   3.208  #
   3.209  CONFIG_SERIAL_8250=y
   3.210  CONFIG_SERIAL_8250_CONSOLE=y
   3.211 -CONFIG_SERIAL_8250_ACPI=y
   3.212 +CONFIG_SERIAL_8250_PCI=y
   3.213  CONFIG_SERIAL_8250_NR_UARTS=8
   3.214  CONFIG_SERIAL_8250_RUNTIME_UARTS=4
   3.215  CONFIG_SERIAL_8250_EXTENDED=y
   3.216 @@ -653,6 +671,8 @@ CONFIG_EFI_RTC=y
   3.217  # Ftape, the floppy tape device driver
   3.218  #
   3.219  CONFIG_AGP=y
   3.220 +# CONFIG_AGP_SIS is not set
   3.221 +# CONFIG_AGP_VIA is not set
   3.222  # CONFIG_AGP_I460 is not set
   3.223  CONFIG_DRM=y
   3.224  # CONFIG_DRM_TDFX is not set
   3.225 @@ -697,10 +717,10 @@ CONFIG_I2C_ALGOPCF=y
   3.226  # CONFIG_I2C_I810 is not set
   3.227  # CONFIG_I2C_PIIX4 is not set
   3.228  # CONFIG_I2C_NFORCE2 is not set
   3.229 +# CONFIG_I2C_OCORES is not set
   3.230  # CONFIG_I2C_PARPORT_LIGHT is not set
   3.231  # CONFIG_I2C_PROSAVAGE is not set
   3.232  # CONFIG_I2C_SAVAGE4 is not set
   3.233 -# CONFIG_SCx200_ACB is not set
   3.234  # CONFIG_I2C_SIS5595 is not set
   3.235  # CONFIG_I2C_SIS630 is not set
   3.236  # CONFIG_I2C_SIS96X is not set
   3.237 @@ -719,9 +739,7 @@ CONFIG_I2C_ALGOPCF=y
   3.238  # CONFIG_SENSORS_PCF8574 is not set
   3.239  # CONFIG_SENSORS_PCA9539 is not set
   3.240  # CONFIG_SENSORS_PCF8591 is not set
   3.241 -# CONFIG_SENSORS_RTC8564 is not set
   3.242  # CONFIG_SENSORS_MAX6875 is not set
   3.243 -# CONFIG_RTC_X1205_I2C is not set
   3.244  # CONFIG_I2C_DEBUG_CORE is not set
   3.245  # CONFIG_I2C_DEBUG_ALGO is not set
   3.246  # CONFIG_I2C_DEBUG_BUS is not set
   3.247 @@ -736,13 +754,13 @@ CONFIG_I2C_ALGOPCF=y
   3.248  #
   3.249  # Dallas's 1-wire bus
   3.250  #
   3.251 -# CONFIG_W1 is not set
   3.252  
   3.253  #
   3.254  # Hardware Monitoring support
   3.255  #
   3.256  CONFIG_HWMON=y
   3.257  # CONFIG_HWMON_VID is not set
   3.258 +# CONFIG_SENSORS_ABITUGURU is not set
   3.259  # CONFIG_SENSORS_ADM1021 is not set
   3.260  # CONFIG_SENSORS_ADM1025 is not set
   3.261  # CONFIG_SENSORS_ADM1026 is not set
   3.262 @@ -771,10 +789,12 @@ CONFIG_HWMON=y
   3.263  # CONFIG_SENSORS_PC87360 is not set
   3.264  # CONFIG_SENSORS_SIS5595 is not set
   3.265  # CONFIG_SENSORS_SMSC47M1 is not set
   3.266 +# CONFIG_SENSORS_SMSC47M192 is not set
   3.267  # CONFIG_SENSORS_SMSC47B397 is not set
   3.268  # CONFIG_SENSORS_VIA686A is not set
   3.269  # CONFIG_SENSORS_VT8231 is not set
   3.270  # CONFIG_SENSORS_W83781D is not set
   3.271 +# CONFIG_SENSORS_W83791D is not set
   3.272  # CONFIG_SENSORS_W83792D is not set
   3.273  # CONFIG_SENSORS_W83L785TS is not set
   3.274  # CONFIG_SENSORS_W83627HF is not set
   3.275 @@ -786,24 +806,25 @@ CONFIG_HWMON=y
   3.276  #
   3.277  
   3.278  #
   3.279 -# Multimedia Capabilities Port drivers
   3.280 -#
   3.281 -
   3.282 -#
   3.283  # Multimedia devices
   3.284  #
   3.285  CONFIG_VIDEO_DEV=y
   3.286 +CONFIG_VIDEO_V4L1=y
   3.287 +CONFIG_VIDEO_V4L1_COMPAT=y
   3.288 +CONFIG_VIDEO_V4L2=y
   3.289  
   3.290  #
   3.291 -# Video For Linux
   3.292 +# Video Capture Adapters
   3.293  #
   3.294  
   3.295  #
   3.296 -# Video Adapters
   3.297 +# Video Capture Adapters
   3.298  #
   3.299  # CONFIG_VIDEO_ADV_DEBUG is not set
   3.300 +# CONFIG_VIDEO_VIVI is not set
   3.301  # CONFIG_VIDEO_BT848 is not set
   3.302  # CONFIG_VIDEO_CPIA is not set
   3.303 +# CONFIG_VIDEO_CPIA2 is not set
   3.304  # CONFIG_VIDEO_SAA5246A is not set
   3.305  # CONFIG_VIDEO_SAA5249 is not set
   3.306  # CONFIG_TUNER_3036 is not set
   3.307 @@ -815,10 +836,40 @@ CONFIG_VIDEO_DEV=y
   3.308  # CONFIG_VIDEO_HEXIUM_ORION is not set
   3.309  # CONFIG_VIDEO_HEXIUM_GEMINI is not set
   3.310  # CONFIG_VIDEO_CX88 is not set
   3.311 +
   3.312 +#
   3.313 +# Encoders and Decoders
   3.314 +#
   3.315 +# CONFIG_VIDEO_MSP3400 is not set
   3.316 +# CONFIG_VIDEO_CS53L32A is not set
   3.317 +# CONFIG_VIDEO_TLV320AIC23B is not set
   3.318 +# CONFIG_VIDEO_WM8775 is not set
   3.319 +# CONFIG_VIDEO_WM8739 is not set
   3.320 +# CONFIG_VIDEO_CX2341X is not set
   3.321 +# CONFIG_VIDEO_CX25840 is not set
   3.322 +# CONFIG_VIDEO_SAA711X is not set
   3.323 +# CONFIG_VIDEO_SAA7127 is not set
   3.324 +# CONFIG_VIDEO_UPD64031A is not set
   3.325 +# CONFIG_VIDEO_UPD64083 is not set
   3.326 +
   3.327 +#
   3.328 +# V4L USB devices
   3.329 +#
   3.330 +# CONFIG_VIDEO_PVRUSB2 is not set
   3.331  # CONFIG_VIDEO_EM28XX is not set
   3.332 +# CONFIG_USB_VICAM is not set
   3.333 +# CONFIG_USB_IBMCAM is not set
   3.334 +# CONFIG_USB_KONICAWC is not set
   3.335 +# CONFIG_USB_QUICKCAM_MESSENGER is not set
   3.336 +# CONFIG_USB_ET61X251 is not set
   3.337  # CONFIG_VIDEO_OVCAMCHIP is not set
   3.338 -# CONFIG_VIDEO_AUDIO_DECODER is not set
   3.339 -# CONFIG_VIDEO_DECODER is not set
   3.340 +# CONFIG_USB_W9968CF is not set
   3.341 +# CONFIG_USB_OV511 is not set
   3.342 +# CONFIG_USB_SE401 is not set
   3.343 +# CONFIG_USB_SN9C102 is not set
   3.344 +# CONFIG_USB_STV680 is not set
   3.345 +# CONFIG_USB_ZC0301 is not set
   3.346 +# CONFIG_USB_PWC is not set
   3.347  
   3.348  #
   3.349  # Radio Adapters
   3.350 @@ -826,20 +877,24 @@ CONFIG_VIDEO_DEV=y
   3.351  # CONFIG_RADIO_GEMTEK_PCI is not set
   3.352  # CONFIG_RADIO_MAXIRADIO is not set
   3.353  # CONFIG_RADIO_MAESTRO is not set
   3.354 +# CONFIG_USB_DSBR is not set
   3.355  
   3.356  #
   3.357  # Digital Video Broadcasting Devices
   3.358  #
   3.359  # CONFIG_DVB is not set
   3.360 +# CONFIG_USB_DABUSB is not set
   3.361  
   3.362  #
   3.363  # Graphics support
   3.364  #
   3.365 +CONFIG_FIRMWARE_EDID=y
   3.366  CONFIG_FB=y
   3.367  CONFIG_FB_CFB_FILLRECT=y
   3.368  CONFIG_FB_CFB_COPYAREA=y
   3.369  CONFIG_FB_CFB_IMAGEBLIT=y
   3.370  # CONFIG_FB_MACMODES is not set
   3.371 +# CONFIG_FB_BACKLIGHT is not set
   3.372  CONFIG_FB_MODE_HELPERS=y
   3.373  # CONFIG_FB_TILEBLITTING is not set
   3.374  # CONFIG_FB_CIRRUS is not set
   3.375 @@ -851,7 +906,6 @@ CONFIG_FB_MODE_HELPERS=y
   3.376  # CONFIG_FB_NVIDIA is not set
   3.377  # CONFIG_FB_RIVA is not set
   3.378  # CONFIG_FB_MATROX is not set
   3.379 -# CONFIG_FB_RADEON_OLD is not set
   3.380  CONFIG_FB_RADEON=y
   3.381  CONFIG_FB_RADEON_I2C=y
   3.382  CONFIG_FB_RADEON_DEBUG=y
   3.383 @@ -870,6 +924,7 @@ CONFIG_FB_RADEON_DEBUG=y
   3.384  # Console display driver support
   3.385  #
   3.386  CONFIG_VGA_CONSOLE=y
   3.387 +# CONFIG_VGACON_SOFT_SCROLLBACK is not set
   3.388  CONFIG_DUMMY_CONSOLE=y
   3.389  # CONFIG_FRAMEBUFFER_CONSOLE is not set
   3.390  
   3.391 @@ -900,9 +955,11 @@ CONFIG_SND_SEQUENCER=y
   3.392  CONFIG_SND_OSSEMUL=y
   3.393  CONFIG_SND_MIXER_OSS=y
   3.394  CONFIG_SND_PCM_OSS=y
   3.395 +CONFIG_SND_PCM_OSS_PLUGINS=y
   3.396  CONFIG_SND_SEQUENCER_OSS=y
   3.397  # CONFIG_SND_DYNAMIC_MINORS is not set
   3.398  CONFIG_SND_SUPPORT_OLD_API=y
   3.399 +CONFIG_SND_VERBOSE_PROCFS=y
   3.400  # CONFIG_SND_VERBOSE_PRINTK is not set
   3.401  # CONFIG_SND_DEBUG is not set
   3.402  
   3.403 @@ -923,6 +980,7 @@ CONFIG_SND_AC97_BUS=y
   3.404  # PCI devices
   3.405  #
   3.406  # CONFIG_SND_AD1889 is not set
   3.407 +# CONFIG_SND_ALS300 is not set
   3.408  # CONFIG_SND_ALI5451 is not set
   3.409  # CONFIG_SND_ATIIXP is not set
   3.410  # CONFIG_SND_ATIIXP_MODEM is not set
   3.411 @@ -980,6 +1038,7 @@ CONFIG_SND_FM801=y
   3.412  #
   3.413  CONFIG_USB_ARCH_HAS_HCD=y
   3.414  CONFIG_USB_ARCH_HAS_OHCI=y
   3.415 +CONFIG_USB_ARCH_HAS_EHCI=y
   3.416  CONFIG_USB=y
   3.417  # CONFIG_USB_DEBUG is not set
   3.418  
   3.419 @@ -998,6 +1057,7 @@ CONFIG_USB_BANDWIDTH=y
   3.420  CONFIG_USB_EHCI_HCD=y
   3.421  # CONFIG_USB_EHCI_SPLIT_ISO is not set
   3.422  # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
   3.423 +# CONFIG_USB_EHCI_TT_NEWSCHED is not set
   3.424  # CONFIG_USB_ISP116X_HCD is not set
   3.425  CONFIG_USB_OHCI_HCD=y
   3.426  # CONFIG_USB_OHCI_BIG_ENDIAN is not set
   3.427 @@ -1008,7 +1068,6 @@ CONFIG_USB_UHCI_HCD=y
   3.428  #
   3.429  # USB Device Class drivers
   3.430  #
   3.431 -# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
   3.432  # CONFIG_USB_ACM is not set
   3.433  # CONFIG_USB_PRINTER is not set
   3.434  
   3.435 @@ -1044,9 +1103,7 @@ CONFIG_USB_HIDDEV=y
   3.436  # CONFIG_USB_ACECAD is not set
   3.437  # CONFIG_USB_KBTAB is not set
   3.438  # CONFIG_USB_POWERMATE is not set
   3.439 -# CONFIG_USB_MTOUCH is not set
   3.440 -# CONFIG_USB_ITMTOUCH is not set
   3.441 -# CONFIG_USB_EGALAX is not set
   3.442 +# CONFIG_USB_TOUCHSCREEN is not set
   3.443  # CONFIG_USB_YEALINK is not set
   3.444  # CONFIG_USB_XPAD is not set
   3.445  # CONFIG_USB_ATI_REMOTE is not set
   3.446 @@ -1061,21 +1118,6 @@ CONFIG_USB_HIDDEV=y
   3.447  # CONFIG_USB_MICROTEK is not set
   3.448  
   3.449  #
   3.450 -# USB Multimedia devices
   3.451 -#
   3.452 -# CONFIG_USB_DABUSB is not set
   3.453 -# CONFIG_USB_VICAM is not set
   3.454 -# CONFIG_USB_DSBR is not set
   3.455 -# CONFIG_USB_ET61X251 is not set
   3.456 -# CONFIG_USB_IBMCAM is not set
   3.457 -# CONFIG_USB_KONICAWC is not set
   3.458 -# CONFIG_USB_OV511 is not set
   3.459 -# CONFIG_USB_SE401 is not set
   3.460 -# CONFIG_USB_SN9C102 is not set
   3.461 -# CONFIG_USB_STV680 is not set
   3.462 -# CONFIG_USB_PWC is not set
   3.463 -
   3.464 -#
   3.465  # USB Network Adapters
   3.466  #
   3.467  # CONFIG_USB_CATC is not set
   3.468 @@ -1104,10 +1146,12 @@ CONFIG_USB_MON=y
   3.469  # CONFIG_USB_LEGOTOWER is not set
   3.470  # CONFIG_USB_LCD is not set
   3.471  # CONFIG_USB_LED is not set
   3.472 +# CONFIG_USB_CYPRESS_CY7C63 is not set
   3.473  # CONFIG_USB_CYTHERM is not set
   3.474  # CONFIG_USB_PHIDGETKIT is not set
   3.475  # CONFIG_USB_PHIDGETSERVO is not set
   3.476  # CONFIG_USB_IDMOUSE is not set
   3.477 +# CONFIG_USB_APPLEDISPLAY is not set
   3.478  # CONFIG_USB_SISUSBVGA is not set
   3.479  # CONFIG_USB_LD is not set
   3.480  
   3.481 @@ -1126,6 +1170,19 @@ CONFIG_USB_MON=y
   3.482  # CONFIG_MMC is not set
   3.483  
   3.484  #
   3.485 +# LED devices
   3.486 +#
   3.487 +# CONFIG_NEW_LEDS is not set
   3.488 +
   3.489 +#
   3.490 +# LED drivers
   3.491 +#
   3.492 +
   3.493 +#
   3.494 +# LED Triggers
   3.495 +#
   3.496 +
   3.497 +#
   3.498  # InfiniBand support
   3.499  #
   3.500  # CONFIG_INFINIBAND is not set
   3.501 @@ -1135,6 +1192,24 @@ CONFIG_USB_MON=y
   3.502  #
   3.503  
   3.504  #
   3.505 +# Real Time Clock
   3.506 +#
   3.507 +# CONFIG_RTC_CLASS is not set
   3.508 +
   3.509 +#
   3.510 +# DMA Engine support
   3.511 +#
   3.512 +# CONFIG_DMA_ENGINE is not set
   3.513 +
   3.514 +#
   3.515 +# DMA Clients
   3.516 +#
   3.517 +
   3.518 +#
   3.519 +# DMA Devices
   3.520 +#
   3.521 +
   3.522 +#
   3.523  # File systems
   3.524  #
   3.525  CONFIG_EXT2_FS=y
   3.526 @@ -1157,6 +1232,7 @@ CONFIG_FS_MBCACHE=y
   3.527  # CONFIG_MINIX_FS is not set
   3.528  # CONFIG_ROMFS_FS is not set
   3.529  CONFIG_INOTIFY=y
   3.530 +CONFIG_INOTIFY_USER=y
   3.531  # CONFIG_QUOTA is not set
   3.532  CONFIG_DNOTIFY=y
   3.533  CONFIG_AUTOFS_FS=y
   3.534 @@ -1191,7 +1267,6 @@ CONFIG_SYSFS=y
   3.535  CONFIG_TMPFS=y
   3.536  # CONFIG_HUGETLB_PAGE is not set
   3.537  CONFIG_RAMFS=y
   3.538 -# CONFIG_RELAYFS_FS is not set
   3.539  # CONFIG_CONFIGFS_FS is not set
   3.540  
   3.541  #
   3.542 @@ -1311,9 +1386,11 @@ CONFIG_NLS_UTF8=y
   3.543  # CONFIG_CRC16 is not set
   3.544  CONFIG_CRC32=y
   3.545  # CONFIG_LIBCRC32C is not set
   3.546 +CONFIG_PLIST=y
   3.547  CONFIG_GENERIC_HARDIRQS=y
   3.548  CONFIG_GENERIC_IRQ_PROBE=y
   3.549  CONFIG_GENERIC_PENDING_IRQ=y
   3.550 +CONFIG_IRQ_PER_CPU=y
   3.551  
   3.552  #
   3.553  # Instrumentation Support
   3.554 @@ -1326,14 +1403,19 @@ CONFIG_GENERIC_PENDING_IRQ=y
   3.555  #
   3.556  # CONFIG_PRINTK_TIME is not set
   3.557  CONFIG_MAGIC_SYSRQ=y
   3.558 +CONFIG_UNUSED_SYMBOLS=y
   3.559  CONFIG_DEBUG_KERNEL=y
   3.560  CONFIG_LOG_BUF_SHIFT=17
   3.561  CONFIG_DETECT_SOFTLOCKUP=y
   3.562  # CONFIG_SCHEDSTATS is not set
   3.563  # CONFIG_DEBUG_SLAB is not set
   3.564 -CONFIG_DEBUG_MUTEXES=y
   3.565 +# CONFIG_DEBUG_RT_MUTEXES is not set
   3.566 +# CONFIG_RT_MUTEX_TESTER is not set
   3.567  # CONFIG_DEBUG_SPINLOCK is not set
   3.568 +CONFIG_DEBUG_MUTEXES=y
   3.569 +# CONFIG_DEBUG_RWSEMS is not set
   3.570  # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
   3.571 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
   3.572  # CONFIG_DEBUG_KOBJECT is not set
   3.573  # CONFIG_DEBUG_INFO is not set
   3.574  # CONFIG_DEBUG_FS is not set
   3.575 @@ -1387,7 +1469,6 @@ CONFIG_CRYPTO_DES=y
   3.576  #
   3.577  # CONFIG_XEN_UTIL is not set
   3.578  CONFIG_XEN_BALLOON=y
   3.579 -# CONFIG_XEN_DEVMEM is not set
   3.580  CONFIG_XEN_REBOOT=y
   3.581  # CONFIG_XEN_SMPBOOT is not set
   3.582  CONFIG_XEN_INTERFACE_VERSION=0x00030203
   3.583 @@ -1402,6 +1483,8 @@ CONFIG_XEN_XENBUS_DEV=y
   3.584  # CONFIG_XEN_BACKEND is not set
   3.585  CONFIG_XEN_BLKDEV_FRONTEND=y
   3.586  CONFIG_XEN_NETDEV_FRONTEND=y
   3.587 +CONFIG_XEN_FRAMEBUFFER=y
   3.588 +CONFIG_XEN_KEYBOARD=y
   3.589  # CONFIG_XEN_SCRUB_PAGES is not set
   3.590  # CONFIG_XEN_DISABLE_SERIAL is not set
   3.591  CONFIG_XEN_SYSFS=y
   3.592 @@ -1410,3 +1493,4 @@ CONFIG_XEN_COMPAT_030002_AND_LATER=y
   3.593  CONFIG_XEN_COMPAT_030002=y
   3.594  CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
   3.595  CONFIG_NO_IDLE_HZ=y
   3.596 +CONFIG_XEN_DEVMEM=y
     4.1 --- a/buildconfigs/linux-defconfig_xen_ia64	Fri Feb 09 14:43:22 2007 -0600
     4.2 +++ b/buildconfigs/linux-defconfig_xen_ia64	Wed Feb 14 19:01:35 2007 +0000
     4.3 @@ -1,7 +1,7 @@
     4.4  #
     4.5  # Automatically generated make config: don't edit
     4.6 -# Linux kernel version: 2.6.16.29-xen
     4.7 -# Tue Nov 14 10:38:50 2006
     4.8 +# Linux kernel version: 2.6.18-xen
     4.9 +# Mon Jan 29 10:01:13 2007
    4.10  #
    4.11  
    4.12  #
    4.13 @@ -21,14 +21,16 @@ CONFIG_SYSVIPC=y
    4.14  CONFIG_POSIX_MQUEUE=y
    4.15  CONFIG_BSD_PROCESS_ACCT=y
    4.16  # CONFIG_BSD_PROCESS_ACCT_V3 is not set
    4.17 -CONFIG_SYSCTL=y
    4.18 +# CONFIG_TASKSTATS is not set
    4.19  # CONFIG_AUDIT is not set
    4.20  CONFIG_IKCONFIG=y
    4.21  CONFIG_IKCONFIG_PROC=y
    4.22  # CONFIG_CPUSETS is not set
    4.23 +# CONFIG_RELAY is not set
    4.24  CONFIG_INITRAMFS_SOURCE=""
    4.25  CONFIG_CC_OPTIMIZE_FOR_SIZE=y
    4.26  # CONFIG_EMBEDDED is not set
    4.27 +CONFIG_SYSCTL=y
    4.28  CONFIG_KALLSYMS=y
    4.29  CONFIG_KALLSYMS_ALL=y
    4.30  CONFIG_KALLSYMS_EXTRA_PASS=y
    4.31 @@ -40,11 +42,9 @@ CONFIG_BASE_FULL=y
    4.32  CONFIG_FUTEX=y
    4.33  CONFIG_EPOLL=y
    4.34  CONFIG_SHMEM=y
    4.35 -CONFIG_CC_ALIGN_FUNCTIONS=0
    4.36 -CONFIG_CC_ALIGN_LABELS=0
    4.37 -CONFIG_CC_ALIGN_LOOPS=0
    4.38 -CONFIG_CC_ALIGN_JUMPS=0
    4.39  CONFIG_SLAB=y
    4.40 +CONFIG_VM_EVENT_COUNTERS=y
    4.41 +CONFIG_RT_MUTEXES=y
    4.42  # CONFIG_TINY_SHMEM is not set
    4.43  CONFIG_BASE_SMALL=0
    4.44  # CONFIG_SLOB is not set
    4.45 @@ -55,7 +55,6 @@ CONFIG_BASE_SMALL=0
    4.46  CONFIG_MODULES=y
    4.47  CONFIG_MODULE_UNLOAD=y
    4.48  # CONFIG_MODULE_FORCE_UNLOAD is not set
    4.49 -CONFIG_OBSOLETE_MODPARM=y
    4.50  CONFIG_MODVERSIONS=y
    4.51  CONFIG_MODULE_SRCVERSION_ALL=y
    4.52  CONFIG_KMOD=y
    4.53 @@ -64,6 +63,7 @@ CONFIG_STOP_MACHINE=y
    4.54  #
    4.55  # Block layer
    4.56  #
    4.57 +# CONFIG_BLK_DEV_IO_TRACE is not set
    4.58  
    4.59  #
    4.60  # IO Schedulers
    4.61 @@ -86,8 +86,10 @@ CONFIG_64BIT=y
    4.62  CONFIG_MMU=y
    4.63  CONFIG_SWIOTLB=y
    4.64  CONFIG_RWSEM_XCHGADD_ALGORITHM=y
    4.65 +CONFIG_GENERIC_FIND_NEXT_BIT=y
    4.66  CONFIG_GENERIC_CALIBRATE_DELAY=y
    4.67  CONFIG_TIME_INTERPOLATION=y
    4.68 +CONFIG_DMI=y
    4.69  CONFIG_EFI=y
    4.70  CONFIG_GENERIC_IOMAP=y
    4.71  CONFIG_XEN=y
    4.72 @@ -96,6 +98,7 @@ CONFIG_XEN_IA64_EXPOSE_P2M=y
    4.73  CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR=y
    4.74  CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
    4.75  CONFIG_DMA_IS_DMA32=y
    4.76 +CONFIG_AUDIT_ARCH=y
    4.77  # CONFIG_IA64_GENERIC is not set
    4.78  CONFIG_IA64_DIG=y
    4.79  # CONFIG_IA64_HP_ZX1 is not set
    4.80 @@ -123,6 +126,7 @@ CONFIG_NR_CPUS=16
    4.81  CONFIG_HOTPLUG_CPU=y
    4.82  CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
    4.83  # CONFIG_SCHED_SMT is not set
    4.84 +# CONFIG_PERMIT_BSP_REMOVE is not set
    4.85  # CONFIG_PREEMPT is not set
    4.86  CONFIG_SELECT_MEMORY_MODEL=y
    4.87  CONFIG_FLATMEM_MANUAL=y
    4.88 @@ -132,6 +136,7 @@ CONFIG_FLATMEM=y
    4.89  CONFIG_FLAT_NODE_MEM_MAP=y
    4.90  # CONFIG_SPARSEMEM_STATIC is not set
    4.91  CONFIG_SPLIT_PTLOCK_CPUS=4
    4.92 +CONFIG_RESOURCES_64BIT=y
    4.93  CONFIG_ARCH_SELECT_MEMORY_MODEL=y
    4.94  CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
    4.95  CONFIG_ARCH_FLATMEM_ENABLE=y
    4.96 @@ -163,6 +168,7 @@ CONFIG_PM_LEGACY=y
    4.97  CONFIG_ACPI=y
    4.98  CONFIG_ACPI_BUTTON=y
    4.99  CONFIG_ACPI_FAN=y
   4.100 +# CONFIG_ACPI_DOCK is not set
   4.101  CONFIG_ACPI_PROCESSOR=y
   4.102  CONFIG_ACPI_HOTPLUG_CPU=y
   4.103  CONFIG_ACPI_THERMAL=y
   4.104 @@ -185,7 +191,7 @@ CONFIG_PCI=y
   4.105  CONFIG_PCI_DOMAINS=y
   4.106  CONFIG_XEN_PCIDEV_FRONTEND=y
   4.107  # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
   4.108 -CONFIG_PCI_LEGACY_PROC=y
   4.109 +# CONFIG_PCIEPORTBUS is not set
   4.110  # CONFIG_PCI_DEBUG is not set
   4.111  
   4.112  #
   4.113 @@ -215,6 +221,8 @@ CONFIG_NET=y
   4.114  CONFIG_PACKET=y
   4.115  # CONFIG_PACKET_MMAP is not set
   4.116  CONFIG_UNIX=y
   4.117 +CONFIG_XFRM=y
   4.118 +# CONFIG_XFRM_USER is not set
   4.119  # CONFIG_NET_KEY is not set
   4.120  CONFIG_INET=y
   4.121  CONFIG_IP_MULTICAST=y
   4.122 @@ -229,7 +237,10 @@ CONFIG_SYN_COOKIES=y
   4.123  # CONFIG_INET_AH is not set
   4.124  # CONFIG_INET_ESP is not set
   4.125  # CONFIG_INET_IPCOMP is not set
   4.126 +# CONFIG_INET_XFRM_TUNNEL is not set
   4.127  # CONFIG_INET_TUNNEL is not set
   4.128 +CONFIG_INET_XFRM_MODE_TRANSPORT=y
   4.129 +CONFIG_INET_XFRM_MODE_TUNNEL=y
   4.130  CONFIG_INET_DIAG=y
   4.131  CONFIG_INET_TCP_DIAG=y
   4.132  # CONFIG_TCP_CONG_ADVANCED is not set
   4.133 @@ -240,6 +251,9 @@ CONFIG_TCP_CONG_BIC=y
   4.134  #
   4.135  # CONFIG_IP_VS is not set
   4.136  # CONFIG_IPV6 is not set
   4.137 +# CONFIG_INET6_XFRM_TUNNEL is not set
   4.138 +# CONFIG_INET6_TUNNEL is not set
   4.139 +# CONFIG_NETWORK_SECMARK is not set
   4.140  CONFIG_NETFILTER=y
   4.141  # CONFIG_NETFILTER_DEBUG is not set
   4.142  CONFIG_BRIDGE_NETFILTER=y
   4.143 @@ -280,12 +294,12 @@ CONFIG_BRIDGE_NETFILTER=y
   4.144  CONFIG_BRIDGE=y
   4.145  # CONFIG_VLAN_8021Q is not set
   4.146  # CONFIG_DECNET is not set
   4.147 +CONFIG_LLC=y
   4.148  # CONFIG_LLC2 is not set
   4.149  # CONFIG_IPX is not set
   4.150  # CONFIG_ATALK is not set
   4.151  # CONFIG_X25 is not set
   4.152  # CONFIG_LAPB is not set
   4.153 -# CONFIG_NET_DIVERT is not set
   4.154  # CONFIG_ECONET is not set
   4.155  # CONFIG_WAN_ROUTER is not set
   4.156  
   4.157 @@ -314,6 +328,7 @@ CONFIG_STANDALONE=y
   4.158  CONFIG_PREVENT_FIRMWARE_BUILD=y
   4.159  CONFIG_FW_LOADER=y
   4.160  # CONFIG_DEBUG_DRIVER is not set
   4.161 +# CONFIG_SYS_HYPERVISOR is not set
   4.162  
   4.163  #
   4.164  # Connector - unified userspace <-> kernelspace linker
   4.165 @@ -352,6 +367,7 @@ CONFIG_BLK_DEV_NBD=m
   4.166  CONFIG_BLK_DEV_RAM=y
   4.167  CONFIG_BLK_DEV_RAM_COUNT=16
   4.168  CONFIG_BLK_DEV_RAM_SIZE=4096
   4.169 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
   4.170  CONFIG_BLK_DEV_INITRD=y
   4.171  # CONFIG_CDROM_PKTCDVD is not set
   4.172  # CONFIG_ATA_OVER_ETH is not set
   4.173 @@ -463,6 +479,7 @@ CONFIG_SCSI_SAS_ATTRS=y
   4.174  # CONFIG_MEGARAID_LEGACY is not set
   4.175  # CONFIG_MEGARAID_SAS is not set
   4.176  # CONFIG_SCSI_SATA is not set
   4.177 +# CONFIG_SCSI_HPTIOP is not set
   4.178  # CONFIG_SCSI_DMX3191D is not set
   4.179  # CONFIG_SCSI_FUTURE_DOMAIN is not set
   4.180  # CONFIG_SCSI_IPS is not set
   4.181 @@ -472,10 +489,8 @@ CONFIG_SCSI_SYM53C8XX_2=y
   4.182  CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
   4.183  CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
   4.184  CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
   4.185 -# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
   4.186 +CONFIG_SCSI_SYM53C8XX_MMIO=y
   4.187  # CONFIG_SCSI_IPR is not set
   4.188 -CONFIG_SCSI_QLOGIC_FC=y
   4.189 -# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
   4.190  CONFIG_SCSI_QLOGIC_1280=y
   4.191  # CONFIG_SCSI_QLA_FC is not set
   4.192  # CONFIG_SCSI_LPFC is not set
   4.193 @@ -613,6 +628,7 @@ CONFIG_TIGON3=y
   4.194  # CONFIG_CHELSIO_T1 is not set
   4.195  # CONFIG_IXGB is not set
   4.196  # CONFIG_S2IO is not set
   4.197 +# CONFIG_MYRI10GE is not set
   4.198  
   4.199  #
   4.200  # Token Ring devices
   4.201 @@ -715,6 +731,7 @@ CONFIG_GAMEPORT=y
   4.202  CONFIG_VT=y
   4.203  CONFIG_VT_CONSOLE=y
   4.204  CONFIG_HW_CONSOLE=y
   4.205 +# CONFIG_VT_HW_CONSOLE_BINDING is not set
   4.206  CONFIG_SERIAL_NONSTANDARD=y
   4.207  # CONFIG_COMPUTONE is not set
   4.208  # CONFIG_ROCKETPORT is not set
   4.209 @@ -728,6 +745,7 @@ CONFIG_SERIAL_NONSTANDARD=y
   4.210  # CONFIG_N_HDLC is not set
   4.211  # CONFIG_SPECIALIX is not set
   4.212  # CONFIG_SX is not set
   4.213 +# CONFIG_RIO is not set
   4.214  # CONFIG_STALDRV is not set
   4.215  
   4.216  #
   4.217 @@ -761,6 +779,8 @@ CONFIG_EFI_RTC=y
   4.218  # Ftape, the floppy tape device driver
   4.219  #
   4.220  CONFIG_AGP=y
   4.221 +# CONFIG_AGP_SIS is not set
   4.222 +# CONFIG_AGP_VIA is not set
   4.223  CONFIG_AGP_I460=y
   4.224  CONFIG_DRM=y
   4.225  # CONFIG_DRM_TDFX is not set
   4.226 @@ -805,10 +825,10 @@ CONFIG_I2C_ALGOPCF=y
   4.227  # CONFIG_I2C_I810 is not set
   4.228  # CONFIG_I2C_PIIX4 is not set
   4.229  # CONFIG_I2C_NFORCE2 is not set
   4.230 +# CONFIG_I2C_OCORES is not set
   4.231  # CONFIG_I2C_PARPORT_LIGHT is not set
   4.232  # CONFIG_I2C_PROSAVAGE is not set
   4.233  # CONFIG_I2C_SAVAGE4 is not set
   4.234 -# CONFIG_SCx200_ACB is not set
   4.235  # CONFIG_I2C_SIS5595 is not set
   4.236  # CONFIG_I2C_SIS630 is not set
   4.237  # CONFIG_I2C_SIS96X is not set
   4.238 @@ -827,9 +847,7 @@ CONFIG_I2C_ALGOPCF=y
   4.239  # CONFIG_SENSORS_PCF8574 is not set
   4.240  # CONFIG_SENSORS_PCA9539 is not set
   4.241  # CONFIG_SENSORS_PCF8591 is not set
   4.242 -# CONFIG_SENSORS_RTC8564 is not set
   4.243  # CONFIG_SENSORS_MAX6875 is not set
   4.244 -# CONFIG_RTC_X1205_I2C is not set
   4.245  # CONFIG_I2C_DEBUG_CORE is not set
   4.246  # CONFIG_I2C_DEBUG_ALGO is not set
   4.247  # CONFIG_I2C_DEBUG_BUS is not set
   4.248 @@ -844,13 +862,13 @@ CONFIG_I2C_ALGOPCF=y
   4.249  #
   4.250  # Dallas's 1-wire bus
   4.251  #
   4.252 -# CONFIG_W1 is not set
   4.253  
   4.254  #
   4.255  # Hardware Monitoring support
   4.256  #
   4.257  CONFIG_HWMON=y
   4.258  # CONFIG_HWMON_VID is not set
   4.259 +# CONFIG_SENSORS_ABITUGURU is not set
   4.260  # CONFIG_SENSORS_ADM1021 is not set
   4.261  # CONFIG_SENSORS_ADM1025 is not set
   4.262  # CONFIG_SENSORS_ADM1026 is not set
   4.263 @@ -879,10 +897,12 @@ CONFIG_HWMON=y
   4.264  # CONFIG_SENSORS_PC87360 is not set
   4.265  # CONFIG_SENSORS_SIS5595 is not set
   4.266  # CONFIG_SENSORS_SMSC47M1 is not set
   4.267 +# CONFIG_SENSORS_SMSC47M192 is not set
   4.268  # CONFIG_SENSORS_SMSC47B397 is not set
   4.269  # CONFIG_SENSORS_VIA686A is not set
   4.270  # CONFIG_SENSORS_VT8231 is not set
   4.271  # CONFIG_SENSORS_W83781D is not set
   4.272 +# CONFIG_SENSORS_W83791D is not set
   4.273  # CONFIG_SENSORS_W83792D is not set
   4.274  # CONFIG_SENSORS_W83L785TS is not set
   4.275  # CONFIG_SENSORS_W83627HF is not set
   4.276 @@ -894,24 +914,25 @@ CONFIG_HWMON=y
   4.277  #
   4.278  
   4.279  #
   4.280 -# Multimedia Capabilities Port drivers
   4.281 -#
   4.282 -
   4.283 -#
   4.284  # Multimedia devices
   4.285  #
   4.286  CONFIG_VIDEO_DEV=y
   4.287 +CONFIG_VIDEO_V4L1=y
   4.288 +CONFIG_VIDEO_V4L1_COMPAT=y
   4.289 +CONFIG_VIDEO_V4L2=y
   4.290  
   4.291  #
   4.292 -# Video For Linux
   4.293 +# Video Capture Adapters
   4.294  #
   4.295  
   4.296  #
   4.297 -# Video Adapters
   4.298 +# Video Capture Adapters
   4.299  #
   4.300  # CONFIG_VIDEO_ADV_DEBUG is not set
   4.301 +# CONFIG_VIDEO_VIVI is not set
   4.302  # CONFIG_VIDEO_BT848 is not set
   4.303  # CONFIG_VIDEO_CPIA is not set
   4.304 +# CONFIG_VIDEO_CPIA2 is not set
   4.305  # CONFIG_VIDEO_SAA5246A is not set
   4.306  # CONFIG_VIDEO_SAA5249 is not set
   4.307  # CONFIG_TUNER_3036 is not set
   4.308 @@ -923,10 +944,40 @@ CONFIG_VIDEO_DEV=y
   4.309  # CONFIG_VIDEO_HEXIUM_ORION is not set
   4.310  # CONFIG_VIDEO_HEXIUM_GEMINI is not set
   4.311  # CONFIG_VIDEO_CX88 is not set
   4.312 +
   4.313 +#
   4.314 +# Encoders and Decoders
   4.315 +#
   4.316 +# CONFIG_VIDEO_MSP3400 is not set
   4.317 +# CONFIG_VIDEO_CS53L32A is not set
   4.318 +# CONFIG_VIDEO_TLV320AIC23B is not set
   4.319 +# CONFIG_VIDEO_WM8775 is not set
   4.320 +# CONFIG_VIDEO_WM8739 is not set
   4.321 +# CONFIG_VIDEO_CX2341X is not set
   4.322 +# CONFIG_VIDEO_CX25840 is not set
   4.323 +# CONFIG_VIDEO_SAA711X is not set
   4.324 +# CONFIG_VIDEO_SAA7127 is not set
   4.325 +# CONFIG_VIDEO_UPD64031A is not set
   4.326 +# CONFIG_VIDEO_UPD64083 is not set
   4.327 +
   4.328 +#
   4.329 +# V4L USB devices
   4.330 +#
   4.331 +# CONFIG_VIDEO_PVRUSB2 is not set
   4.332  # CONFIG_VIDEO_EM28XX is not set
   4.333 +# CONFIG_USB_VICAM is not set
   4.334 +# CONFIG_USB_IBMCAM is not set
   4.335 +# CONFIG_USB_KONICAWC is not set
   4.336 +# CONFIG_USB_QUICKCAM_MESSENGER is not set
   4.337 +# CONFIG_USB_ET61X251 is not set
   4.338  # CONFIG_VIDEO_OVCAMCHIP is not set
   4.339 -# CONFIG_VIDEO_AUDIO_DECODER is not set
   4.340 -# CONFIG_VIDEO_DECODER is not set
   4.341 +# CONFIG_USB_W9968CF is not set
   4.342 +# CONFIG_USB_OV511 is not set
   4.343 +# CONFIG_USB_SE401 is not set
   4.344 +# CONFIG_USB_SN9C102 is not set
   4.345 +# CONFIG_USB_STV680 is not set
   4.346 +# CONFIG_USB_ZC0301 is not set
   4.347 +# CONFIG_USB_PWC is not set
   4.348  
   4.349  #
   4.350  # Radio Adapters
   4.351 @@ -934,20 +985,24 @@ CONFIG_VIDEO_DEV=y
   4.352  # CONFIG_RADIO_GEMTEK_PCI is not set
   4.353  # CONFIG_RADIO_MAXIRADIO is not set
   4.354  # CONFIG_RADIO_MAESTRO is not set
   4.355 +# CONFIG_USB_DSBR is not set
   4.356  
   4.357  #
   4.358  # Digital Video Broadcasting Devices
   4.359  #
   4.360  # CONFIG_DVB is not set
   4.361 +# CONFIG_USB_DABUSB is not set
   4.362  
   4.363  #
   4.364  # Graphics support
   4.365  #
   4.366 +CONFIG_FIRMWARE_EDID=y
   4.367  CONFIG_FB=y
   4.368  CONFIG_FB_CFB_FILLRECT=y
   4.369  CONFIG_FB_CFB_COPYAREA=y
   4.370  CONFIG_FB_CFB_IMAGEBLIT=y
   4.371  # CONFIG_FB_MACMODES is not set
   4.372 +# CONFIG_FB_BACKLIGHT is not set
   4.373  CONFIG_FB_MODE_HELPERS=y
   4.374  # CONFIG_FB_TILEBLITTING is not set
   4.375  # CONFIG_FB_CIRRUS is not set
   4.376 @@ -959,7 +1014,6 @@ CONFIG_FB_MODE_HELPERS=y
   4.377  # CONFIG_FB_NVIDIA is not set
   4.378  # CONFIG_FB_RIVA is not set
   4.379  # CONFIG_FB_MATROX is not set
   4.380 -# CONFIG_FB_RADEON_OLD is not set
   4.381  CONFIG_FB_RADEON=y
   4.382  CONFIG_FB_RADEON_I2C=y
   4.383  CONFIG_FB_RADEON_DEBUG=y
   4.384 @@ -978,6 +1032,7 @@ CONFIG_FB_RADEON_DEBUG=y
   4.385  # Console display driver support
   4.386  #
   4.387  CONFIG_VGA_CONSOLE=y
   4.388 +# CONFIG_VGACON_SOFT_SCROLLBACK is not set
   4.389  CONFIG_DUMMY_CONSOLE=y
   4.390  # CONFIG_FRAMEBUFFER_CONSOLE is not set
   4.391  
   4.392 @@ -1008,9 +1063,11 @@ CONFIG_SND_SEQ_DUMMY=y
   4.393  CONFIG_SND_OSSEMUL=y
   4.394  CONFIG_SND_MIXER_OSS=y
   4.395  CONFIG_SND_PCM_OSS=y
   4.396 +CONFIG_SND_PCM_OSS_PLUGINS=y
   4.397  CONFIG_SND_SEQUENCER_OSS=y
   4.398  # CONFIG_SND_DYNAMIC_MINORS is not set
   4.399  CONFIG_SND_SUPPORT_OLD_API=y
   4.400 +CONFIG_SND_VERBOSE_PROCFS=y
   4.401  # CONFIG_SND_VERBOSE_PRINTK is not set
   4.402  # CONFIG_SND_DEBUG is not set
   4.403  
   4.404 @@ -1031,6 +1088,7 @@ CONFIG_SND_MPU401=y
   4.405  # PCI devices
   4.406  #
   4.407  # CONFIG_SND_AD1889 is not set
   4.408 +# CONFIG_SND_ALS300 is not set
   4.409  # CONFIG_SND_ALI5451 is not set
   4.410  CONFIG_SND_ATIIXP=y
   4.411  # CONFIG_SND_ATIIXP_MODEM is not set
   4.412 @@ -1043,6 +1101,18 @@ CONFIG_SND_ATIIXP=y
   4.413  # CONFIG_SND_CMIPCI is not set
   4.414  # CONFIG_SND_CS4281 is not set
   4.415  # CONFIG_SND_CS46XX is not set
   4.416 +# CONFIG_SND_DARLA20 is not set
   4.417 +# CONFIG_SND_GINA20 is not set
   4.418 +# CONFIG_SND_LAYLA20 is not set
   4.419 +# CONFIG_SND_DARLA24 is not set
   4.420 +# CONFIG_SND_GINA24 is not set
   4.421 +# CONFIG_SND_LAYLA24 is not set
   4.422 +# CONFIG_SND_MONA is not set
   4.423 +# CONFIG_SND_MIA is not set
   4.424 +# CONFIG_SND_ECHO3G is not set
   4.425 +# CONFIG_SND_INDIGO is not set
   4.426 +# CONFIG_SND_INDIGOIO is not set
   4.427 +# CONFIG_SND_INDIGODJ is not set
   4.428  # CONFIG_SND_EMU10K1 is not set
   4.429  # CONFIG_SND_EMU10K1X is not set
   4.430  # CONFIG_SND_ENS1370 is not set
   4.431 @@ -1063,6 +1133,7 @@ CONFIG_SND_FM801=y
   4.432  # CONFIG_SND_MIXART is not set
   4.433  # CONFIG_SND_NM256 is not set
   4.434  # CONFIG_SND_PCXHR is not set
   4.435 +# CONFIG_SND_RIPTIDE is not set
   4.436  # CONFIG_SND_RME32 is not set
   4.437  # CONFIG_SND_RME96 is not set
   4.438  # CONFIG_SND_RME9652 is not set
   4.439 @@ -1082,12 +1153,14 @@ CONFIG_SND_FM801=y
   4.440  # Open Sound System
   4.441  #
   4.442  CONFIG_SOUND_PRIME=y
   4.443 -# CONFIG_OBSOLETE_OSS_DRIVER is not set
   4.444 -# CONFIG_SOUND_FUSION is not set
   4.445 +# CONFIG_OSS_OBSOLETE_DRIVER is not set
   4.446 +# CONFIG_SOUND_BT878 is not set
   4.447 +# CONFIG_SOUND_ES1371 is not set
   4.448  # CONFIG_SOUND_ICH is not set
   4.449  # CONFIG_SOUND_TRIDENT is not set
   4.450  # CONFIG_SOUND_MSNDCLAS is not set
   4.451  # CONFIG_SOUND_MSNDPIN is not set
   4.452 +# CONFIG_SOUND_VIA82CXXX is not set
   4.453  # CONFIG_SOUND_TVMIXER is not set
   4.454  
   4.455  #
   4.456 @@ -1095,6 +1168,7 @@ CONFIG_SOUND_PRIME=y
   4.457  #
   4.458  CONFIG_USB_ARCH_HAS_HCD=y
   4.459  CONFIG_USB_ARCH_HAS_OHCI=y
   4.460 +CONFIG_USB_ARCH_HAS_EHCI=y
   4.461  CONFIG_USB=y
   4.462  # CONFIG_USB_DEBUG is not set
   4.463  
   4.464 @@ -1113,6 +1187,7 @@ CONFIG_USB_BANDWIDTH=y
   4.465  CONFIG_USB_EHCI_HCD=y
   4.466  # CONFIG_USB_EHCI_SPLIT_ISO is not set
   4.467  # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
   4.468 +# CONFIG_USB_EHCI_TT_NEWSCHED is not set
   4.469  # CONFIG_USB_ISP116X_HCD is not set
   4.470  CONFIG_USB_OHCI_HCD=y
   4.471  # CONFIG_USB_OHCI_BIG_ENDIAN is not set
   4.472 @@ -1123,7 +1198,6 @@ CONFIG_USB_UHCI_HCD=y
   4.473  #
   4.474  # USB Device Class drivers
   4.475  #
   4.476 -# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
   4.477  # CONFIG_USB_ACM is not set
   4.478  # CONFIG_USB_PRINTER is not set
   4.479  
   4.480 @@ -1160,9 +1234,7 @@ CONFIG_USB_HIDDEV=y
   4.481  # CONFIG_USB_ACECAD is not set
   4.482  # CONFIG_USB_KBTAB is not set
   4.483  # CONFIG_USB_POWERMATE is not set
   4.484 -# CONFIG_USB_MTOUCH is not set
   4.485 -# CONFIG_USB_ITMTOUCH is not set
   4.486 -# CONFIG_USB_EGALAX is not set
   4.487 +# CONFIG_USB_TOUCHSCREEN is not set
   4.488  # CONFIG_USB_YEALINK is not set
   4.489  # CONFIG_USB_XPAD is not set
   4.490  # CONFIG_USB_ATI_REMOTE is not set
   4.491 @@ -1177,21 +1249,6 @@ CONFIG_USB_HIDDEV=y
   4.492  # CONFIG_USB_MICROTEK is not set
   4.493  
   4.494  #
   4.495 -# USB Multimedia devices
   4.496 -#
   4.497 -# CONFIG_USB_DABUSB is not set
   4.498 -# CONFIG_USB_VICAM is not set
   4.499 -# CONFIG_USB_DSBR is not set
   4.500 -# CONFIG_USB_ET61X251 is not set
   4.501 -# CONFIG_USB_IBMCAM is not set
   4.502 -# CONFIG_USB_KONICAWC is not set
   4.503 -# CONFIG_USB_OV511 is not set
   4.504 -# CONFIG_USB_SE401 is not set
   4.505 -# CONFIG_USB_SN9C102 is not set
   4.506 -# CONFIG_USB_STV680 is not set
   4.507 -# CONFIG_USB_PWC is not set
   4.508 -
   4.509 -#
   4.510  # USB Network Adapters
   4.511  #
   4.512  # CONFIG_USB_CATC is not set
   4.513 @@ -1220,10 +1277,12 @@ CONFIG_USB_MON=y
   4.514  # CONFIG_USB_LEGOTOWER is not set
   4.515  # CONFIG_USB_LCD is not set
   4.516  # CONFIG_USB_LED is not set
   4.517 +# CONFIG_USB_CYPRESS_CY7C63 is not set
   4.518  # CONFIG_USB_CYTHERM is not set
   4.519  # CONFIG_USB_PHIDGETKIT is not set
   4.520  # CONFIG_USB_PHIDGETSERVO is not set
   4.521  # CONFIG_USB_IDMOUSE is not set
   4.522 +# CONFIG_USB_APPLEDISPLAY is not set
   4.523  # CONFIG_USB_SISUSBVGA is not set
   4.524  # CONFIG_USB_LD is not set
   4.525  # CONFIG_USB_TEST is not set
   4.526 @@ -1243,6 +1302,19 @@ CONFIG_USB_MON=y
   4.527  # CONFIG_MMC is not set
   4.528  
   4.529  #
   4.530 +# LED devices
   4.531 +#
   4.532 +# CONFIG_NEW_LEDS is not set
   4.533 +
   4.534 +#
   4.535 +# LED drivers
   4.536 +#
   4.537 +
   4.538 +#
   4.539 +# LED Triggers
   4.540 +#
   4.541 +
   4.542 +#
   4.543  # InfiniBand support
   4.544  #
   4.545  # CONFIG_INFINIBAND is not set
   4.546 @@ -1252,6 +1324,24 @@ CONFIG_USB_MON=y
   4.547  #
   4.548  
   4.549  #
   4.550 +# Real Time Clock
   4.551 +#
   4.552 +# CONFIG_RTC_CLASS is not set
   4.553 +
   4.554 +#
   4.555 +# DMA Engine support
   4.556 +#
   4.557 +# CONFIG_DMA_ENGINE is not set
   4.558 +
   4.559 +#
   4.560 +# DMA Clients
   4.561 +#
   4.562 +
   4.563 +#
   4.564 +# DMA Devices
   4.565 +#
   4.566 +
   4.567 +#
   4.568  # File systems
   4.569  #
   4.570  CONFIG_EXT2_FS=y
   4.571 @@ -1275,7 +1365,6 @@ CONFIG_REISERFS_FS_SECURITY=y
   4.572  # CONFIG_JFS_FS is not set
   4.573  CONFIG_FS_POSIX_ACL=y
   4.574  CONFIG_XFS_FS=y
   4.575 -CONFIG_XFS_EXPORT=y
   4.576  # CONFIG_XFS_QUOTA is not set
   4.577  # CONFIG_XFS_SECURITY is not set
   4.578  # CONFIG_XFS_POSIX_ACL is not set
   4.579 @@ -1284,6 +1373,7 @@ CONFIG_XFS_EXPORT=y
   4.580  # CONFIG_MINIX_FS is not set
   4.581  # CONFIG_ROMFS_FS is not set
   4.582  CONFIG_INOTIFY=y
   4.583 +CONFIG_INOTIFY_USER=y
   4.584  # CONFIG_QUOTA is not set
   4.585  CONFIG_DNOTIFY=y
   4.586  CONFIG_AUTOFS_FS=y
   4.587 @@ -1318,7 +1408,6 @@ CONFIG_SYSFS=y
   4.588  CONFIG_TMPFS=y
   4.589  # CONFIG_HUGETLB_PAGE is not set
   4.590  CONFIG_RAMFS=y
   4.591 -# CONFIG_RELAYFS_FS is not set
   4.592  # CONFIG_CONFIGFS_FS is not set
   4.593  
   4.594  #
   4.595 @@ -1364,7 +1453,9 @@ CONFIG_SMB_NLS_DEFAULT=y
   4.596  CONFIG_SMB_NLS_REMOTE="cp437"
   4.597  CONFIG_CIFS=y
   4.598  # CONFIG_CIFS_STATS is not set
   4.599 +# CONFIG_CIFS_WEAK_PW_HASH is not set
   4.600  # CONFIG_CIFS_XATTR is not set
   4.601 +# CONFIG_CIFS_DEBUG2 is not set
   4.602  # CONFIG_CIFS_EXPERIMENTAL is not set
   4.603  # CONFIG_NCP_FS is not set
   4.604  # CONFIG_CODA_FS is not set
   4.605 @@ -1443,9 +1534,11 @@ CONFIG_NLS_UTF8=y
   4.606  # CONFIG_CRC16 is not set
   4.607  CONFIG_CRC32=y
   4.608  # CONFIG_LIBCRC32C is not set
   4.609 +CONFIG_PLIST=y
   4.610  CONFIG_GENERIC_HARDIRQS=y
   4.611  CONFIG_GENERIC_IRQ_PROBE=y
   4.612  CONFIG_GENERIC_PENDING_IRQ=y
   4.613 +CONFIG_IRQ_PER_CPU=y
   4.614  
   4.615  #
   4.616  # Instrumentation Support
   4.617 @@ -1458,14 +1551,19 @@ CONFIG_GENERIC_PENDING_IRQ=y
   4.618  #
   4.619  # CONFIG_PRINTK_TIME is not set
   4.620  CONFIG_MAGIC_SYSRQ=y
   4.621 +CONFIG_UNUSED_SYMBOLS=y
   4.622  CONFIG_DEBUG_KERNEL=y
   4.623  CONFIG_LOG_BUF_SHIFT=20
   4.624  CONFIG_DETECT_SOFTLOCKUP=y
   4.625  # CONFIG_SCHEDSTATS is not set
   4.626  # CONFIG_DEBUG_SLAB is not set
   4.627 -CONFIG_DEBUG_MUTEXES=y
   4.628 +# CONFIG_DEBUG_RT_MUTEXES is not set
   4.629 +# CONFIG_RT_MUTEX_TESTER is not set
   4.630  # CONFIG_DEBUG_SPINLOCK is not set
   4.631 +CONFIG_DEBUG_MUTEXES=y
   4.632 +# CONFIG_DEBUG_RWSEMS is not set
   4.633  # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
   4.634 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
   4.635  # CONFIG_DEBUG_KOBJECT is not set
   4.636  # CONFIG_DEBUG_INFO is not set
   4.637  # CONFIG_DEBUG_FS is not set
   4.638 @@ -1519,7 +1617,6 @@ CONFIG_CRYPTO_DES=y
   4.639  #
   4.640  # CONFIG_XEN_UTIL is not set
   4.641  CONFIG_XEN_BALLOON=y
   4.642 -# CONFIG_XEN_DEVMEM is not set
   4.643  CONFIG_XEN_REBOOT=y
   4.644  # CONFIG_XEN_SMPBOOT is not set
   4.645  CONFIG_XEN_INTERFACE_VERSION=0x00030203
   4.646 @@ -1555,3 +1652,4 @@ CONFIG_XEN_COMPAT_030002_AND_LATER=y
   4.647  CONFIG_XEN_COMPAT_030002=y
   4.648  CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
   4.649  CONFIG_NO_IDLE_HZ=y
   4.650 +CONFIG_XEN_DEVMEM=y
     5.1 --- a/extras/mini-os/gnttab.c	Fri Feb 09 14:43:22 2007 -0600
     5.2 +++ b/extras/mini-os/gnttab.c	Wed Feb 14 19:01:35 2007 +0000
     5.3 @@ -21,7 +21,12 @@
     5.4  
     5.5  #define NR_RESERVED_ENTRIES 8
     5.6  
     5.7 +/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
     5.8 +#ifdef __ia64__
     5.9 +#define NR_GRANT_FRAMES 1
    5.10 +#else
    5.11  #define NR_GRANT_FRAMES 4
    5.12 +#endif
    5.13  #define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
    5.14  
    5.15  static grant_entry_t *gnttab_table;
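
The mini-os change above caps NR_GRANT_FRAMES at 1 on ia64 because the hypervisor may expose fewer grant-table frames there, and NR_GRANT_ENTRIES is derived directly from the frame count. A small worked example of that arithmetic; the 16 KiB page size and 8-byte grant_entry_t are typical ia64/v1 values assumed for illustration, not quoted from this tree.

#include <stdio.h>

/* Assumed values for illustration only: ia64 commonly runs with 16 KiB
 * pages and a v1 grant_entry_t is 8 bytes; neither figure comes from
 * this changeset. */
#define PAGE_SIZE        (16 * 1024)
#define GRANT_ENTRY_SIZE 8

int main(void)
{
    int nr_grant_frames  = 1;   /* ia64 value chosen by the patch */
    int nr_grant_entries = nr_grant_frames * PAGE_SIZE / GRANT_ENTRY_SIZE;

    /* One 16 KiB frame holds 2048 eight-byte grant entries. */
    printf("%d grant entries\n", nr_grant_entries);
    return 0;
}
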
     6.1 --- a/extras/mini-os/netfront.c	Fri Feb 09 14:43:22 2007 -0600
     6.2 +++ b/extras/mini-os/netfront.c	Wed Feb 14 19:01:35 2007 +0000
     6.3 @@ -349,7 +349,9 @@ done:
     6.4      init_rx_buffers();
     6.5  
     6.6      unsigned char rawmac[6];
     6.7 -    sscanf(mac,"%x:%x:%x:%x:%x:%x",
     6.8 +        /* Special conversion specifier 'hh' needed for __ia64__. Without
     6.9 +           this mini-os panics with 'Unaligned reference'. */
    6.10 +    sscanf(mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
    6.11              &rawmac[0],
    6.12              &rawmac[1],
    6.13              &rawmac[2],
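
The comment in the hunk above explains the fix: the 'hh' length modifier makes each %x conversion store into an unsigned char, so the six parsed octets land in the one-byte rawmac[] slots instead of being written as full ints, which is what triggered the unaligned reference on ia64. A standalone sketch of the same parse:

#include <stdio.h>

int main(void)
{
    const char *mac = "00:16:3e:12:34:56";   /* example address */
    unsigned char rawmac[6];

    /* %hhx writes an unsigned char; plain %x expects an unsigned int
     * target and would clobber bytes beyond each array element. */
    if (sscanf(mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
               &rawmac[0], &rawmac[1], &rawmac[2],
               &rawmac[3], &rawmac[4], &rawmac[5]) == 6)
        printf("last octet: %02x\n", rawmac[5]);
    return 0;
}
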
     7.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Fri Feb 09 14:43:22 2007 -0600
     7.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Wed Feb 14 19:01:35 2007 +0000
     7.3 @@ -747,7 +747,7 @@ ENTRY(hypervisor_callback)
     7.4  	jb   11f
     7.5  	cmpl $sysexit_ecrit,%eax
     7.6  	ja   11f
     7.7 -	addl $0x34,%esp			# Remove cs...ebx from stack frame.
     7.8 +	addl $OLDESP,%esp		# Remove eflags...ebx from stack frame.
     7.9  11:	push %esp
    7.10  	call evtchn_do_upcall
    7.11  	add  $4,%esp
    7.12 @@ -777,18 +777,13 @@ ecrit:  /**** END OF CRITICAL REGION ***
    7.13  # provides the number of bytes which have already been popped from the
    7.14  # interrupted stack frame.
    7.15  critical_region_fixup:
    7.16 -	addl $critical_fixup_table-scrit,%eax
    7.17 -	movzbl (%eax),%eax		# %eax contains num bytes popped
    7.18 -	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
    7.19 +	movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
    7.20 +	cmpb $0xff,%cl                  # 0xff => vcpu_info critical region
    7.21  	jne  15f
    7.22 -	GET_THREAD_INFO(%ebp)
    7.23 -        xorl %eax,%eax
    7.24 -15:	mov  %esp,%esi
    7.25 -	add  %eax,%esi			# %esi points at end of src region
    7.26 -	mov  %esp,%edi
    7.27 -	add  $0x34,%edi			# %edi points at end of dst region
    7.28 -	mov  %eax,%ecx
    7.29 -	shr  $2,%ecx			# convert words to bytes
    7.30 +	xorl %ecx,%ecx
    7.31 +15:	leal (%esp,%ecx),%esi		# %esi points at end of src region
    7.32 +	leal OLDESP(%esp),%edi		# %edi points at end of dst region
    7.33 +	shrl $2,%ecx			# convert words to bytes
    7.34  	je   17f			# skip loop if nothing to copy
    7.35  16:	subl $4,%esi			# pre-decrementing copy loop
    7.36  	subl $4,%edi
    7.37 @@ -798,6 +793,7 @@ 16:	subl $4,%esi			# pre-decrementing co
    7.38  17:	movl %edi,%esp			# final %edi is top of merged stack
    7.39  	jmp  11b
    7.40  
    7.41 +.section .rodata,"a"
    7.42  critical_fixup_table:
    7.43  	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
    7.44  	.byte 0xff,0xff			# jnz  14f
    7.45 @@ -814,6 +810,7 @@ critical_fixup_table:
    7.46  	.byte 0x28			# iret
    7.47  	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
    7.48  	.byte 0x00,0x00			# jmp  11b
    7.49 +.previous
    7.50  
    7.51  # Hypervisor uses this for application faults while it executes.
    7.52  # We get here for two reasons:
    7.53 @@ -1194,6 +1191,7 @@ ENTRY(fixup_4gb_segment)
    7.54  	jmp error_code
    7.55  
    7.56  .section .rodata,"a"
    7.57 +.align 4
    7.58  #include "syscall_table.S"
    7.59  
    7.60  syscall_table_size=(.-sys_call_table)
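
The rewritten critical_region_fixup keeps the same stack-merge strategy but builds the source and destination end pointers with lea, uses OLDESP instead of the hard-coded 0x34, and moves the fixup table into .rodata. The heart of it is still a pre-decrementing word copy; a rough C rendering of just that copy idiom (the register bookkeeping and the meaning of the two end pointers are left to the assembly above):

#include <stdint.h>
#include <stddef.h>

/* Copy 'nbytes' bytes so they end at dst_end instead of src_end,
 * walking downward one 32-bit word at a time, as the 16: loop does. */
static void copy_down(uint32_t *src_end, uint32_t *dst_end, size_t nbytes)
{
    size_t words = nbytes / 4;      /* shrl $2,%ecx */

    while (words--) {
        --src_end;                  /* subl $4,%esi */
        --dst_end;                  /* subl $4,%edi */
        *dst_end = *src_end;        /* load via %eax, store to (%edi) */
    }
}
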
     8.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c	Fri Feb 09 14:43:22 2007 -0600
     8.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c	Wed Feb 14 19:01:35 2007 +0000
     8.3 @@ -319,81 +319,85 @@ static struct irqaction resched_irqactio
     8.4   * required.
     8.5   */
     8.6  static void
     8.7 -xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
     8.8 +xen_register_percpu_irq (unsigned int vec, struct irqaction *action, int save)
     8.9  {
    8.10  	unsigned int cpu = smp_processor_id();
    8.11 -	int ret = 0;
    8.12 +	irq_desc_t *desc;
    8.13 +	int irq = 0;
    8.14  
    8.15  	if (xen_slab_ready) {
    8.16 -		switch (irq) {
    8.17 +		switch (vec) {
    8.18  		case IA64_TIMER_VECTOR:
    8.19  			sprintf(timer_name[cpu], "%s%d", action->name, cpu);
    8.20 -			ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
    8.21 +			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
    8.22  				action->handler, action->flags,
    8.23  				timer_name[cpu], action->dev_id);
    8.24 -			per_cpu(timer_irq,cpu) = ret;
    8.25 -			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
    8.26 +			per_cpu(timer_irq,cpu) = irq;
    8.27  			break;
    8.28  		case IA64_IPI_RESCHEDULE:
    8.29  			sprintf(resched_name[cpu], "%s%d", action->name, cpu);
    8.30 -			ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
    8.31 +			irq = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
    8.32  				action->handler, action->flags,
    8.33  				resched_name[cpu], action->dev_id);
    8.34 -			per_cpu(resched_irq,cpu) = ret;
    8.35 -			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
    8.36 +			per_cpu(resched_irq,cpu) = irq;
    8.37  			break;
    8.38  		case IA64_IPI_VECTOR:
    8.39  			sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
    8.40 -			ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
    8.41 +			irq = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
    8.42  				action->handler, action->flags,
    8.43  				ipi_name[cpu], action->dev_id);
    8.44 -			per_cpu(ipi_irq,cpu) = ret;
    8.45 -			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
    8.46 -			break;
    8.47 -		case IA64_SPURIOUS_INT_VECTOR:
    8.48 +			per_cpu(ipi_irq,cpu) = irq;
    8.49  			break;
    8.50  		case IA64_CMC_VECTOR:
    8.51  			sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
    8.52 -			ret = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
    8.53 +			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
    8.54  			                              action->handler,
    8.55  			                              action->flags,
    8.56  			                              cmc_name[cpu],
    8.57  			                              action->dev_id);
    8.58 -			per_cpu(cmc_irq,cpu) = ret;
    8.59 -			printk(KERN_INFO "register VIRQ_MCA_CMC (%s) to xen "
    8.60 -			       "irq (%d)\n", cmc_name[cpu], ret);
    8.61 +			per_cpu(cmc_irq,cpu) = irq;
    8.62  			break;
    8.63  		case IA64_CMCP_VECTOR:
    8.64  			sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
    8.65 -			ret = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
    8.66 +			irq = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
    8.67  			                             action->handler,
    8.68  			                             action->flags,
    8.69  			                             cmcp_name[cpu],
    8.70  			                             action->dev_id);
    8.71 -			per_cpu(cmcp_irq,cpu) = ret;
    8.72 -			printk(KERN_INFO "register CMCP_VECTOR (%s) to xen "
    8.73 -			       "irq (%d)\n", cmcp_name[cpu], ret);
    8.74 +			per_cpu(cmcp_irq,cpu) = irq;
    8.75  			break;
    8.76  		case IA64_CPEP_VECTOR:
    8.77  			sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
    8.78 -			ret = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
    8.79 +			irq = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
    8.80  			                             action->handler,
    8.81  			                             action->flags,
    8.82  			                             cpep_name[cpu],
    8.83  			                             action->dev_id);
    8.84 -			per_cpu(cpep_irq,cpu) = ret;
    8.85 -			printk(KERN_INFO "register CPEP_VECTOR (%s) to xen "
    8.86 -			       "irq (%d)\n", cpep_name[cpu], ret);
    8.87 +			per_cpu(cpep_irq,cpu) = irq;
    8.88  			break;
    8.89  		case IA64_CPE_VECTOR:
    8.90 -			printk(KERN_WARNING "register IA64_CPE_VECTOR "
    8.91 -			       "IGNORED\n");
    8.92 +		case IA64_MCA_RENDEZ_VECTOR:
    8.93 +		case IA64_PERFMON_VECTOR:
    8.94 +		case IA64_MCA_WAKEUP_VECTOR:
    8.95 +		case IA64_SPURIOUS_INT_VECTOR:
    8.96 +			/* No need to complain, these aren't supported. */
    8.97  			break;
    8.98  		default:
    8.99 -			printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
   8.100 +			printk(KERN_WARNING "Percpu irq %d is unsupported "
   8.101 +			       "by xen!\n", vec);
   8.102  			break;
   8.103  		}
   8.104 -		BUG_ON(ret < 0);
   8.105 +		BUG_ON(irq < 0);
   8.106 +
   8.107 +		if (irq > 0) {
   8.108 +			/*
   8.109 +			 * Mark percpu.  Without this, migrate_irqs() will
   8.110 +			 * mark the interrupt for migrations and trigger it
   8.111 +			 * on cpu hotplug.
   8.112 +			 */
   8.113 +			desc = irq_desc + irq;
   8.114 +			desc->status |= IRQ_PER_CPU;
   8.115 +		}
   8.116  	} 
   8.117  
   8.118  	/* For BSP, we cache registered percpu irqs, and then re-walk
   8.119 @@ -401,7 +405,7 @@ xen_register_percpu_irq (unsigned int ir
   8.120  	 */
   8.121  	if (!cpu && save) {
   8.122  		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
   8.123 -		saved_percpu_irqs[saved_irq_cnt].irq = irq;
   8.124 +		saved_percpu_irqs[saved_irq_cnt].irq = vec;
   8.125  		saved_percpu_irqs[saved_irq_cnt].action = action;
   8.126  		saved_irq_cnt++;
   8.127  		if (!xen_slab_ready)
   8.128 @@ -588,7 +592,8 @@ ia64_send_ipi (int cpu, int vector, int 
   8.129  			irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
   8.130  			break;
   8.131  		default:
   8.132 -			printk(KERN_WARNING"Unsupported IPI type 0x%x\n", vector);
   8.133 +			printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
   8.134 +			       vector);
   8.135  			irq = 0;
   8.136  			break;
   8.137  		}		
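
The rework above makes every bind_*_to_irqhandler() call return the Linux irq number and then flags that irq as IRQ_PER_CPU so migrate_irqs() will not try to move it on CPU hotplug. Reduced to a fragment using the same names and call shape as the hunk; the handler, flags and name here are placeholders standing in for action->handler, action->flags and the per-CPU name buffer:

/* Fragment, not a complete function: bind the timer VIRQ for this CPU
 * and mark the resulting irq as per-CPU. */
int irq;

irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, timer_interrupt,
                              SA_INTERRUPT, "timer", NULL);
BUG_ON(irq < 0);
if (irq > 0) {
    irq_desc_t *desc = irq_desc + irq;
    desc->status |= IRQ_PER_CPU;    /* keep migrate_irqs() away from it */
}
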
     9.1 --- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Fri Feb 09 14:43:22 2007 -0600
     9.2 +++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c	Wed Feb 14 19:01:35 2007 +0000
     9.3 @@ -63,6 +63,7 @@
     9.4  #ifdef CONFIG_XEN
     9.5  #include <asm/hypervisor.h>
     9.6  #include <asm/xen/xencomm.h>
     9.7 +#include <xen/xencons.h>
     9.8  #endif
     9.9  #include <linux/dma-mapping.h>
    9.10  
    9.11 @@ -95,6 +96,12 @@ xen_panic_event(struct notifier_block *t
    9.12  static struct notifier_block xen_panic_block = {
    9.13  	xen_panic_event, NULL, 0 /* try to go last */
    9.14  };
    9.15 +
    9.16 +void xen_pm_power_off(void)
    9.17 +{
    9.18 +	local_irq_disable();
    9.19 +	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
    9.20 +}
    9.21  #endif
    9.22  
    9.23  extern void ia64_setup_printk_clock(void);
    9.24 @@ -454,7 +461,9 @@ setup_arch (char **cmdline_p)
    9.25  
    9.26  		setup_xen_features();
    9.27  		/* Register a call for panic conditions. */
    9.28 -		notifier_chain_register(&panic_notifier_list, &xen_panic_block);
    9.29 +		atomic_notifier_chain_register(&panic_notifier_list,
    9.30 +		                               &xen_panic_block);
    9.31 +		pm_power_off = xen_pm_power_off;
    9.32  	}
    9.33  #endif
    9.34  
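
Two hooks are wired up in setup.c: the panic callback is registered through the atomic notifier API (the plain notifier_chain_register() it replaces was split into atomic/blocking variants upstream around 2.6.17), and pm_power_off is pointed at xen_pm_power_off so a poweroff request becomes a SHUTDOWN_poweroff hypercall with interrupts disabled. The registration pattern, as a fragment mirroring the hunk:

/* Fragment: xen_panic_event is the callback defined earlier in this
 * file; only the wiring is shown here. */
static struct notifier_block xen_panic_block = {
    .notifier_call = xen_panic_event,   /* try to go last */
};

atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
pm_power_off = xen_pm_power_off;        /* poweroff now traps to Xen */
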
    10.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Fri Feb 09 14:43:22 2007 -0600
    10.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c	Wed Feb 14 19:01:35 2007 +0000
    10.3 @@ -417,12 +417,6 @@ HYPERVISOR_grant_table_op(unsigned int c
    10.4  EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
    10.5  
    10.6  ///////////////////////////////////////////////////////////////////////////
    10.7 -// PageForeign(), SetPageForeign(), ClearPageForeign()
    10.8 -
    10.9 -struct address_space xen_ia64_foreign_dummy_mapping;
   10.10 -EXPORT_SYMBOL(xen_ia64_foreign_dummy_mapping);
   10.11 -
   10.12 -///////////////////////////////////////////////////////////////////////////
   10.13  // foreign mapping
   10.14  #include <linux/efi.h>
   10.15  #include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
    11.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S	Fri Feb 09 14:43:22 2007 -0600
    11.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S	Wed Feb 14 19:01:35 2007 +0000
    11.3 @@ -124,6 +124,8 @@ GLOBAL_ENTRY(ia64_switch_to)
    11.4  	;;
    11.5  	itr.d dtr[r25]=r23		// wire in new mapping...
    11.6  #ifdef CONFIG_XEN
    11.7 +	;;
    11.8 +	srlz.d
    11.9  	mov r9=1
   11.10  	movl r8=XSI_PSR_IC
   11.11  	;;
   11.12 @@ -875,9 +877,6 @@ skip_rbs_switch:
   11.13  	st8 [r2]=r8
   11.14  	st8 [r3]=r10
   11.15  .work_pending:
   11.16 -	tbit.nz p6,p0=r31,TIF_SIGDELAYED		// signal delayed from  MCA/INIT/NMI/PMI context?
   11.17 -(p6)	br.cond.sptk.few .sigdelayed
   11.18 -	;;
   11.19  	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
   11.20  (p6)	br.cond.sptk.few .notify
   11.21  #ifdef CONFIG_PREEMPT
   11.22 @@ -913,17 +912,6 @@ skip_rbs_switch:
   11.23  (pLvSys)br.cond.sptk.few  .work_pending_syscall_end
   11.24  	br.cond.sptk.many .work_processed_kernel	// don't re-check
   11.25  
   11.26 -// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
   11.27 -// it could not be delivered.  Deliver it now.  The signal might be for us and
   11.28 -// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
   11.29 -// signal.
   11.30 -
   11.31 -.sigdelayed:
   11.32 -	br.call.sptk.many rp=do_sigdelayed
   11.33 -	cmp.eq p6,p0=r0,r0				// p6 <- 1, always re-check
   11.34 -(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
   11.35 -	br.cond.sptk.many .work_processed_kernel	// re-check
   11.36 -
   11.37  .work_pending_syscall_end:
   11.38  	adds r2=PT(R8)+16,r12
   11.39  	adds r3=PT(R10)+16,r12
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Fri Feb 09 14:43:22 2007 -0600
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c	Wed Feb 14 19:01:35 2007 +0000
    12.3 @@ -344,7 +344,7 @@ static void frontend_changed(struct xenb
    12.4  	switch (frontend_state) {
    12.5  	case XenbusStateInitialising:
    12.6  		if (dev->state == XenbusStateClosed) {
    12.7 -			printk("%s: %s: prepare for reconnect\n",
    12.8 +			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
    12.9  			       __FUNCTION__, dev->nodename);
   12.10  			xenbus_switch_state(dev, XenbusStateInitWait);
   12.11  		}
   12.12 @@ -488,7 +488,8 @@ static int connect_ring(struct backend_i
   12.13  		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
   12.14  		return -1;
   12.15  	}
   12.16 -	printk("blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
   12.17 +	printk(KERN_INFO
   12.18 +	       "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
   12.19  	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
   12.20  
   12.21  	/* Map the shared frame, irq etc. */
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Fri Feb 09 14:43:22 2007 -0600
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c	Wed Feb 14 19:01:35 2007 +0000
    13.3 @@ -272,7 +272,7 @@ static void tap_frontend_changed(struct 
    13.4  	switch (frontend_state) {
    13.5  	case XenbusStateInitialising:
    13.6  		if (dev->state == XenbusStateClosed) {
    13.7 -			printk("%s: %s: prepare for reconnect\n",
    13.8 +			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
    13.9  			       __FUNCTION__, dev->nodename);
   13.10  			xenbus_switch_state(dev, XenbusStateInitWait);
   13.11  		}
   13.12 @@ -369,7 +369,8 @@ static int connect_ring(struct backend_i
   13.13  		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
   13.14  		return -1;
   13.15  	}
   13.16 -	printk("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
   13.17 +	printk(KERN_INFO
   13.18 +	       "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
   13.19  	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
   13.20  
   13.21  	/* Map the shared frame, irq etc. */
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Fri Feb 09 14:43:22 2007 -0600
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Feb 14 19:01:35 2007 +0000
    14.3 @@ -424,7 +424,7 @@ static int bind_ipi_to_irq(unsigned int 
    14.4  static void unbind_from_irq(unsigned int irq)
    14.5  {
    14.6  	struct evtchn_close close;
    14.7 -	int evtchn = evtchn_from_irq(irq);
    14.8 +	int cpu, evtchn = evtchn_from_irq(irq);
    14.9  
   14.10  	spin_lock(&irq_mapping_update_lock);
   14.11  
   14.12 @@ -452,6 +452,10 @@ static void unbind_from_irq(unsigned int
   14.13  
   14.14  		evtchn_to_irq[evtchn] = -1;
   14.15  		irq_info[irq] = IRQ_UNBOUND;
   14.16 +
   14.17 +		/* Zap stats across IRQ changes of use. */
   14.18 +		for_each_possible_cpu(cpu)
   14.19 +			kstat_cpu(cpu).irqs[irq] = 0;
   14.20  	}
   14.21  
   14.22  	spin_unlock(&irq_mapping_update_lock);
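
When an event-channel binding is torn down the irq number may later be reused for something unrelated, so the per-CPU interrupt counters for it are cleared to keep /proc/interrupts from showing stale counts against the new user. Isolated, the reset is simply:

/* Clear this irq's count on every possible CPU, not just the online
 * ones, so counts for currently offline CPUs are wiped as well. */
int cpu;

for_each_possible_cpu(cpu)
    kstat_cpu(cpu).irqs[irq] = 0;
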
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Feb 09 14:43:22 2007 -0600
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Wed Feb 14 19:01:35 2007 +0000
    15.3 @@ -100,6 +100,7 @@ static void __shutdown_handler(void *unu
    15.4  static void shutdown_handler(struct xenbus_watch *watch,
    15.5  			     const char **vec, unsigned int len)
    15.6  {
    15.7 +	extern void ctrl_alt_del(void);
    15.8  	char *str;
    15.9  	struct xenbus_transaction xbt;
   15.10  	int err;
   15.11 @@ -129,7 +130,7 @@ static void shutdown_handler(struct xenb
   15.12  	if (strcmp(str, "poweroff") == 0)
   15.13  		shutting_down = SHUTDOWN_POWEROFF;
   15.14  	else if (strcmp(str, "reboot") == 0)
   15.15 -		kill_proc(1, SIGINT, 1); /* interrupt init */
   15.16 +		ctrl_alt_del();
   15.17  	else if (strcmp(str, "suspend") == 0)
   15.18  		shutting_down = SHUTDOWN_SUSPEND;
   15.19  	else if (strcmp(str, "halt") == 0)
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/fbfront/xenkbd.c	Fri Feb 09 14:43:22 2007 -0600
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/fbfront/xenkbd.c	Wed Feb 14 19:01:35 2007 +0000
    16.3 @@ -29,10 +29,12 @@
    16.4  
    16.5  struct xenkbd_info
    16.6  {
    16.7 -	struct input_dev *dev;
    16.8 +	struct input_dev *kbd;
    16.9 +	struct input_dev *ptr;
   16.10  	struct xenkbd_page *page;
   16.11  	int irq;
   16.12  	struct xenbus_device *xbdev;
   16.13 +	char phys[32];
   16.14  };
   16.15  
   16.16  static int xenkbd_remove(struct xenbus_device *);
   16.17 @@ -56,23 +58,36 @@ static irqreturn_t input_handler(int rq,
   16.18  	rmb();			/* ensure we see ring contents up to prod */
   16.19  	for (cons = page->in_cons; cons != prod; cons++) {
   16.20  		union xenkbd_in_event *event;
   16.21 +		struct input_dev *dev;
   16.22  		event = &XENKBD_IN_RING_REF(page, cons);
   16.23  
   16.24 +		dev = info->ptr;
   16.25  		switch (event->type) {
   16.26  		case XENKBD_TYPE_MOTION:
   16.27 -			input_report_rel(info->dev, REL_X, event->motion.rel_x);
   16.28 -			input_report_rel(info->dev, REL_Y, event->motion.rel_y);
   16.29 +			input_report_rel(dev, REL_X, event->motion.rel_x);
   16.30 +			input_report_rel(dev, REL_Y, event->motion.rel_y);
   16.31  			break;
   16.32  		case XENKBD_TYPE_KEY:
   16.33 -			input_report_key(info->dev, event->key.keycode, event->key.pressed);
   16.34 +			dev = NULL;
   16.35 +			if (test_bit(event->key.keycode, info->kbd->keybit))
   16.36 +				dev = info->kbd;
   16.37 +			if (test_bit(event->key.keycode, info->ptr->keybit))
   16.38 +				dev = info->ptr;
   16.39 +			if (dev)
   16.40 +				input_report_key(dev, event->key.keycode,
   16.41 +						 event->key.pressed);
   16.42 +			else
   16.43 +				printk("xenkbd: unhandled keycode 0x%x\n",
   16.44 +				       event->key.keycode);
   16.45  			break;
   16.46  		case XENKBD_TYPE_POS:
   16.47 -			input_report_abs(info->dev, ABS_X, event->pos.abs_x);
   16.48 -			input_report_abs(info->dev, ABS_Y, event->pos.abs_y);
   16.49 +			input_report_abs(dev, ABS_X, event->pos.abs_x);
   16.50 +			input_report_abs(dev, ABS_Y, event->pos.abs_y);
   16.51  			break;
   16.52  		}
   16.53 +		if (dev)
   16.54 +			input_sync(dev);
   16.55  	}
   16.56 -	input_sync(info->dev);
   16.57  	mb();			/* ensure we got ring contents */
   16.58  	page->in_cons = cons;
   16.59  	notify_remote_via_irq(info->irq);
   16.60 @@ -85,7 +100,7 @@ int __devinit xenkbd_probe(struct xenbus
   16.61  {
   16.62  	int ret, i;
   16.63  	struct xenkbd_info *info;
   16.64 -	struct input_dev *input_dev;
   16.65 +	struct input_dev *kbd, *ptr;
   16.66  
   16.67  	info = kzalloc(sizeof(*info), GFP_KERNEL);
   16.68  	if (!info) {
   16.69 @@ -94,6 +109,7 @@ int __devinit xenkbd_probe(struct xenbus
   16.70  	}
   16.71  	dev->dev.driver_data = info;
   16.72  	info->xbdev = dev;
   16.73 +	snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
   16.74  
   16.75  	info->page = (void *)__get_free_page(GFP_KERNEL);
   16.76  	if (!info->page)
   16.77 @@ -101,32 +117,52 @@ int __devinit xenkbd_probe(struct xenbus
   16.78  	info->page->in_cons = info->page->in_prod = 0;
   16.79  	info->page->out_cons = info->page->out_prod = 0;
   16.80  
   16.81 -	input_dev = input_allocate_device();
   16.82 -	if (!input_dev)
   16.83 +	/* keyboard */
   16.84 +	kbd = input_allocate_device();
   16.85 +	if (!kbd)
   16.86  		goto error_nomem;
   16.87 -
   16.88 -	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
   16.89 -	input_dev->keybit[LONG(BTN_MOUSE)]
   16.90 -		= BIT(BTN_LEFT) | BIT(BTN_MIDDLE) | BIT(BTN_RIGHT);
   16.91 -	/* TODO additional buttons */
   16.92 -	input_dev->relbit[0] = BIT(REL_X) | BIT(REL_Y);
   16.93 +	kbd->name = "Xen Virtual Keyboard";
   16.94 +	kbd->phys = info->phys;
   16.95 +	kbd->id.bustype = BUS_PCI;
   16.96 +	kbd->id.vendor = 0x5853;
   16.97 +	kbd->id.product = 0xffff;
   16.98 +	kbd->evbit[0] = BIT(EV_KEY);
   16.99 +	for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
  16.100 +		set_bit(i, kbd->keybit);
  16.101 +	for (i = KEY_OK; i < KEY_MAX; i++)
  16.102 +		set_bit(i, kbd->keybit);
  16.103  
  16.104 -	/* FIXME not sure this is quite right */
  16.105 -	for (i = 0; i < 256; i++)
  16.106 -		set_bit(i, input_dev->keybit);
  16.107 -
  16.108 -	input_dev->name = "Xen Virtual Keyboard/Mouse";
  16.109 -
  16.110 -	input_set_abs_params(input_dev, ABS_X, 0, XENFB_WIDTH, 0, 0);
  16.111 -	input_set_abs_params(input_dev, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
  16.112 -
  16.113 -	ret = input_register_device(input_dev);
  16.114 +	ret = input_register_device(kbd);
  16.115  	if (ret) {
  16.116 -		input_free_device(input_dev);
  16.117 -		xenbus_dev_fatal(dev, ret, "input_register_device");
  16.118 +		input_free_device(kbd);
  16.119 +		xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
  16.120  		goto error;
  16.121  	}
  16.122 -	info->dev = input_dev;
  16.123 +	info->kbd = kbd;
  16.124 +
  16.125 +	/* pointing device */
  16.126 +	ptr = input_allocate_device();
  16.127 +	if (!ptr)
  16.128 +		goto error_nomem;
  16.129 +	ptr->name = "Xen Virtual Pointer";
  16.130 +	ptr->phys = info->phys;
  16.131 +	ptr->id.bustype = BUS_PCI;
  16.132 +	ptr->id.vendor = 0x5853;
  16.133 +	ptr->id.product = 0xfffe;
  16.134 +	ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
  16.135 +	for (i = BTN_LEFT; i <= BTN_TASK; i++)
  16.136 +		set_bit(i, ptr->keybit);
  16.137 +	ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y);
  16.138 +	input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
  16.139 +	input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
  16.140 +
  16.141 +	ret = input_register_device(ptr);
  16.142 +	if (ret) {
  16.143 +		input_free_device(ptr);
  16.144 +		xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
  16.145 +		goto error;
  16.146 +	}
  16.147 +	info->ptr = ptr;
  16.148  
  16.149  	ret = xenkbd_connect_backend(dev, info);
  16.150  	if (ret < 0)
  16.151 @@ -155,7 +191,8 @@ static int xenkbd_remove(struct xenbus_d
  16.152  	struct xenkbd_info *info = dev->dev.driver_data;
  16.153  
  16.154  	xenkbd_disconnect_backend(info);
  16.155 -	input_unregister_device(info->dev);
  16.156 +	input_unregister_device(info->kbd);
  16.157 +	input_unregister_device(info->ptr);
  16.158  	free_page((unsigned long)info->page);
  16.159  	kfree(info);
  16.160  	return 0;
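
xenkbd now presents two input devices, a keyboard owning the key range and a pointer owning the buttons plus relative and absolute axes, and routes each incoming key event by testing which device's keybit mask claims the keycode. The per-device setup boils down to the following fragment (allocation-failure handling and the second device omitted; the IDs are the ones used in the hunk):

/* Fragment of the keyboard setup above. */
struct input_dev *kbd = input_allocate_device();
int i;

kbd->name = "Xen Virtual Keyboard";
kbd->id.bustype = BUS_PCI;
kbd->id.vendor  = 0x5853;
kbd->id.product = 0xffff;
kbd->evbit[0] = BIT(EV_KEY);
for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
    set_bit(i, kbd->keybit);

if (input_register_device(kbd))
    input_free_device(kbd);     /* failed registration: free, never unregister */

Splitting keyboard and pointer into separate devices lets userspace handle each with its usual per-device logic instead of one mixed keyboard/mouse node.
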
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Feb 09 14:43:22 2007 -0600
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Wed Feb 14 19:01:35 2007 +0000
    17.3 @@ -217,7 +217,7 @@ static void frontend_changed(struct xenb
    17.4  	switch (frontend_state) {
    17.5  	case XenbusStateInitialising:
    17.6  		if (dev->state == XenbusStateClosed) {
    17.7 -			printk("%s: %s: prepare for reconnect\n",
    17.8 +			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
    17.9  			       __FUNCTION__, dev->nodename);
   17.10  			if (be->netif) {
   17.11  				netif_disconnect(be->netif);
    18.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Feb 09 14:43:22 2007 -0600
    18.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Feb 14 19:01:35 2007 +0000
    18.3 @@ -1505,7 +1505,7 @@ static void netif_release_rx_bufs(struct
    18.4  	int id, ref;
    18.5  
    18.6  	if (np->copying_receiver) {
    18.7 -		printk("%s: fix me for copying receiver.\n", __FUNCTION__);
    18.8 +		WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
    18.9  		return;
   18.10  	}
   18.11  
   18.12 @@ -1555,8 +1555,8 @@ static void netif_release_rx_bufs(struct
   18.13  		xfer++;
   18.14  	}
   18.15  
   18.16 -	printk("%s: %d xfer, %d noxfer, %d unused\n",
   18.17 -	       __FUNCTION__, xfer, noxfer, unused);
   18.18 +	IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
   18.19 +		__FUNCTION__, xfer, noxfer, unused);
   18.20  
   18.21  	if (xfer) {
   18.22  		/* Some pages are no longer absent... */
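
netfront's messages switch from bare printk() to the driver's own IPRINTK/WPRINTK wrappers so they carry a log level and a consistent prefix. Their definitions are not part of this hunk; a typical shape for such wrappers, shown purely as an assumption and not quoted from the tree, would be:

/* Hypothetical definitions for illustration; the real macros are
 * defined elsewhere in netfront.c and may differ. */
#define IPRINTK(fmt, args...) \
    printk(KERN_INFO "xen_net: " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printk(KERN_WARNING "xen_net: " fmt, ##args)
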
    19.1 --- a/linux-2.6-xen-sparse/include/asm-i386/a.out.h	Fri Feb 09 14:43:22 2007 -0600
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,26 +0,0 @@
    19.4 -#ifndef __I386_A_OUT_H__
    19.5 -#define __I386_A_OUT_H__
    19.6 -
    19.7 -struct exec
    19.8 -{
    19.9 -  unsigned long a_info;		/* Use macros N_MAGIC, etc for access */
   19.10 -  unsigned a_text;		/* length of text, in bytes */
   19.11 -  unsigned a_data;		/* length of data, in bytes */
   19.12 -  unsigned a_bss;		/* length of uninitialized data area for file, in bytes */
   19.13 -  unsigned a_syms;		/* length of symbol table data in file, in bytes */
   19.14 -  unsigned a_entry;		/* start address */
   19.15 -  unsigned a_trsize;		/* length of relocation info for text, in bytes */
   19.16 -  unsigned a_drsize;		/* length of relocation info for data, in bytes */
   19.17 -};
   19.18 -
   19.19 -#define N_TRSIZE(a)	((a).a_trsize)
   19.20 -#define N_DRSIZE(a)	((a).a_drsize)
   19.21 -#define N_SYMSIZE(a)	((a).a_syms)
   19.22 -
   19.23 -#ifdef __KERNEL__
   19.24 -
   19.25 -#define STACK_TOP	(TASK_SIZE - 3*PAGE_SIZE)
   19.26 -
   19.27 -#endif
   19.28 -
   19.29 -#endif /* __A_OUT_GNU_H__ */
    20.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/hw_irq.h	Fri Feb 09 14:43:22 2007 -0600
    20.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/hw_irq.h	Wed Feb 14 19:01:35 2007 +0000
    20.3 @@ -104,10 +104,9 @@ extern void register_percpu_irq (ia64_ve
    20.4  static inline void ia64_resend_irq(unsigned int vector)
    20.5  {
    20.6  #ifdef CONFIG_XEN
    20.7 -	extern void resend_irq_on_evtchn(struct hw_interrupt_type *h,
    20.8 -					 unsigned int i);
    20.9 +	extern int resend_irq_on_evtchn(unsigned int i);
   20.10  	if (is_running_on_xen())
   20.11 -		resend_irq_on_evtchn(h, vector);
   20.12 +		resend_irq_on_evtchn(vector);
   20.13  	else
   20.14  #endif /* CONFIG_XEN */
   20.15  	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
    21.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/maddr.h	Fri Feb 09 14:43:22 2007 -0600
    21.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/maddr.h	Wed Feb 14 19:01:35 2007 +0000
    21.3 @@ -84,9 +84,6 @@ mfn_to_local_pfn(unsigned long mfn)
    21.4  
    21.5  #endif /* !CONFIG_XEN */
    21.6  
    21.7 -/* XXX to compile set_phys_to_machine(vaddr, FOREIGN_FRAME(m)) */
    21.8 -#define FOREIGN_FRAME(m)        (INVALID_P2M_ENTRY)
    21.9 -
   21.10  #define mfn_to_pfn(mfn) (mfn)
   21.11  #define pfn_to_mfn(pfn) (pfn)
   21.12  
    22.1 --- a/linux-2.6-xen-sparse/include/asm-ia64/page.h	Fri Feb 09 14:43:22 2007 -0600
    22.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/page.h	Wed Feb 14 19:01:35 2007 +0000
    22.3 @@ -236,46 +236,17 @@ get_order (unsigned long size)
    22.4  #include <linux/kernel.h>
    22.5  #include <asm/hypervisor.h>
    22.6  #include <xen/features.h>	// to compile netback, netfront
    22.7 -
    22.8 -/*
    22.9 - * XXX hack!
   22.10 - * Linux/IA64 uses PG_arch_1.
   22.11 - * This hack will be removed once PG_foreign bit is taken.
   22.12 - * #include <xen/foreign_page.h>
   22.13 - */
   22.14 -#ifdef __ASM_XEN_FOREIGN_PAGE_H__
   22.15 -# error "don't include include/xen/foreign_page.h!"
   22.16 -#endif
   22.17 -
   22.18 -extern struct address_space xen_ia64_foreign_dummy_mapping;
   22.19 -#define PageForeign(page)	\
   22.20 -	((page)->mapping == &xen_ia64_foreign_dummy_mapping)
   22.21 +#include <asm/maddr.h>
   22.22  
   22.23 -#define SetPageForeign(page, dtor) do {				\
   22.24 -	set_page_private((page), (unsigned long)(dtor));	\
   22.25 -	(page)->mapping = &xen_ia64_foreign_dummy_mapping;	\
   22.26 -	smp_rmb();						\
   22.27 -} while (0)
   22.28 -
   22.29 -#define ClearPageForeign(page) do {	\
   22.30 -	(page)->mapping = NULL;		\
   22.31 -	smp_rmb();			\
   22.32 -	set_page_private((page), 0);	\
   22.33 -} while (0)
   22.34 -
   22.35 -#define PageForeignDestructor(page)	\
   22.36 -	( (void (*) (struct page *)) page_private(page) )
   22.37 -
   22.38 -#define arch_free_page(_page,_order)			\
   22.39 -({      int foreign = PageForeign(_page);               \
   22.40 -	if (foreign)                                    \
   22.41 -		(PageForeignDestructor(_page))(_page);  \
   22.42 -	foreign;                                        \
   22.43 +#define arch_free_page(_page, _order)		\
   22.44 +({						\
   22.45 +	int foreign = PageForeign(_page);	\
   22.46 +	if (foreign)                            \
   22.47 +		PageForeignDestructor(_page);   \
   22.48 +	foreign;                                \
   22.49  })
   22.50  #define HAVE_ARCH_FREE_PAGE
   22.51  
   22.52 -#include <asm/maddr.h>
   22.53 -
   22.54  #endif /* CONFIG_XEN */
   22.55  #endif /* __ASSEMBLY__ */
   22.56  
    23.1 --- a/linux-2.6-xen-sparse/net/core/skbuff.c	Fri Feb 09 14:43:22 2007 -0600
    23.2 +++ b/linux-2.6-xen-sparse/net/core/skbuff.c	Wed Feb 14 19:01:35 2007 +0000
    23.3 @@ -1897,6 +1897,29 @@ int skb_append_datato_frags(struct sock 
    23.4  }
    23.5  
    23.6  /**
    23.7 + *	skb_pull_rcsum - pull skb and update receive checksum
    23.8 + *	@skb: buffer to update
    23.9 + *	@start: start of data before pull
   23.10 + *	@len: length of data pulled
   23.11 + *
   23.12 + *	This function performs an skb_pull on the packet and updates
   23.13 + *	update the CHECKSUM_HW checksum.  It should be used on receive
   23.14 + *	path processing instead of skb_pull unless you know that the
   23.15 + *	checksum difference is zero (e.g., a valid IP header) or you
   23.16 + *	are setting ip_summed to CHECKSUM_NONE.
   23.17 + */
   23.18 +unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
   23.19 +{
   23.20 +	BUG_ON(len > skb->len);
   23.21 +	skb->len -= len;
   23.22 +	BUG_ON(skb->len < skb->data_len);
   23.23 +	skb_postpull_rcsum(skb, skb->data, len);
   23.24 +	return skb->data += len;
   23.25 +}
   23.26 +
   23.27 +EXPORT_SYMBOL_GPL(skb_pull_rcsum);
   23.28 +
   23.29 +/**
   23.30   *	skb_segment - Perform protocol segmentation on skb.
   23.31   *	@skb: buffer to segment
   23.32   *	@features: features for the output path (see dev->features)
   23.33 @@ -2022,29 +2045,6 @@ err:
   23.34  
   23.35  EXPORT_SYMBOL_GPL(skb_segment);
   23.36  
   23.37 -/**
   23.38 - *	skb_pull_rcsum - pull skb and update receive checksum
   23.39 - *	@skb: buffer to update
   23.40 - *	@start: start of data before pull
   23.41 - *	@len: length of data pulled
   23.42 - *
   23.43 - *	This function performs an skb_pull on the packet and updates
   23.44 - *	update the CHECKSUM_HW checksum.  It should be used on receive
   23.45 - *	path processing instead of skb_pull unless you know that the
   23.46 - *	checksum difference is zero (e.g., a valid IP header) or you
   23.47 - *	are setting ip_summed to CHECKSUM_NONE.
   23.48 - */
   23.49 -unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
   23.50 -{
   23.51 -	BUG_ON(len > skb->len);
   23.52 -	skb->len -= len;
   23.53 -	BUG_ON(skb->len < skb->data_len);
   23.54 -	skb_postpull_rcsum(skb, skb->data, len);
   23.55 -	return skb->data += len;
   23.56 -}
   23.57 -
   23.58 -EXPORT_SYMBOL_GPL(skb_pull_rcsum);
   23.59 -
   23.60  void __init skb_init(void)
   23.61  {
   23.62  	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
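
The skbuff.c hunk only moves skb_pull_rcsum() ahead of skb_segment(); its body and export are unchanged. As the kerneldoc above says, the helper is for receive-path code that shortens a packet while a CHECKSUM_HW value is still live; a minimal usage sketch (the pulled length is an arbitrary example, not taken from this changeset):

/* Receive-path sketch: drop a 14-byte Ethernet header and fold those
 * bytes out of the hardware checksum in one step.  Plain skb_pull()
 * would be enough only if the checksum difference were known to be
 * zero or ip_summed were being set to CHECKSUM_NONE. */
skb_pull_rcsum(skb, ETH_HLEN);
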
    24.1 --- a/patches/linux-2.6.18/series	Fri Feb 09 14:43:22 2007 -0600
    24.2 +++ b/patches/linux-2.6.18/series	Wed Feb 14 19:01:35 2007 +0000
    24.3 @@ -18,3 +18,4 @@ git-dbaab49f92ff6ae6255762a948375e4036cb
    24.4  x86-elfnote-as-preprocessor-macro.patch
    24.5  fixaddr-top.patch
    24.6  git-c06cb8b1c4d25e5b4d7a2d7c2462619de1e0dbc4.patch
    24.7 +softlockup-no-idle-hz.patch
    25.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.2 +++ b/patches/linux-2.6.18/softlockup-no-idle-hz.patch	Wed Feb 14 19:01:35 2007 +0000
    25.3 @@ -0,0 +1,56 @@
    25.4 +diff -pruN ../orig-linux-2.6.18/include/linux/sched.h ./include/linux/sched.h
    25.5 +--- ../orig-linux-2.6.18/include/linux/sched.h	2006-09-20 04:42:06.000000000 +0100
    25.6 ++++ ./include/linux/sched.h	2007-02-07 01:10:24.000000000 +0000
    25.7 +@@ -211,10 +211,15 @@ extern void update_process_times(int use
    25.8 + extern void scheduler_tick(void);
    25.9 + 
   25.10 + #ifdef CONFIG_DETECT_SOFTLOCKUP
   25.11 ++extern unsigned long softlockup_get_next_event(void);
   25.12 + extern void softlockup_tick(void);
   25.13 + extern void spawn_softlockup_task(void);
   25.14 + extern void touch_softlockup_watchdog(void);
   25.15 + #else
   25.16 ++static inline unsigned long softlockup_get_next_event(void)
   25.17 ++{
   25.18 ++	return MAX_JIFFY_OFFSET;
   25.19 ++}
   25.20 + static inline void softlockup_tick(void)
   25.21 + {
   25.22 + }
   25.23 +diff -pruN ../orig-linux-2.6.18/kernel/softlockup.c ./kernel/softlockup.c
   25.24 +--- ../orig-linux-2.6.18/kernel/softlockup.c	2006-09-20 04:42:06.000000000 +0100
   25.25 ++++ ./kernel/softlockup.c	2007-02-07 01:53:22.000000000 +0000
   25.26 +@@ -40,6 +40,19 @@ void touch_softlockup_watchdog(void)
   25.27 + }
   25.28 + EXPORT_SYMBOL(touch_softlockup_watchdog);
   25.29 + 
   25.30 ++unsigned long softlockup_get_next_event(void)
   25.31 ++{
   25.32 ++	int this_cpu = smp_processor_id();
   25.33 ++	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
   25.34 ++
   25.35 ++	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
   25.36 ++		did_panic ||
   25.37 ++			!per_cpu(watchdog_task, this_cpu))
   25.38 ++		return MAX_JIFFY_OFFSET;
   25.39 ++
   25.40 ++	return min_t(long, 0, touch_timestamp + HZ - jiffies);
   25.41 ++}
   25.42 ++
   25.43 + /*
   25.44 +  * This callback runs from the timer interrupt, and checks
   25.45 +  * whether the watchdog thread has hung or not:
   25.46 +diff -pruN ../orig-linux-2.6.18/kernel/timer.c ./kernel/timer.c
   25.47 +--- ../orig-linux-2.6.18/kernel/timer.c	2006-09-20 04:42:06.000000000 +0100
   25.48 ++++ ./kernel/timer.c	2007-02-07 01:29:34.000000000 +0000
   25.49 +@@ -485,7 +485,9 @@ unsigned long next_timer_interrupt(void)
   25.50 + 		if (hr_expires < 3)
   25.51 + 			return hr_expires + jiffies;
   25.52 + 	}
   25.53 +-	hr_expires += jiffies;
   25.54 ++	hr_expires = min_t(unsigned long,
   25.55 ++			   softlockup_get_next_event(),
   25.56 ++			   hr_expires) + jiffies;
   25.57 + 
   25.58 + 	base = __get_cpu_var(tvec_bases);
   25.59 + 	spin_lock(&base->lock);
    26.1 --- a/tools/examples/block	Fri Feb 09 14:43:22 2007 -0600
    26.2 +++ b/tools/examples/block	Wed Feb 14 19:01:35 2007 +0000
    26.3 @@ -367,13 +367,7 @@ mount it read-write in a guest domain."
    26.4            fatal 'Failed to find an unused loop device'
    26.5          fi
    26.6  
    26.7 -        cmode=`canonicalise_mode $mode`
    26.8 -        if [ "$cmode" == 'r' ]
    26.9 -        then
   26.10 -          do_or_die losetup -r "$loopdev" "$file"
   26.11 -        else
   26.12 -          do_or_die losetup "$loopdev" "$file"
   26.13 -        fi
   26.14 +        do_or_die losetup "$loopdev" "$file"
   26.15          xenstore_write "$XENBUS_PATH/node" "$loopdev"
   26.16          write_dev "$loopdev"
   26.17          release_lock "block"
    27.1 --- a/tools/examples/vif-bridge	Fri Feb 09 14:43:22 2007 -0600
    27.2 +++ b/tools/examples/vif-bridge	Wed Feb 14 19:01:35 2007 +0000
    27.3 @@ -46,6 +46,13 @@ then
    27.4    fi
    27.5  fi
    27.6  
    27.7 +RET=0
    27.8 +ip link show $bridge 1>/dev/null 2>&1 || RET=1
    27.9 +if [ "$RET" -eq 1 ]
   27.10 +then
   27.11 +    fatal "Could not find bridge device $bridge"
   27.12 +fi
   27.13 +
   27.14  case "$command" in
   27.15      online)
   27.16  	setup_bridge_port "$vif"
    28.1 --- a/tools/examples/xen-hotplug-common.sh	Fri Feb 09 14:43:22 2007 -0600
    28.2 +++ b/tools/examples/xen-hotplug-common.sh	Wed Feb 14 19:01:35 2007 +0000
    28.3 @@ -28,14 +28,15 @@ export LANG="POSIX"
    28.4  unset $(set | grep ^LC_ | cut -d= -f1)
    28.5  
    28.6  fatal() {
    28.7 -  xenstore_write "$XENBUS_PATH"/hotplug-status error
    28.8 +  xenstore_write "$XENBUS_PATH/hotplug-error" "$*" \
    28.9 +                 "$XENBUS_PATH/hotplug-status" error
   28.10    log err "$@"
   28.11    exit 1
   28.12  }
   28.13  
   28.14  success() {
   28.15    # Tell DevController that backend is "connected"
   28.16 -  xenstore_write "$XENBUS_PATH"/hotplug-status connected
   28.17 +  xenstore_write "$XENBUS_PATH/hotplug-status" connected
   28.18  }
   28.19  
   28.20  do_or_die() {
    29.1 --- a/tools/firmware/rombios/32bit/tcgbios/tcgbios.c	Fri Feb 09 14:43:22 2007 -0600
    29.2 +++ b/tools/firmware/rombios/32bit/tcgbios/tcgbios.c	Wed Feb 14 19:01:35 2007 +0000
    29.3 @@ -146,7 +146,7 @@ static tcpa_acpi_t tcpa_acpi;
    29.4  static int tpm_driver_to_use = TPM_INVALID_DRIVER;
    29.5  
    29.6  static
    29.7 -uint32_t MA_IsTPMPresent()
    29.8 +uint32_t MA_IsTPMPresent(void)
    29.9  {
   29.10  	uint32_t rc = 0;
   29.11  	unsigned int i;
   29.12 @@ -263,11 +263,11 @@ void tcpa_acpi_init(void)
   29.13  {
   29.14  	struct acpi_20_rsdt *rsdt;
   29.15  	uint32_t length;
   29.16 -	struct acpi_20_tcpa *tcpa;
   29.17 +	struct acpi_20_tcpa *tcpa = (void *)0;
   29.18  	uint16_t found = 0;
   29.19  	uint16_t rsdp_off;
   29.20  	uint16_t off;
   29.21 -	struct acpi_20_rsdp *rsdp;
   29.22 +	struct acpi_20_rsdp *rsdp = (void *)0;
   29.23  
   29.24  	if (MA_IsTPMPresent() == 0) {
   29.25  		return;
   29.26 @@ -732,8 +732,8 @@ void tcpa_ipl(Bit32u seg)
   29.27  void tcpa_measure_post(Bit32u from, Bit32u to)
   29.28  {
   29.29  	struct pcpes pcpes; /* PCClientPCREventStruc */
   29.30 +	int len = to - from;
   29.31  	memset(&pcpes, 0x0, sizeof(pcpes));
   29.32 -	int len = to - from;
   29.33  
   29.34  	if (len > 0) {
   29.35  		sha1((unsigned char *)from,
   29.36 @@ -986,7 +986,7 @@ uint32_t PassThroughToTPM32(struct pttti
   29.37  {
   29.38  	uint32_t rc = 0;
   29.39  	uint8_t *cmd32;
   29.40 -	uint32_t resbuflen;
   29.41 +	uint32_t resbuflen = 0;
   29.42  
   29.43  	if (TCG_IsShutdownPreBootInterface() != 0) {
   29.44  		rc = (TCG_PC_TPMERROR |
   29.45 @@ -1277,9 +1277,7 @@ typedef struct _sha1_ctx {
   29.46  } sha1_ctx;
   29.47  
   29.48  
   29.49 -static inline uint32_t rol(val, rol)
   29.50 -  uint32_t val;
   29.51 -  uint16_t rol;
   29.52 +static inline uint32_t rol(uint32_t val, uint16_t rol)
   29.53  {
   29.54  	return (val << rol) | (val >> (32 - rol));
   29.55  }
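
The tcgbios.c edits are warning and portability fixes: MA_IsTPMPresent() gets an explicit (void) parameter list, two pointers that gcc could not prove initialized are set to NULL, a declaration is hoisted above the first statement for C89, and rol() is converted from a K&R-style definition to a prototype. The rotate itself is unchanged; a standalone check:

#include <stdint.h>
#include <stdio.h>

/* Same rotate-left as the prototyped version above; callers pass a
 * shift of 1..31 (a shift of 0 or 32 would be undefined here, as in
 * the original). */
static inline uint32_t rol(uint32_t val, uint16_t r)
{
    return (val << r) | (val >> (32 - r));
}

int main(void)
{
    /* 0x80000001 rotated left by one bit is 0x00000003. */
    printf("%08x\n", rol(0x80000001u, 1));
    return 0;
}
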
    30.1 --- a/tools/firmware/rombios/32bit/tcgbios/tpm_drivers.c	Fri Feb 09 14:43:22 2007 -0600
    30.2 +++ b/tools/firmware/rombios/32bit/tcgbios/tpm_drivers.c	Wed Feb 14 19:01:35 2007 +0000
    30.3 @@ -27,12 +27,27 @@
    30.4  #include "tpm_drivers.h"
    30.5  #include "tcgbios.h"
    30.6  
    30.7 +#define STS_VALID                    (1 << 7) /* 0x80 */
    30.8 +#define STS_COMMAND_READY            (1 << 6) /* 0x40 */
    30.9 +#define STS_TPM_GO                   (1 << 5) /* 0x20 */
   30.10 +#define STS_DATA_AVAILABLE           (1 << 4) /* 0x10 */
   30.11 +#define STS_EXPECT                   (1 << 3) /* 0x08 */
   30.12 +#define STS_RESPONSE_RETRY           (1 << 1) /* 0x02 */
   30.13 +
   30.14 +#define ACCESS_TPM_REG_VALID_STS     (1 << 7) /* 0x80 */
   30.15 +#define ACCESS_ACTIVE_LOCALITY       (1 << 5) /* 0x20 */
   30.16 +#define ACCESS_BEEN_SEIZED           (1 << 4) /* 0x10 */
   30.17 +#define ACCESS_SEIZE                 (1 << 3) /* 0x08 */
   30.18 +#define ACCESS_PENDING_REQUEST       (1 << 2) /* 0x04 */
   30.19 +#define ACCESS_REQUEST_USE           (1 << 1) /* 0x02 */
   30.20 +#define ACCESS_TPM_ESTABLISHMENT     (1 << 0) /* 0x01 */
   30.21 +
   30.22  static uint32_t tis_wait_sts(uint8_t *addr, uint32_t time,
   30.23                               uint8_t mask, uint8_t expect)
   30.24  {
   30.25  	uint32_t rc = 0;
   30.26  	while (time > 0) {
   30.27 -		uint8_t sts = addr[TPM_STS];
   30.28 +		uint8_t sts = mmio_readb(&addr[TPM_STS]);
   30.29  		if ((sts & mask) == expect) {
   30.30  			rc = 1;
   30.31  			break;
   30.32 @@ -45,16 +60,17 @@ static uint32_t tis_wait_sts(uint8_t *ad
   30.33  
   30.34  static uint32_t tis_activate(uint32_t baseaddr)
   30.35  {
   30.36 -	uint32_t rc = 0;
   30.37 +	uint32_t rc = 1;
   30.38  	uint8_t *tis_addr = (uint8_t*)baseaddr;
   30.39  	uint8_t acc;
   30.40  	/* request access to locality */
   30.41 -	tis_addr[TPM_ACCESS] = 0x2;
   30.42 +	tis_addr[TPM_ACCESS] = ACCESS_REQUEST_USE;
   30.43  
   30.44 -	acc = tis_addr[TPM_ACCESS];
   30.45 -	if ((acc & 0x20) != 0) {
   30.46 -		tis_addr[TPM_STS] = 0x40;
   30.47 -		rc = tis_wait_sts(tis_addr, 100, 0x40, 0x40);
   30.48 +	acc = mmio_readb(&tis_addr[TPM_ACCESS]);
   30.49 +	if ((acc & ACCESS_ACTIVE_LOCALITY) != 0) {
   30.50 +		tis_addr[TPM_STS] = STS_COMMAND_READY;
   30.51 +		rc = tis_wait_sts(tis_addr, 100,
   30.52 +		                  STS_COMMAND_READY, STS_COMMAND_READY);
   30.53  	}
   30.54  	return rc;
   30.55  }
   30.56 @@ -64,8 +80,8 @@ uint32_t tis_ready(uint32_t baseaddr)
   30.57  	uint32_t rc = 0;
   30.58  	uint8_t *tis_addr = (uint8_t*)baseaddr;
   30.59  
   30.60 -	tis_addr[TPM_STS] = 0x40;
   30.61 -	rc = tis_wait_sts(tis_addr, 100, 0x40, 0x40);
   30.62 +	tis_addr[TPM_STS] = STS_COMMAND_READY;
   30.63 +	rc = tis_wait_sts(tis_addr, 100, STS_COMMAND_READY, STS_COMMAND_READY);
   30.64  
   30.65  	return rc;
   30.66  }
   30.67 @@ -81,8 +97,7 @@ uint32_t tis_senddata(uint32_t baseaddr,
   30.68  		uint16_t burst = 0;
   30.69  		uint32_t ctr = 0;
   30.70  		while (burst == 0 && ctr < 2000) {
   30.71 -			burst = (((uint16_t)tis_addr[TPM_STS+1])     ) +
   30.72 -			        (((uint16_t)tis_addr[TPM_STS+2]) << 8);
   30.73 +			burst = mmio_readw((uint16_t *)&tis_addr[TPM_STS+1]);
   30.74  			if (burst == 0) {
   30.75  				mssleep(1);
   30.76  				ctr++;
   30.77 @@ -120,11 +135,11 @@ uint32_t tis_readresp(uint32_t baseaddr,
   30.78  	uint32_t sts;
   30.79  
   30.80  	while (offset < len) {
   30.81 -		buffer[offset] = tis_addr[TPM_DATA_FIFO];
   30.82 +		buffer[offset] = mmio_readb(&tis_addr[TPM_DATA_FIFO]);
   30.83  		offset++;
   30.84 -		sts = tis_addr[TPM_STS];
   30.85 +		sts = mmio_readb(&tis_addr[TPM_STS]);
   30.86  		/* data left ? */
   30.87 -		if ((sts & 0x10) == 0) {
   30.88 +		if ((sts & STS_DATA_AVAILABLE) == 0) {
   30.89  			break;
   30.90  		}
   30.91  	}
   30.92 @@ -136,7 +151,7 @@ uint32_t tis_waitdatavalid(uint32_t base
   30.93  {
   30.94  	uint8_t *tis_addr = (uint8_t*)baseaddr;
   30.95  	uint32_t rc = 0;
   30.96 -	if (tis_wait_sts(tis_addr, 1000, 0x80, 0x80) == 0) {
   30.97 +	if (tis_wait_sts(tis_addr, 1000, STS_VALID, STS_VALID) == 0) {
   30.98  		rc = TCG_NO_RESPONSE;
   30.99  	}
  30.100  	return rc;
  30.101 @@ -146,8 +161,9 @@ uint32_t tis_waitrespready(uint32_t base
  30.102  {
  30.103  	uint32_t rc = 0;
  30.104  	uint8_t *tis_addr = (uint8_t*)baseaddr;
  30.105 -	tis_addr[TPM_STS] = 0x20;
  30.106 -	if (tis_wait_sts(tis_addr, timeout, 0x10, 0x10) == 0) {
  30.107 +	tis_addr[TPM_STS] = STS_TPM_GO;
  30.108 +	if (tis_wait_sts(tis_addr, timeout,
  30.109 +	                 STS_DATA_AVAILABLE, STS_DATA_AVAILABLE) == 0) {
  30.110  		rc = TCG_NO_RESPONSE;
  30.111  	}
  30.112  	return rc;
  30.113 @@ -158,7 +174,7 @@ uint32_t tis_probe(uint32_t baseaddr)
  30.114  {
  30.115  	uint32_t rc = 0;
  30.116  	uint8_t *tis_addr = (uint8_t*)baseaddr;
  30.117 -	uint32_t didvid = *(uint32_t*)&tis_addr[TPM_DID_VID];
  30.118 +	uint32_t didvid = mmio_readl((uint32_t *)&tis_addr[TPM_DID_VID]);
  30.119  	if ((didvid != 0) && (didvid != 0xffffffff)) {
  30.120  		rc = 1;
  30.121  	}
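
The tpm_drivers.c changes replace the magic status values with named TIS bits and route every register read through the volatile mmio_* helpers added to util.h below. A minimal sketch of the resulting polling idiom, assuming the STS_* constants, TPM_STS offset, mmio_readb() and mssleep() used by the driver (the function name is illustrative only):

    static uint32_t tis_wait_ready_example(uint8_t *tis_addr)
    {
        uint32_t time = 100;              /* same 100ms budget as tis_ready() */
        while (time > 0) {
            /* each iteration re-reads the register via the volatile helper */
            uint8_t sts = mmio_readb(&tis_addr[TPM_STS]);
            if ((sts & STS_COMMAND_READY) == STS_COMMAND_READY)
                return 1;                 /* TPM will accept a command */
            mssleep(1);
            time--;
        }
        return 0;                         /* timed out */
    }
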
    31.1 --- a/tools/firmware/rombios/32bit/util.h	Fri Feb 09 14:43:22 2007 -0600
    31.2 +++ b/tools/firmware/rombios/32bit/util.h	Wed Feb 14 19:01:35 2007 +0000
    31.3 @@ -24,5 +24,20 @@ void byte_to_hex(char *digits, uint8_t b
    31.4  void uuid_to_string(char *dest, uint8_t *uuid);
    31.5  int printf(const char *fmt, ...);
    31.6  
    31.7 +static inline uint8_t mmio_readb(uint8_t *addr)
    31.8 +{
    31.9 +	return *(volatile uint8_t *)addr;
   31.10 +}
   31.11 +
   31.12 +static inline uint16_t mmio_readw(uint16_t *addr)
   31.13 +{
   31.14 +	return *(volatile uint16_t *)addr;
   31.15 +}
   31.16 +
   31.17 +static inline uint32_t mmio_readl(uint32_t *addr)
   31.18 +{
   31.19 +	return *(volatile uint32_t *)addr;
   31.20 +}
   31.21 +
   31.22  
   31.23  #endif
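
The point of the new mmio_read* helpers is the volatile cast: device registers change behind the CPU's back, and without volatile the compiler may legally cache the load and hoist it out of a polling loop. A rough sketch of the failure mode being avoided (the register address is hypothetical):

    uint8_t *sts = (uint8_t *)0xfed40018;   /* hypothetical TIS status register */

    /* Risky: the compiler may read *sts once and spin on a stale value. */
    while ((*sts & 0x80) == 0)
        ;

    /* Safer: mmio_readb() forces a fresh device read on every iteration. */
    while ((mmio_readb(sts) & 0x80) == 0)
        ;
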
    32.1 --- a/tools/firmware/rombios/rombios.c	Fri Feb 09 14:43:22 2007 -0600
    32.2 +++ b/tools/firmware/rombios/rombios.c	Wed Feb 14 19:01:35 2007 +0000
    32.3 @@ -5722,9 +5722,6 @@ int13_cdemu(DS, ES, DI, SI, BP, SP, BX, 
    32.4      goto int13_fail;
    32.5      }
    32.6  
    32.7 -#if BX_TCGBIOS
    32.8 -  tcpa_ipl((Bit32u)bootseg);               /* specs: 8.2.3 steps 4 and 5 */
    32.9 -#endif
   32.10    
   32.11    switch (GET_AH()) {
   32.12  
   32.13 @@ -7741,6 +7738,10 @@ ASM_END
   32.14        }
   32.15      }
   32.16  
   32.17 +#if BX_TCGBIOS
   32.18 +    tcpa_add_bootdevice((Bit32u)0L, (Bit32u)bootdrv);
   32.19 +#endif
   32.20 +
   32.21      /* Canonicalize bootseg:bootip */
   32.22      bootip = (bootseg & 0x0fff) << 4;
   32.23      bootseg &= 0xf000;
   32.24 @@ -7760,6 +7761,9 @@ ASM_END
   32.25      bootdrv = (Bit8u)(status>>8);
   32.26      bootseg = read_word(ebda_seg,&EbdaData->cdemu.load_segment);
   32.27      /* Canonicalize bootseg:bootip */
   32.28 +#if BX_TCGBIOS
   32.29 +    tcpa_add_bootdevice((Bit32u)1L, (Bit32u)0L);
   32.30 +#endif
   32.31      bootip = (bootseg & 0x0fff) << 4;
   32.32      bootseg &= 0xf000;
   32.33      break;
   32.34 @@ -7773,6 +7777,9 @@ ASM_END
   32.35    default: return;
   32.36    }
   32.37  
   32.38 +#if BX_TCGBIOS
   32.39 +  tcpa_ipl((Bit32u)bootseg);               /* specs: 8.2.3 steps 4 and 5 */
   32.40 +#endif
   32.41    /* Debugging info */
   32.42    printf("Booting from %x:%x\n", bootseg, bootip);
   32.43    
    33.1 --- a/tools/ioemu/hw/cirrus_vga.c	Fri Feb 09 14:43:22 2007 -0600
    33.2 +++ b/tools/ioemu/hw/cirrus_vga.c	Wed Feb 14 19:01:35 2007 +0000
    33.3 @@ -3339,6 +3339,10 @@ void pci_cirrus_vga_init(PCIBus *bus, Di
    33.4      pci_conf[0x0a] = PCI_CLASS_SUB_VGA;
    33.5      pci_conf[0x0b] = PCI_CLASS_BASE_DISPLAY;
    33.6      pci_conf[0x0e] = PCI_CLASS_HEADERTYPE_00h;
    33.7 +    pci_conf[0x2c] = 0x53; /* subsystem vendor: XenSource */
    33.8 +    pci_conf[0x2d] = 0x58;
    33.9 +    pci_conf[0x2e] = 0x01; /* subsystem device */
   33.10 +    pci_conf[0x2f] = 0x00;
   33.11  
   33.12      /* setup VGA */
   33.13      s = &d->cirrus_vga;
    34.1 --- a/tools/ioemu/hw/ide.c	Fri Feb 09 14:43:22 2007 -0600
    34.2 +++ b/tools/ioemu/hw/ide.c	Wed Feb 14 19:01:35 2007 +0000
    34.3 @@ -2502,6 +2502,10 @@ void pci_piix3_ide_init(PCIBus *bus, Blo
    34.4      pci_conf[0x0a] = 0x01; // class_sub = PCI_IDE
    34.5      pci_conf[0x0b] = 0x01; // class_base = PCI_mass_storage
    34.6      pci_conf[0x0e] = 0x00; // header_type
    34.7 +    pci_conf[0x2c] = 0x53; /* subsystem vendor: XenSource */
    34.8 +    pci_conf[0x2d] = 0x58;
    34.9 +    pci_conf[0x2e] = 0x01; /* subsystem device */
   34.10 +    pci_conf[0x2f] = 0x00;
   34.11  
   34.12      pci_register_io_region((PCIDevice *)d, 4, 0x10, 
   34.13                             PCI_ADDRESS_SPACE_IO, bmdma_map);
    35.1 --- a/tools/ioemu/hw/rtl8139.c	Fri Feb 09 14:43:22 2007 -0600
    35.2 +++ b/tools/ioemu/hw/rtl8139.c	Wed Feb 14 19:01:35 2007 +0000
    35.3 @@ -3423,8 +3423,10 @@ void pci_rtl8139_init(PCIBus *bus, NICIn
    35.4      pci_conf[0x0e] = 0x00; /* header_type */
    35.5      pci_conf[0x3d] = 1;    /* interrupt pin 0 */
    35.6      pci_conf[0x34] = 0xdc;
    35.7 -    pci_conf[0x2c] = pci_conf[0x00]; // same as Vendor ID
    35.8 -    pci_conf[0x2d] = pci_conf[0x01];
    35.9 +    pci_conf[0x2c] = 0x53; /* subsystem vendor: XenSource */
   35.10 +    pci_conf[0x2d] = 0x58;
   35.11 +    pci_conf[0x2e] = 0x01; /* subsystem device */
   35.12 +    pci_conf[0x2f] = 0x00;
   35.13  
   35.14      s = &d->rtl8139;
   35.15  
    36.1 --- a/tools/ioemu/hw/tpm_tis.c	Fri Feb 09 14:43:22 2007 -0600
    36.2 +++ b/tools/ioemu/hw/tpm_tis.c	Wed Feb 14 19:01:35 2007 +0000
    36.3 @@ -517,7 +517,7 @@ static uint32_t tis_mem_readl(void *opaq
    36.4  
    36.5  #ifdef DEBUG_TPM
    36.6      fprintf(logfile," read(%08x) = %08x\n",
    36.7 -            addr,
    36.8 +            (int)addr,
    36.9              val);
   36.10  #endif
   36.11  
   36.12 @@ -538,7 +538,7 @@ static void tis_mem_writel(void *opaque,
   36.13  
   36.14  #ifdef DEBUG_TPM
   36.15      fprintf(logfile,"write(%08x) = %08x\n",
   36.16 -            addr,
   36.17 +            (int)addr,
   36.18              val);
   36.19  #endif
   36.20  
   36.21 @@ -757,10 +757,11 @@ static CPUWriteMemoryFunc *tis_writefn[3
   36.22  static void tpm_save(QEMUFile* f,void* opaque)
   36.23  {
   36.24      tpmState* s=(tpmState*)opaque;
   36.25 +    uint8_t locty = s->active_loc;
   36.26      int c;
   36.27  
   36.28      /* need to wait for outstanding requests to complete */
   36.29 -    if (IS_COMM_WITH_VTPM(s)) {
   36.30 +    if (s->loc[locty].state == STATE_EXECUTION) {
   36.31          int repeats = 30; /* 30 seconds; really should be infty */
   36.32          while (repeats > 0 &&
   36.33                 !(s->loc[s->active_loc].sts & STS_DATA_AVAILABLE)) {
   36.34 @@ -777,6 +778,10 @@ static void tpm_save(QEMUFile* f,void* o
   36.35          }
   36.36      }
   36.37  
   36.38 +    if (IS_COMM_WITH_VTPM(s)) {
   36.39 +        close_vtpm_channel(s, 1);
   36.40 +    }
   36.41 +
   36.42      qemu_put_be32s(f,&s->offset);
   36.43      qemu_put_buffer(f, s->buffer.buf, TPM_MAX_PKT);
   36.44      qemu_put_8s(f, &s->active_loc);
   36.45 @@ -993,7 +998,7 @@ static int TPM_Receive(tpmState *s, tpmB
   36.46          uint32_t size = tpm_get_size_from_buffer(buffer->buf);
   36.47          if (size + sizeof(buffer->instance) != off) {
   36.48              fprintf(logfile,"TPM: Packet size is bad! %d != %d\n",
   36.49 -                    size + sizeof(buffer->instance),
   36.50 +                    (int)(size + sizeof(buffer->instance)),
   36.51                      off);
   36.52          } else {
   36.53              uint32_t ret;
    37.1 --- a/tools/ioemu/xenstore.c	Fri Feb 09 14:43:22 2007 -0600
    37.2 +++ b/tools/ioemu/xenstore.c	Wed Feb 14 19:01:35 2007 +0000
    37.3 @@ -10,6 +10,7 @@
    37.4  
    37.5  #include "vl.h"
    37.6  #include "block_int.h"
    37.7 +#include <unistd.h>
    37.8  
    37.9  static struct xs_handle *xsh = NULL;
   37.10  static char *hd_filename[MAX_DISKS];
   37.11 @@ -52,11 +53,40 @@ void xenstore_check_new_media_present(in
   37.12      qemu_mod_timer(insert_timer, qemu_get_clock(rt_clock) + timeout);
   37.13  }
   37.14  
   37.15 +static int waitForDevice(char *path, char *field, char *desired)
   37.16 +{ 
   37.17 +    char *buf = NULL, *stat = NULL;
   37.18 +    unsigned int len;
   37.19 +    int val = 1;
   37.20 +
    37.21 +    /* loop until a value appears in xenstore, then return zero
    37.22 +     * if it matched the desired value and non-zero otherwise
    37.23 +     */
   37.24 +    while (1) {
   37.25 +        if (pasprintf(&buf, "%s/%s", path, field) == -1)
   37.26 +            goto done;
   37.27 +        free(stat);
   37.28 +        stat = xs_read(xsh, XBT_NULL, buf, &len);
   37.29 +        if (stat == NULL) {
   37.30 +            usleep(100000); /* 1/10th second, no path found */
   37.31 +        } else {
   37.32 +            val = strcmp(stat, desired);
   37.33 +            goto done;
   37.34 +        }
   37.35 +    }
   37.36 +
   37.37 +done:
   37.38 +    free(stat);
   37.39 +    free(buf);
   37.40 +    return val;
   37.41 +}
   37.42 +
   37.43  void xenstore_parse_domain_config(int domid)
   37.44  {
   37.45      char **e = NULL;
   37.46      char *buf = NULL, *path;
   37.47 -    char *bpath = NULL, *dev = NULL, *params = NULL, *type = NULL;
   37.48 +    char *fpath = NULL, *bpath = NULL,
   37.49 +         *dev = NULL, *params = NULL, *type = NULL;
   37.50      int i;
   37.51      unsigned int len, num, hd_index;
   37.52  
   37.53 @@ -120,7 +150,35 @@ void xenstore_parse_domain_config(int do
   37.54  	    hd_filename[hd_index] = params;	/* strdup() */
   37.55  	    params = NULL;		/* don't free params on re-use */
   37.56  	}
    37.57 +        /*
    37.58 +         * Check whether the device has a phantom vbd; the phantom is
    37.59 +         * hooked to the frontend device (for ease of cleanup), so look
    37.60 +         * up the frontend device and see if there is a phantom_vbd;
    37.61 +         * if there is, use the device it resolves to as the filename.
    37.62 +         */
   37.63 +	if (pasprintf(&buf, "%s/device/vbd/%s/phantom_vbd", path, e[i]) == -1)
   37.64 +	    continue;
   37.65 +	free(fpath);
   37.66 +        fpath = xs_read(xsh, XBT_NULL, buf, &len);
   37.67 +	if (fpath != NULL) {
   37.68 +
   37.69 +            if (waitForDevice(fpath, "hotplug-status", "connected")) {
   37.70 +               continue;
   37.71 +            }
   37.72 +
   37.73 +	    if (pasprintf(&buf, "%s/dev", fpath) == -1)
   37.74 +	        continue;
   37.75 +            params = xs_read(xsh, XBT_NULL, buf , &len);
   37.76 +	    if (params != NULL) {
   37.77 +                free(hd_filename[hd_index]);
   37.78 +                hd_filename[hd_index] = params;
   37.79 +                params = NULL;              /* don't free params on re-use */
   37.80 +            }
   37.81 +        }
   37.82  	bs_table[hd_index] = bdrv_new(dev);
   37.83 +        /* re-establish buf */
   37.84 +	if (pasprintf(&buf, "%s/params", bpath) == -1)
   37.85 +	    continue;
   37.86  	/* check if it is a cdrom */
   37.87  	if (type && !strcmp(type, "cdrom")) {
   37.88  	    bdrv_set_type_hint(bs_table[hd_index], BDRV_TYPE_CDROM);
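
An illustrative call into the waitForDevice() helper added above: it blocks, polling xenstore every 100ms, until the named field appears, and returns zero when the value matches the desired string. The backend path shown here is hypothetical:

    char *phantom = "/local/domain/0/backend/tap/0/51712";   /* hypothetical path */

    if (waitForDevice(phantom, "hotplug-status", "connected") == 0)
        fprintf(stderr, "phantom vbd %s is connected\n", phantom);
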
    38.1 --- a/tools/libxc/xc_domain.c	Fri Feb 09 14:43:22 2007 -0600
    38.2 +++ b/tools/libxc/xc_domain.c	Wed Feb 14 19:01:35 2007 +0000
    38.3 @@ -252,12 +252,14 @@ int xc_domain_hvm_getcontext(int xc_hand
    38.4      domctl.u.hvmcontext.size = size;
    38.5      set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
    38.6  
    38.7 -    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
    38.8 -        return ret;
    38.9 +    if ( ctxt_buf ) 
   38.10 +        if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
   38.11 +            return ret;
   38.12  
   38.13      ret = do_domctl(xc_handle, &domctl);
   38.14  
   38.15 -    unlock_pages(ctxt_buf, size);
   38.16 +    if ( ctxt_buf ) 
   38.17 +        unlock_pages(ctxt_buf, size);
   38.18  
   38.19      return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
   38.20  }
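
Accepting a NULL buffer here enables a two-call pattern: ask Xen for the size of the HVM context first, then allocate and fetch it. The xc_hvm_save() change further down relies on exactly this. A minimal sketch, assuming xc_handle and domid are already set up:

    int size = xc_domain_hvm_getcontext(xc_handle, domid, 0, 0);
    if (size > 0)
    {
        uint8_t *buf = malloc(size);
        if (buf && xc_domain_hvm_getcontext(xc_handle, domid, buf, size) >= 0)
        {
            /* ... write the context out, or inspect it ... */
        }
        free(buf);
    }
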
    39.1 --- a/tools/libxc/xc_hvm_restore.c	Fri Feb 09 14:43:22 2007 -0600
    39.2 +++ b/tools/libxc/xc_hvm_restore.c	Wed Feb 14 19:01:35 2007 +0000
    39.3 @@ -41,11 +41,8 @@ static unsigned long hvirt_start;
    39.4  /* #levels of page tables used by the currrent guest */
    39.5  static unsigned int pt_levels;
    39.6  
    39.7 -/* total number of pages used by the current guest */
    39.8 -static unsigned long max_pfn;
    39.9 -
   39.10 -/* A table mapping each PFN to its new MFN. */
   39.11 -static xen_pfn_t *p2m = NULL;
   39.12 +/* A list of PFNs that exist, used when allocating memory to the guest */
   39.13 +static xen_pfn_t *pfns = NULL;
   39.14  
   39.15  static ssize_t
   39.16  read_exact(int fd, void *buf, size_t count)
   39.17 @@ -67,9 +64,8 @@ read_exact(int fd, void *buf, size_t cou
   39.18  }
   39.19  
   39.20  int xc_hvm_restore(int xc_handle, int io_fd,
   39.21 -                     uint32_t dom, unsigned long nr_pfns,
   39.22 +                     uint32_t dom, unsigned long max_pfn,
   39.23                       unsigned int store_evtchn, unsigned long *store_mfn,
   39.24 -                     unsigned int console_evtchn, unsigned long *console_mfn,
   39.25                       unsigned int pae, unsigned int apic)
   39.26  {
   39.27      DECLARE_DOMCTL;
   39.28 @@ -91,7 +87,7 @@ int xc_hvm_restore(int xc_handle, int io
   39.29      unsigned long long v_end, memsize;
   39.30      unsigned long shared_page_nr;
   39.31  
   39.32 -    unsigned long mfn, pfn;
   39.33 +    unsigned long pfn;
   39.34      unsigned int prev_pc, this_pc;
   39.35      int verify = 0;
   39.36  
   39.37 @@ -100,22 +96,26 @@ int xc_hvm_restore(int xc_handle, int io
   39.38  
   39.39      struct xen_add_to_physmap xatp;
   39.40  
   39.41 +    /* Number of pages of memory the guest has.  *Not* the same as max_pfn. */
   39.42 +    unsigned long nr_pages;
   39.43 +
   39.44      /* hvm guest mem size (Mb) */
   39.45      memsize = (unsigned long long)*store_mfn;
   39.46      v_end = memsize << 20;
   39.47 +    nr_pages = (unsigned long) memsize << (20 - PAGE_SHIFT);
   39.48  
   39.49 -    DPRINTF("xc_hvm_restore:dom=%d, nr_pfns=0x%lx, store_evtchn=%d, *store_mfn=%ld, console_evtchn=%d, *console_mfn=%ld, pae=%u, apic=%u.\n", 
   39.50 -            dom, nr_pfns, store_evtchn, *store_mfn, console_evtchn, *console_mfn, pae, apic);
   39.51 +    DPRINTF("xc_hvm_restore:dom=%d, nr_pages=0x%lx, store_evtchn=%d, *store_mfn=%ld, pae=%u, apic=%u.\n", 
   39.52 +            dom, nr_pages, store_evtchn, *store_mfn, pae, apic);
   39.53  
   39.54 -    max_pfn = nr_pfns;
   39.55 -
   39.56 +    
   39.57      if(!get_platform_info(xc_handle, dom,
   39.58                            &max_mfn, &hvirt_start, &pt_levels)) {
   39.59          ERROR("Unable to get platform info.");
   39.60          return 1;
   39.61      }
   39.62  
   39.63 -    DPRINTF("xc_hvm_restore start: max_pfn = %lx, max_mfn = %lx, hvirt_start=%lx, pt_levels=%d\n",
   39.64 +    DPRINTF("xc_hvm_restore start: nr_pages = %lx, max_pfn = %lx, max_mfn = %lx, hvirt_start=%lx, pt_levels=%d\n",
   39.65 +            nr_pages,
   39.66              max_pfn,
   39.67              max_mfn,
   39.68              hvirt_start,
   39.69 @@ -128,30 +128,30 @@ int xc_hvm_restore(int xc_handle, int io
   39.70      }
   39.71  
   39.72  
   39.73 -    p2m = malloc(max_pfn * sizeof(xen_pfn_t));
   39.74 -    if (p2m == NULL) {
   39.75 +    pfns = malloc(max_pfn * sizeof(xen_pfn_t));
   39.76 +    if (pfns == NULL) {
   39.77          ERROR("memory alloc failed");
   39.78          errno = ENOMEM;
   39.79          goto out;
   39.80      }
   39.81  
   39.82 -    if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
   39.83 +    if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(nr_pages)) != 0) {
   39.84          errno = ENOMEM;
   39.85          goto out;
   39.86      }
   39.87  
   39.88      for ( i = 0; i < max_pfn; i++ )
   39.89 -        p2m[i] = i;
   39.90 +        pfns[i] = i;
   39.91      for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < max_pfn; i++ )
   39.92 -        p2m[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
   39.93 +        pfns[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
   39.94  
   39.95      /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
   39.96      rc = xc_domain_memory_populate_physmap(
   39.97 -        xc_handle, dom, (max_pfn > 0xa0) ? 0xa0 : max_pfn,
   39.98 -        0, 0, &p2m[0x00]);
   39.99 -    if ( (rc == 0) && (max_pfn > 0xc0) )
  39.100 +        xc_handle, dom, (nr_pages > 0xa0) ? 0xa0 : nr_pages,
  39.101 +        0, 0, &pfns[0x00]);
  39.102 +    if ( (rc == 0) && (nr_pages > 0xc0) )
  39.103          rc = xc_domain_memory_populate_physmap(
  39.104 -            xc_handle, dom, max_pfn - 0xc0, 0, 0, &p2m[0xc0]);
  39.105 +            xc_handle, dom, nr_pages - 0xc0, 0, 0, &pfns[0xc0]);
  39.106      if ( rc != 0 )
  39.107      {
  39.108          PERROR("Could not allocate memory for HVM guest.\n");
  39.109 @@ -172,9 +172,6 @@ int xc_hvm_restore(int xc_handle, int io
  39.110          goto out;
  39.111      }
  39.112  
  39.113 -    for ( i = 0; i < max_pfn; i++)
  39.114 -        p2m[i] = i;
  39.115 -
  39.116      prev_pc = 0;
  39.117  
  39.118      n = 0;
  39.119 @@ -182,7 +179,7 @@ int xc_hvm_restore(int xc_handle, int io
  39.120  
  39.121          int j;
  39.122  
  39.123 -        this_pc = (n * 100) / max_pfn;
  39.124 +        this_pc = (n * 100) / nr_pages;
  39.125          if ( (this_pc - prev_pc) >= 5 )
  39.126          {
  39.127              PPRINTF("\b\b\b\b%3d%%", this_pc);
  39.128 @@ -235,8 +232,6 @@ int xc_hvm_restore(int xc_handle, int io
  39.129              }
  39.130  
  39.131  
  39.132 -            mfn = p2m[pfn];
  39.133 -
  39.134              /* In verify mode, we use a copy; otherwise we work in place */
  39.135              page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
  39.136  
  39.137 @@ -253,8 +248,8 @@ int xc_hvm_restore(int xc_handle, int io
  39.138  
  39.139                      int v;
  39.140  
  39.141 -                    DPRINTF("************** pfn=%lx mfn=%lx gotcs=%08lx "
  39.142 -                            "actualcs=%08lx\n", pfn, p2m[pfn],
  39.143 +                    DPRINTF("************** pfn=%lx gotcs=%08lx "
  39.144 +                            "actualcs=%08lx\n", pfn, 
  39.145                              csum_page(region_base + i*PAGE_SIZE),
  39.146                              csum_page(buf));
  39.147  
  39.148 @@ -362,7 +357,7 @@ int xc_hvm_restore(int xc_handle, int io
  39.149   out:
  39.150      if ( (rc != 0) && (dom != 0) )
  39.151          xc_domain_destroy(xc_handle, dom);
  39.152 -    free(p2m);
  39.153 +    free(pfns);
  39.154      free(hvm_buf);
  39.155  
  39.156      DPRINTF("Restore exit with rc=%d\n", rc);
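
The restore path now separates nr_pages (how much RAM the guest really has, derived from memsize) from max_pfn (the top of guest PFN space, which also spans the VGA and below-4GB MMIO holes). A worked example of the arithmetic, assuming PAGE_SHIFT is 12:

    unsigned long long memsize_mb = 512;                        /* example guest size    */
    unsigned long nr_pages = memsize_mb << (20 - PAGE_SHIFT);   /* 512MB = 0x20000 pages */

    /* PFNs at or above HVM_BELOW_4G_RAM_END are shifted up by the MMIO hole
     * size, so max_pfn exceeds nr_pages for sufficiently large guests. */
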
    40.1 --- a/tools/libxc/xc_hvm_save.c	Fri Feb 09 14:43:22 2007 -0600
    40.2 +++ b/tools/libxc/xc_hvm_save.c	Wed Feb 14 19:01:35 2007 +0000
    40.3 @@ -27,18 +27,13 @@
    40.4  #include <stdlib.h>
    40.5  #include <unistd.h>
    40.6  #include <sys/time.h>
    40.7 +#include <xen/hvm/e820.h>
    40.8  
    40.9  #include "xc_private.h"
   40.10  #include "xg_private.h"
   40.11  #include "xg_save_restore.h"
   40.12  
   40.13  /*
   40.14 - * Size of a buffer big enough to take the HVM state of a domain.
   40.15 - * Ought to calculate this a bit more carefully, or maybe ask Xen.
   40.16 - */
   40.17 -#define HVM_CTXT_SIZE 8192
   40.18 -
   40.19 -/*
   40.20  ** Default values for important tuning parameters. Can override by passing
   40.21  ** non-zero replacement values to xc_hvm_save().
   40.22  **
   40.23 @@ -281,11 +276,11 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.24      /* A copy of the CPU context of the guest. */
   40.25      vcpu_guest_context_t ctxt;
   40.26  
   40.27 -    /* A table containg the type of each PFN (/not/ MFN!). */
   40.28 -    unsigned long *pfn_type = NULL;
   40.29 -    unsigned long *pfn_batch = NULL;
    40.30 +    /* A table containing the PFNs (/not/ MFNs!) to map. */
   40.31 +    xen_pfn_t *pfn_batch = NULL;
   40.32  
   40.33      /* A copy of hvm domain context buffer*/
   40.34 +    uint32_t hvm_buf_size;
   40.35      uint8_t *hvm_buf = NULL;
   40.36  
   40.37      /* Live mapping of shared info structure */
   40.38 @@ -295,7 +290,6 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.39      unsigned char *region_base = NULL;
   40.40  
   40.41      uint32_t nr_pfns, rec_size, nr_vcpus;
   40.42 -    unsigned long *page_array = NULL;
   40.43  
   40.44      /* power of 2 order of max_pfn */
   40.45      int order_nr;
   40.46 @@ -366,18 +360,12 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.47          goto out;
   40.48      }
   40.49  
   40.50 -    max_pfn = live_shinfo->arch.max_pfn;
   40.51 -
   40.52      DPRINTF("saved hvm domain info:max_memkb=0x%lx, max_mfn=0x%lx, nr_pages=0x%lx\n", info.max_memkb, max_mfn, info.nr_pages); 
   40.53  
   40.54 -    /* nr_pfns: total pages excluding vga acc mem
   40.55 -     * max_pfn: nr_pfns + 0x20 vga hole(0xa0~0xc0)
   40.56 -     * getdomaininfo.tot_pages: all the allocated pages for this domain
   40.57 -     */
   40.58      if (live) {
   40.59          ERROR("hvm domain doesn't support live migration now.\n");
   40.60          goto out;
   40.61 -
   40.62 +        
   40.63          if (xc_shadow_control(xc_handle, dom,
   40.64                                XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
   40.65                                NULL, 0, NULL, 0, NULL) < 0) {
   40.66 @@ -386,6 +374,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.67          }
   40.68  
   40.69          /* excludes vga acc mem */
   40.70 +        /* XXX will need to check whether acceleration is enabled here! */
   40.71          nr_pfns = info.nr_pages - 0x800;
   40.72  
   40.73          last_iter = 0;
   40.74 @@ -401,8 +390,8 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.75              ERROR("HVM Domain appears not to have suspended");
   40.76              goto out;
   40.77          }
   40.78 -        nr_pfns = info.nr_pages;
   40.79 -        DPRINTF("after suspend hvm domain nr_pages=0x%x.\n", nr_pfns);
   40.80 +
   40.81 +        nr_pfns = info.nr_pages; 
   40.82      }
   40.83  
   40.84      DPRINTF("after 1st handle hvm domain nr_pfns=0x%x, nr_pages=0x%lx, max_memkb=0x%lx, live=%d.\n",
   40.85 @@ -411,10 +400,15 @@ int xc_hvm_save(int xc_handle, int io_fd
   40.86              info.max_memkb,
   40.87              live);
   40.88  
   40.89 -    nr_pfns = info.nr_pages;
   40.90 -
   40.91 -    /*XXX: caculate the VGA hole*/
   40.92 -    max_pfn = nr_pfns + 0x20;
   40.93 +    /* Calculate the highest PFN of "normal" memory:
   40.94 +     * HVM memory is sequential except for the VGA and MMIO holes, and
   40.95 +     * we have nr_pfns of it (which now excludes the cirrus video RAM) */
   40.96 +    max_pfn = nr_pfns; 
   40.97 +    /* Skip the VGA hole from 0xa0000 to 0xc0000 */
   40.98 +    max_pfn += 0x20;   
   40.99 +    /* Skip the MMIO hole: 256MB just below 4GB */
  40.100 +    if ( max_pfn >= (HVM_BELOW_4G_MMIO_START >> PAGE_SHIFT) )
  40.101 +        max_pfn += (HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT); 
  40.102  
  40.103      skip_this_iter = 0;/*XXX*/
  40.104      /* pretend we sent all the pages last iteration */
  40.105 @@ -429,11 +423,16 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.106      to_send = malloc(BITMAP_SIZE);
  40.107      to_skip = malloc(BITMAP_SIZE);
  40.108  
  40.109 -    page_array = (unsigned long *) malloc( sizeof(unsigned long) * max_pfn);
  40.110  
  40.111 -    hvm_buf = malloc(HVM_CTXT_SIZE);
  40.112 +    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
  40.113 +    if ( hvm_buf_size == -1 )
  40.114 +    {
  40.115 +        ERROR("Couldn't get HVM context size from Xen");
  40.116 +        goto out;
  40.117 +    }
  40.118 +    hvm_buf = malloc(hvm_buf_size);
  40.119  
  40.120 -    if (!to_send ||!to_skip ||!page_array ||!hvm_buf ) {
  40.121 +    if (!to_send ||!to_skip ||!hvm_buf) {
  40.122          ERROR("Couldn't allocate memory");
  40.123          goto out;
  40.124      }
  40.125 @@ -453,26 +452,16 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.126  
  40.127      analysis_phase(xc_handle, dom, max_pfn, to_skip, 0);
  40.128  
  40.129 -    /* get all the HVM domain pfns */
  40.130 -    for ( i = 0; i < max_pfn; i++)
  40.131 -        page_array[i] = i;
  40.132 -
  40.133  
  40.134      /* We want zeroed memory so use calloc rather than malloc. */
  40.135 -    pfn_type  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
  40.136      pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
  40.137  
  40.138 -    if ((pfn_type == NULL) || (pfn_batch == NULL)) {
  40.139 -        ERROR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
  40.140 +    if (pfn_batch == NULL) {
  40.141 +        ERROR("failed to alloc memory for pfn_batch array");
  40.142          errno = ENOMEM;
  40.143          goto out;
  40.144      }
  40.145  
  40.146 -    if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
  40.147 -        ERROR("Unable to lock");
  40.148 -        goto out;
  40.149 -    }
  40.150 -
  40.151      /* Start writing out the saved-domain record. */
  40.152      if (!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
  40.153          ERROR("write: max_pfn");
  40.154 @@ -510,16 +499,15 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.155              }
  40.156  
  40.157  
  40.158 -            /* load pfn_type[] with the mfn of all the pages we're doing in
   40.159 +            /* load pfn_batch[] with the pfns of all the pages we're doing in
  40.160                 this batch. */
  40.161              for (batch = 0; batch < MAX_BATCH_SIZE && N < max_pfn ; N++) {
  40.162  
  40.163                  int n = permute(N, max_pfn, order_nr);
  40.164  
  40.165                  if (debug) {
  40.166 -                    DPRINTF("%d pfn= %08lx mfn= %08lx %d \n",
  40.167 -                            iter, (unsigned long)n, page_array[n],
  40.168 -                            test_bit(n, to_send));
  40.169 +                    DPRINTF("%d pfn= %08lx %d \n",
  40.170 +                            iter, (unsigned long)n, test_bit(n, to_send));
  40.171                  }
  40.172  
  40.173                  if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
  40.174 @@ -529,10 +517,12 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.175                        (test_bit(n, to_send) && last_iter)))
  40.176                      continue;
  40.177  
  40.178 -                if (n >= 0xa0 && n < 0xc0) {
  40.179 -/*                    DPRINTF("get a vga hole pfn= %x.\n", n);*/
  40.180 +                /* Skip PFNs that aren't really there */
  40.181 +                if ((n >= 0xa0 && n < 0xc0) /* VGA hole */
  40.182 +                    || (n >= (HVM_BELOW_4G_MMIO_START >> PAGE_SHIFT)
  40.183 +                        && n < (1ULL << 32) >> PAGE_SHIFT)) /* 4G MMIO hole */
  40.184                      continue;
  40.185 -                }
  40.186 +
  40.187                  /*
  40.188                  ** we get here if:
  40.189                  **  1. page is marked to_send & hasn't already been re-dirtied
  40.190 @@ -540,7 +530,6 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.191                  */
  40.192  
  40.193                  pfn_batch[batch] = n;
  40.194 -                pfn_type[batch]  = page_array[n];
  40.195  
  40.196                  batch++;
  40.197              }
  40.198 @@ -572,7 +561,6 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.199                  goto out;
  40.200              }
  40.201  
  40.202 -
  40.203              sent_this_iter += batch;
  40.204  
  40.205              munmap(region_base, batch*PAGE_SIZE);
  40.206 @@ -661,7 +649,7 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.207      }
  40.208  
  40.209      if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
  40.210 -                                              HVM_CTXT_SIZE)) == -1) {
  40.211 +                                              hvm_buf_size)) == -1) {
  40.212          ERROR("HVM:Could not get hvm buffer");
  40.213          goto out;
  40.214      }
  40.215 @@ -722,9 +710,6 @@ int xc_hvm_save(int xc_handle, int io_fd
  40.216      }
  40.217  
  40.218      free(hvm_buf);
  40.219 -    free(page_array);
  40.220 -
  40.221 -    free(pfn_type);
  40.222      free(pfn_batch);
  40.223      free(to_send);
  40.224      free(to_skip);
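
The send loop skips two PFN ranges that have no backing RAM. A hypothetical predicate summarising the test, assuming HVM_BELOW_4G_MMIO_START from xen/hvm/e820.h and PAGE_SHIFT from the libxc headers:

    static int pfn_is_hole(unsigned long n)
    {
        if (n >= 0xa0 && n < 0xc0)                       /* VGA hole, 0xa0000-0xc0000 */
            return 1;
        if (n >= (HVM_BELOW_4G_MMIO_START >> PAGE_SHIFT) &&
            n < ((1ULL << 32) >> PAGE_SHIFT))            /* 256MB MMIO hole below 4GB */
            return 1;
        return 0;
    }
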
    41.1 --- a/tools/libxc/xenguest.h	Fri Feb 09 14:43:22 2007 -0600
    41.2 +++ b/tools/libxc/xenguest.h	Wed Feb 14 19:01:35 2007 +0000
    41.3 @@ -57,9 +57,8 @@ int xc_linux_restore(int xc_handle, int 
    41.4   * @return 0 on success, -1 on failure
    41.5   */
    41.6  int xc_hvm_restore(int xc_handle, int io_fd, uint32_t dom,
    41.7 -                      unsigned long nr_pfns, unsigned int store_evtchn,
    41.8 -                      unsigned long *store_mfn, unsigned int console_evtchn,
    41.9 -                      unsigned long *console_mfn,
   41.10 +                      unsigned long max_pfn, unsigned int store_evtchn,
   41.11 +                      unsigned long *store_mfn, 
   41.12                        unsigned int pae, unsigned int apic);
   41.13  
   41.14  /**
    42.1 --- a/tools/libxc/xg_private.c	Fri Feb 09 14:43:22 2007 -0600
    42.2 +++ b/tools/libxc/xg_private.c	Wed Feb 14 19:01:35 2007 +0000
    42.3 @@ -209,9 +209,8 @@ int xc_hvm_save(int xc_handle, int io_fd
    42.4  
    42.5  __attribute__((weak)) 
    42.6  int xc_hvm_restore(int xc_handle, int io_fd, uint32_t dom,
    42.7 -                   unsigned long nr_pfns, unsigned int store_evtchn,
    42.8 -                   unsigned long *store_mfn, unsigned int console_evtchn,
    42.9 -                   unsigned long *console_mfn,
   42.10 +                   unsigned long max_pfn, unsigned int store_evtchn,
   42.11 +                   unsigned long *store_mfn,
   42.12                     unsigned int pae, unsigned int apic)
   42.13  {
   42.14      errno = ENOSYS;
    43.1 --- a/tools/misc/Makefile	Fri Feb 09 14:43:22 2007 -0600
    43.2 +++ b/tools/misc/Makefile	Wed Feb 14 19:01:35 2007 +0000
    43.3 @@ -9,7 +9,9 @@ CFLAGS   += $(INCLUDES)
    43.4  
    43.5  HDRS     = $(wildcard *.h)
    43.6  
    43.7 -TARGETS  = xenperf xc_shadow
    43.8 +TARGETS-y := xenperf xc_shadow
    43.9 +TARGETS-$(CONFIG_X86) += xen-detect
   43.10 +TARGETS := $(TARGETS-y)
   43.11  
   43.12  INSTALL_BIN  = $(TARGETS) xencons
   43.13  INSTALL_SBIN = netfix xm xen-bugtool xen-python-path xend xenperf
   43.14 @@ -41,5 +43,5 @@ clean:
   43.15  %.o: %.c $(HDRS) Makefile
   43.16  	$(CC) -c $(CFLAGS) -o $@ $<
   43.17  
   43.18 -$(TARGETS): %: %.o Makefile
   43.19 +xenperf xc_shadow: %: %.o Makefile
   43.20  	$(CC) $(CFLAGS) -o $@ $< -L$(XEN_LIBXC) -lxenctrl
    44.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    44.2 +++ b/tools/misc/xen-detect.c	Wed Feb 14 19:01:35 2007 +0000
    44.3 @@ -0,0 +1,108 @@
    44.4 +/******************************************************************************
     44.5 + * xen-detect.c
    44.6 + * 
    44.7 + * Simple GNU C / POSIX application to detect execution on Xen VMM platform.
    44.8 + * 
    44.9 + * Copyright (c) 2007, XenSource Inc.
   44.10 + * 
   44.11 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   44.12 + * of this software and associated documentation files (the "Software"), to
   44.13 + * deal in the Software without restriction, including without limitation the
   44.14 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   44.15 + * sell copies of the Software, and to permit persons to whom the Software is
   44.16 + * furnished to do so, subject to the following conditions:
   44.17 + *
   44.18 + * The above copyright notice and this permission notice shall be included in
   44.19 + * all copies or substantial portions of the Software.
   44.20 + *
   44.21 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   44.22 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   44.23 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   44.24 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   44.25 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   44.26 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   44.27 + * DEALINGS IN THE SOFTWARE.
   44.28 + */
   44.29 +
   44.30 +#include <stdint.h>
   44.31 +#include <stdio.h>
   44.32 +#include <stdlib.h>
   44.33 +#include <string.h>
   44.34 +#include <sys/types.h>
   44.35 +#include <sys/wait.h>
   44.36 +#include <unistd.h>
   44.37 +
   44.38 +static int pv_context;
   44.39 +
   44.40 +static void cpuid(uint32_t idx,
   44.41 +                  uint32_t *eax,
   44.42 +                  uint32_t *ebx,
   44.43 +                  uint32_t *ecx,
   44.44 +                  uint32_t *edx)
   44.45 +{
   44.46 +    asm volatile (
   44.47 +        "test %1,%1 ; jz 1f ; ud2a ; .ascii \"xen\" ; 1: cpuid"
   44.48 +        : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
   44.49 +        : "0" (idx), "1" (pv_context) );
   44.50 +}
   44.51 +
   44.52 +static int check_for_xen(void)
   44.53 +{
   44.54 +    uint32_t eax, ebx, ecx, edx;
   44.55 +    char signature[13];
   44.56 +
   44.57 +    cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
   44.58 +    *(uint32_t *)(signature + 0) = ebx;
   44.59 +    *(uint32_t *)(signature + 4) = ecx;
   44.60 +    *(uint32_t *)(signature + 8) = edx;
   44.61 +    signature[12] = '\0';
   44.62 +
   44.63 +    if ( strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002) )
   44.64 +        return 0;
   44.65 +
   44.66 +    cpuid(0x40000001, &eax, &ebx, &ecx, &edx);
   44.67 +    printf("Running in %s context on Xen v%d.%d.\n",
   44.68 +           pv_context ? "PV" : "HVM", (uint16_t)(eax >> 16), (uint16_t)eax);
   44.69 +    return 1;
   44.70 +}
   44.71 +
   44.72 +int main(void)
   44.73 +{
   44.74 +    pid_t pid;
   44.75 +    int status;
   44.76 +    uint32_t dummy;
   44.77 +
   44.78 +    /* Check for execution in HVM context. */
   44.79 +    if ( check_for_xen() )
   44.80 +        return 0;
   44.81 +
   44.82 +    /* Now we check for execution in PV context. */
   44.83 +    pv_context = 1;
   44.84 +
   44.85 +    /*
   44.86 +     * Fork a child to test the paravirtualised CPUID instruction.
   44.87 +     * If executed outside Xen PV context, the extended opcode will fault.
   44.88 +     */
   44.89 +    pid = fork();
   44.90 +    switch ( pid )
   44.91 +    {
   44.92 +    case 0:
   44.93 +        /* Child: test paravirtualised CPUID opcode and then exit cleanly. */
   44.94 +        cpuid(0x40000000, &dummy, &dummy, &dummy, &dummy);
   44.95 +        exit(0);
   44.96 +    case -1:
   44.97 +        fprintf(stderr, "Fork failed.\n");
   44.98 +        return 0;
   44.99 +    }
  44.100 +
  44.101 +    /*
  44.102 +     * Parent waits for child to terminate and checks for clean exit.
  44.103 +     * Only if the exit is clean is it safe for us to try the extended CPUID.
  44.104 +     */
  44.105 +    waitpid(pid, &status, 0);
  44.106 +    if ( WIFEXITED(status) && check_for_xen() )
  44.107 +        return 0;
  44.108 +
  44.109 +    printf("Not running on Xen.\n");
  44.110 +    return 0;
  44.111 +}
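
For reference, the CPUID protocol the new tool probes: leaf 0x40000000 returns the highest hypervisor leaf in EAX (it must be at least 0x40000002) and spells "XenVMMXenVMM" across EBX:ECX:EDX; leaf 0x40000001 returns the Xen version. In PV context the plain CPUID instruction is not virtualised, so the probe prefixes it with the ud2a/".ascii xen" escape that Xen emulates for PV guests and that faults on bare metal, hence the fork()ed child. A condensed sketch of the version decode (output format is illustrative):

    uint32_t eax, ebx, ecx, edx;

    cpuid(0x40000001, &eax, &ebx, &ecx, &edx);          /* version leaf */
    printf("Xen v%u.%u\n", eax >> 16, eax & 0xffff);    /* major.minor  */
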
    45.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Fri Feb 09 14:43:22 2007 -0600
    45.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Wed Feb 14 19:01:35 2007 +0000
    45.3 @@ -239,8 +239,11 @@ def restore(xd, fd, dominfo = None, paus
    45.4  
    45.5          forkHelper(cmd, fd, handler.handler, True)
    45.6  
    45.7 -        if handler.store_mfn is None or handler.console_mfn is None:
    45.8 -            raise XendError('Could not read store/console MFN')
    45.9 +        if handler.store_mfn is None:
   45.10 +            raise XendError('Could not read store MFN')
   45.11 +
   45.12 +        if not is_hvm and handler.console_mfn is None:
   45.13 +            raise XendError('Could not read console MFN')        
   45.14  
   45.15          dominfo.waitForDevices() # Wait for backends to set up
   45.16          if not paused:
    46.1 --- a/tools/python/xen/xend/XendConfig.py	Fri Feb 09 14:43:22 2007 -0600
    46.2 +++ b/tools/python/xen/xend/XendConfig.py	Wed Feb 14 19:01:35 2007 +0000
    46.3 @@ -1148,6 +1148,47 @@ class XendConfig(dict):
    46.4          # no valid device to add
    46.5          return ''
    46.6  
    46.7 +    def phantom_device_add(self, dev_type, cfg_xenapi = None,
    46.8 +                   target = None):
    46.9 +        """Add a phantom tap device configuration in XenAPI struct format.
   46.10 +        """
   46.11 +
   46.12 +        if target == None:
   46.13 +            target = self
   46.14 +        
   46.15 +        if dev_type not in XendDevices.valid_devices() and \
   46.16 +           dev_type not in XendDevices.pseudo_devices():        
   46.17 +            raise XendConfigError("XendConfig: %s not a valid device type" %
   46.18 +                            dev_type)
   46.19 +
   46.20 +        if cfg_xenapi == None:
    46.21 +            raise XendConfigError("XendConfig: phantom_device_add "
    46.22 +                                  "requires some config.")
   46.23 +
   46.24 +        if cfg_xenapi:
   46.25 +            log.debug("XendConfig.phantom_device_add: %s" % str(cfg_xenapi))
   46.26 + 
   46.27 +        if cfg_xenapi:
   46.28 +            dev_info = {}            
   46.29 +            if dev_type in ('vbd', 'tap'):
   46.30 +                if dev_type == 'vbd':
   46.31 +                    dev_info['uname'] = cfg_xenapi.get('image', '')
   46.32 +                    dev_info['dev'] = '%s:disk' % cfg_xenapi.get('device')
   46.33 +                elif dev_type == 'tap':
   46.34 +                    if cfg_xenapi.get('image').find('tap:') == -1:
   46.35 +                        dev_info['uname'] = 'tap:qcow:%s' % cfg_xenapi.get('image')
   46.36 +                    dev_info['dev'] =  '/dev/%s' % cfg_xenapi.get('device')
   46.37 +                    dev_info['uname'] = cfg_xenapi.get('image')
   46.38 +                dev_info['mode'] = cfg_xenapi.get('mode')
   46.39 +                dev_info['backend'] = '0'
   46.40 +                dev_uuid = cfg_xenapi.get('uuid', uuid.createString())
   46.41 +                dev_info['uuid'] = dev_uuid
   46.42 +                self['devices'][dev_uuid] = (dev_type, dev_info)
   46.43 +                self['vbd_refs'].append(dev_uuid)
   46.44 +                return dev_uuid
   46.45 +
   46.46 +        return ''
   46.47 +
   46.48      def console_add(self, protocol, location, other_config = {}):
   46.49          dev_uuid = uuid.createString()
   46.50          if protocol == 'vt100':
    47.1 --- a/tools/python/xen/xend/XendDomain.py	Fri Feb 09 14:43:22 2007 -0600
    47.2 +++ b/tools/python/xen/xend/XendDomain.py	Wed Feb 14 19:01:35 2007 +0000
    47.3 @@ -800,7 +800,10 @@ class XendDomain:
    47.4                                  "support.")
    47.5  
    47.6              path = self._managed_check_point_path(dom_uuid)
    47.7 -            fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    47.8 +            oflags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    47.9 +            if hasattr(os, "O_LARGEFILE"):
   47.10 +                oflags |= os.O_LARGEFILE
   47.11 +            fd = os.open(path, oflags)
   47.12              try:
   47.13                  # For now we don't support 'live checkpoint' 
   47.14                  XendCheckpoint.save(fd, dominfo, False, False, path)
   47.15 @@ -840,8 +843,11 @@ class XendDomain:
   47.16                  # Restore that replaces the existing XendDomainInfo
   47.17                  try:
   47.18                      log.debug('Current DomainInfo state: %d' % dominfo.state)
   47.19 +                    oflags = os.O_RDONLY
   47.20 +                    if hasattr(os, "O_LARGEFILE"):
   47.21 +                        oflags |= os.O_LARGEFILE
   47.22                      XendCheckpoint.restore(self,
   47.23 -                                           os.open(chkpath, os.O_RDONLY),
   47.24 +                                           os.open(chkpath, oflags),
   47.25                                             dominfo,
   47.26                                             paused = start_paused)
   47.27                      os.unlink(chkpath)
   47.28 @@ -1009,7 +1015,10 @@ class XendDomain:
   47.29          @raise XendError: Failure to restore domain
   47.30          """
   47.31          try:
   47.32 -            fd = os.open(src, os.O_RDONLY)
   47.33 +            oflags = os.O_RDONLY
   47.34 +            if hasattr(os, "O_LARGEFILE"):
   47.35 +                oflags |= os.O_LARGEFILE
   47.36 +            fd = os.open(src, oflags)
   47.37              try:
   47.38                  return self.domain_restore_fd(fd, paused=paused)
   47.39              finally:
   47.40 @@ -1193,7 +1202,10 @@ class XendDomain:
   47.41              if dominfo.getDomid() == DOM0_ID:
   47.42                  raise XendError("Cannot save privileged domain %i" % domid)
   47.43  
   47.44 -            fd = os.open(dst, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
   47.45 +            oflags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
   47.46 +            if hasattr(os, "O_LARGEFILE"):
   47.47 +                oflags |= os.O_LARGEFILE
   47.48 +            fd = os.open(dst, oflags)
   47.49              try:
   47.50                  # For now we don't support 'live checkpoint' 
   47.51                  XendCheckpoint.save(fd, dominfo, False, False, dst)
    48.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Fri Feb 09 14:43:22 2007 -0600
    48.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Wed Feb 14 19:01:35 2007 +0000
    48.3 @@ -1416,7 +1416,7 @@ class XendDomainInfo:
    48.4                                        self.info['image'],
    48.5                                        self.info['devices'])
    48.6  
    48.7 -            localtime = self.info.get('localtime', False)
    48.8 +            localtime = self.info.get('platform_localtime', False)
    48.9              if localtime:
   48.10                  xc.domain_set_time_offset(self.domid)
   48.11  
   48.12 @@ -1565,19 +1565,54 @@ class XendDomainInfo:
   48.13      # VM Destroy
   48.14      # 
   48.15  
   48.16 +    def _prepare_phantom_paths(self):
   48.17 +        # get associated devices to destroy
   48.18 +        # build list of phantom devices to be removed after normal devices
   48.19 +        plist = []
   48.20 +        from xen.xend.xenstore.xstransact import xstransact
   48.21 +        t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
   48.22 +        for dev in t.list():
   48.23 +            backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
   48.24 +                                  % (self.dompath, dev))
   48.25 +            if backend_phantom_vbd is not None:
   48.26 +                frontend_phantom_vbd =  xstransact.Read("%s/frontend" \
   48.27 +                                  % backend_phantom_vbd)
   48.28 +                plist.append(backend_phantom_vbd)
   48.29 +                plist.append(frontend_phantom_vbd)
   48.30 +        return plist
   48.31 +
   48.32 +    def _cleanup_phantom_devs(self, plist):
   48.33 +        # remove phantom devices
   48.34 +        if not plist == []:
   48.35 +            time.sleep(2)
   48.36 +        for paths in plist:
   48.37 +            if paths.find('backend') != -1:
   48.38 +                from xen.xend.server import DevController
   48.39 +                # Modify online status /before/ updating state (latter is watched by
   48.40 +                # drivers, so this ordering avoids a race).
   48.41 +                xstransact.Write(paths, 'online', "0")
   48.42 +                xstransact.Write(paths, 'state', str(DevController.xenbusState['Closing']))
   48.43 +            # force
   48.44 +            xstransact.Remove(paths)
   48.45 +
   48.46      def destroy(self):
   48.47          """Cleanup VM and destroy domain.  Nothrow guarantee."""
   48.48  
   48.49          log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
   48.50  
   48.51 +        paths = self._prepare_phantom_paths()
   48.52 +
   48.53          self._cleanupVm()
   48.54          if self.dompath is not None:
   48.55              self.destroyDomain()
   48.56  
   48.57 +        self._cleanup_phantom_devs(paths)
   48.58  
   48.59      def destroyDomain(self):
   48.60          log.debug("XendDomainInfo.destroyDomain(%s)", str(self.domid))
   48.61  
   48.62 +        paths = self._prepare_phantom_paths()
   48.63 +
   48.64          try:
   48.65              if self.domid is not None:
   48.66                  xc.domain_destroy(self.domid)
   48.67 @@ -1591,7 +1626,7 @@ class XendDomainInfo:
   48.68          XendDomain.instance().remove_domain(self)
   48.69  
   48.70          self.cleanupDomain()
   48.71 -
   48.72 +        self._cleanup_phantom_devs(paths)
   48.73  
   48.74      def resumeDomain(self):
   48.75          log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
   48.76 @@ -2211,6 +2246,25 @@ class XendDomainInfo:
   48.77  
   48.78          return dev_uuid
   48.79  
   48.80 +    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
   48.81 +        """Create a VBD using a VDI from XendStorageRepository.
   48.82 +
   48.83 +        @param xenapi_vbd: vbd struct from the Xen API
   48.84 +        @param vdi_image_path: VDI UUID
   48.85 +        @rtype: string
   48.86 +        @return: uuid of the device
   48.87 +        """
   48.88 +        xenapi_vbd['image'] = vdi_image_path
   48.89 +        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
   48.90 +        if not dev_uuid:
   48.91 +            raise XendError('Failed to create device')
   48.92 +
   48.93 +        if self.state == XEN_API_VM_POWER_STATE_RUNNING:
   48.94 +            _, config = self.info['devices'][dev_uuid]
   48.95 +            config['devid'] = self.getDeviceController('tap').createDevice(config)
   48.96 +
   48.97 +        return config['devid']
   48.98 +
   48.99      def create_vif(self, xenapi_vif):
  48.100          """Create VIF device from the passed struct in Xen API format.
  48.101  
    49.1 --- a/tools/python/xen/xend/server/BlktapController.py	Fri Feb 09 14:43:22 2007 -0600
    49.2 +++ b/tools/python/xen/xend/server/BlktapController.py	Wed Feb 14 19:01:35 2007 +0000
    49.3 @@ -2,7 +2,10 @@
    49.4  
    49.5  
    49.6  from xen.xend.server.blkif import BlkifController
    49.7 +from xen.xend.XendLogging import log
    49.8  
     49.9 +phantomDev = 0
    49.10 +phantomId = 0
   49.11  
   49.12  class BlktapController(BlkifController):
   49.13      def __init__(self, vm):
   49.14 @@ -12,3 +15,62 @@ class BlktapController(BlkifController):
   49.15          """@see DevController#frontendRoot"""
   49.16          
   49.17          return "%s/device/vbd" % self.vm.getDomainPath()
   49.18 +
   49.19 +    def getDeviceDetails(self, config):
   49.20 +        (devid, back, front) = BlkifController.getDeviceDetails(self, config)
   49.21 +
   49.22 +        phantomDevid = 0
   49.23 +        wrapped = False
   49.24 +
   49.25 +        try:
   49.26 +            imagetype = self.vm.info['image']['type']
   49.27 +        except:
   49.28 +            imagetype = ""
   49.29 +
   49.30 +        if imagetype == 'hvm':
   49.31 +            tdevname = back['dev']
   49.32 +            index = ['c', 'd', 'e', 'f', 'g', 'h', 'i', \
   49.33 +                     'j', 'l', 'm', 'n', 'o', 'p']
   49.34 +            while True:
   49.35 +                global phantomDev
   49.36 +                global phantomId
   49.37 +                import os, stat
   49.38 +
   49.39 +                phantomId = phantomId + 1
   49.40 +                if phantomId == 16:
   49.41 +                    if index[phantomDev] == index[-1]:
   49.42 +                        if wrapped:
    49.43 +                            raise VmError("No loopback block "
    49.44 +                                          "devices are available.")
   49.45 +                        wrapped = True
   49.46 +                        phantomDev = 0
   49.47 +                    else:
   49.48 +                        phantomDev = phantomDev + 1
   49.49 +                    phantomId = 1
   49.50 +                devname = 'xvd%s%d' % (index[phantomDev], phantomId)
   49.51 +                try:
   49.52 +                    info = os.stat('/dev/%s' % devname)
   49.53 +                except:
   49.54 +                    break
   49.55 +
   49.56 +            vbd = { 'mode': 'w', 'device': devname }
   49.57 +            fn = 'tap:%s' % back['params']
   49.58 +
   49.59 +            # recurse ... by creating the vbd, then fallthrough
   49.60 +            # and finish creating the original device
   49.61 +
   49.62 +            from xen.xend import XendDomain
   49.63 +            dom0 = XendDomain.instance().privilegedDomain()
   49.64 +            phantomDevid = dom0.create_phantom_vbd_with_vdi(vbd, fn)
    49.65 +            # we need to wait for this device at a higher level;
    49.66 +            # the vbd that gets created will carry a link back to us,
    49.67 +            # so the wait can be done there
   49.68 +
   49.69 +        # add a hook to point to the phantom device,
   49.70 +        # root path is always the same (dom0 tap)
   49.71 +        if phantomDevid != 0:
   49.72 +            front['phantom_vbd'] = '/local/domain/0/backend/tap/0/%s' \
   49.73 +                                   % str(phantomDevid)
   49.74 +
   49.75 +        return (devid, back, front)
   49.76 +
    50.1 --- a/tools/python/xen/xend/server/DevController.py	Fri Feb 09 14:43:22 2007 -0600
    50.2 +++ b/tools/python/xen/xend/server/DevController.py	Wed Feb 14 19:01:35 2007 +0000
    50.3 @@ -153,9 +153,9 @@ class DevController:
    50.4          log.debug("Waiting for %s.", devid)
    50.5  
    50.6          if not self.hotplug:
    50.7 -            return 
    50.8 -        
    50.9 -        status = self.waitForBackend(devid)
   50.10 +            return
   50.11 +
   50.12 +        (status, err) = self.waitForBackend(devid)
   50.13  
   50.14          if status == Timeout:
   50.15              self.destroyDevice(devid, False)
   50.16 @@ -165,25 +165,22 @@ class DevController:
   50.17  
   50.18          elif status == Error:
   50.19              self.destroyDevice(devid, False)
   50.20 -            raise VmError("Device %s (%s) could not be connected. "
   50.21 -                          "Backend device not found." %
   50.22 -                          (devid, self.deviceClass))
   50.23 -
   50.24 +            if err is None:
   50.25 +                raise VmError("Device %s (%s) could not be connected. "
   50.26 +                              "Backend device not found." %
   50.27 +                              (devid, self.deviceClass))
   50.28 +            else:
   50.29 +                raise VmError("Device %s (%s) could not be connected. "
   50.30 +                              "%s" % (devid, self.deviceClass, err))
   50.31          elif status == Missing:
   50.32              # Don't try to destroy the device; it's already gone away.
   50.33              raise VmError("Device %s (%s) could not be connected. "
   50.34                            "Device not found." % (devid, self.deviceClass))
   50.35  
   50.36          elif status == Busy:
   50.37 -            err = None
   50.38 -            frontpath = self.frontendPath(devid)
   50.39 -            backpath = xstransact.Read(frontpath, "backend")
   50.40 -            if backpath:
   50.41 -                err = xstransact.Read(backpath, HOTPLUG_ERROR_NODE)
   50.42 -            if not err:
   50.43 +            self.destroyDevice(devid, False)
   50.44 +            if err is None:
   50.45                  err = "Busy."
   50.46 -                
   50.47 -            self.destroyDevice(devid, False)
   50.48              raise VmError("Device %s (%s) could not be connected.\n%s" %
   50.49                            (devid, self.deviceClass, err))
   50.50  
   50.51 @@ -476,19 +473,36 @@ class DevController:
   50.52      def waitForBackend(self, devid):
   50.53  
   50.54          frontpath = self.frontendPath(devid)
    50.55 +        # look up a phantom vbd
   50.56 +        phantomPath = xstransact.Read(frontpath, 'phantom_vbd')
   50.57 +        if phantomPath is not None:
   50.58 +            log.debug("Waiting for %s's phantom %s.", devid, phantomPath)
   50.59 +            statusPath = phantomPath + '/' + HOTPLUG_STATUS_NODE
   50.60 +            ev = Event()
   50.61 +            result = { 'status': Timeout }
   50.62 +            xswatch(statusPath, hotplugStatusCallback, ev, result)
   50.63 +            ev.wait(DEVICE_CREATE_TIMEOUT)
   50.64 +            err = xstransact.Read(statusPath, HOTPLUG_ERROR_NODE)
   50.65 +            if result['status'] != 'Connected':
   50.66 +                return (result['status'], err)
   50.67 +            
   50.68          backpath = xstransact.Read(frontpath, "backend")
   50.69  
   50.70 +
   50.71          if backpath:
   50.72              statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
   50.73              ev = Event()
   50.74              result = { 'status': Timeout }
   50.75 -            
   50.76 +
   50.77              xswatch(statusPath, hotplugStatusCallback, ev, result)
   50.78  
   50.79              ev.wait(DEVICE_CREATE_TIMEOUT)
   50.80 -            return result['status']
   50.81 +
   50.82 +            err = xstransact.Read(backpath, HOTPLUG_ERROR_NODE)
   50.83 +
   50.84 +            return (result['status'], err)
   50.85          else:
   50.86 -            return Missing
   50.87 +            return (Missing, None)
   50.88  
   50.89  
   50.90      def backendPath(self, backdom, devid):
    51.1 --- a/tools/xcutils/xc_restore.c	Fri Feb 09 14:43:22 2007 -0600
    51.2 +++ b/tools/xcutils/xc_restore.c	Wed Feb 14 19:01:35 2007 +0000
    51.3 @@ -18,14 +18,14 @@
    51.4  int
    51.5  main(int argc, char **argv)
    51.6  {
    51.7 -    unsigned int xc_fd, io_fd, domid, nr_pfns, store_evtchn, console_evtchn;
    51.8 +    unsigned int xc_fd, io_fd, domid, max_pfn, store_evtchn, console_evtchn;
    51.9      unsigned int hvm, pae, apic;
   51.10      int ret;
   51.11      unsigned long store_mfn, console_mfn;
   51.12  
   51.13      if (argc != 9)
   51.14  	errx(1,
   51.15 -	     "usage: %s iofd domid nr_pfns store_evtchn console_evtchn hvm pae apic",
   51.16 +	     "usage: %s iofd domid max_pfn store_evtchn console_evtchn hvm pae apic",
   51.17  	     argv[0]);
   51.18  
   51.19      xc_fd = xc_interface_open();
   51.20 @@ -34,7 +34,7 @@ main(int argc, char **argv)
   51.21  
   51.22      io_fd = atoi(argv[1]);
   51.23      domid = atoi(argv[2]);
   51.24 -    nr_pfns = atoi(argv[3]);
   51.25 +    max_pfn = atoi(argv[3]);
   51.26      store_evtchn = atoi(argv[4]);
   51.27      console_evtchn = atoi(argv[5]);
   51.28      hvm  = atoi(argv[6]);
   51.29 @@ -44,15 +44,16 @@ main(int argc, char **argv)
   51.30      if (hvm) {
   51.31           /* pass the memsize to xc_hvm_restore to find the store_mfn */
   51.32          store_mfn = hvm;
   51.33 -        ret = xc_hvm_restore(xc_fd, io_fd, domid, nr_pfns, store_evtchn,
   51.34 -                &store_mfn, console_evtchn, &console_mfn, pae, apic);
   51.35 +        ret = xc_hvm_restore(xc_fd, io_fd, domid, max_pfn, store_evtchn,
   51.36 +                &store_mfn, pae, apic);
   51.37      } else 
   51.38 -        ret = xc_linux_restore(xc_fd, io_fd, domid, nr_pfns, store_evtchn,
   51.39 +        ret = xc_linux_restore(xc_fd, io_fd, domid, max_pfn, store_evtchn,
   51.40                  &store_mfn, console_evtchn, &console_mfn);
   51.41  
   51.42      if (ret == 0) {
   51.43  	printf("store-mfn %li\n", store_mfn);
   51.44 -	printf("console-mfn %li\n", console_mfn);
   51.45 +        if (!hvm)
   51.46 +            printf("console-mfn %li\n", console_mfn);
   51.47  	fflush(stdout);
   51.48      }
   51.49  
    52.1 --- a/tools/xenfb/vncfb.c	Fri Feb 09 14:43:22 2007 -0600
    52.2 +++ b/tools/xenfb/vncfb.c	Wed Feb 14 19:01:35 2007 +0000
    52.3 @@ -57,7 +57,8 @@ unsigned char keycode_table[512];
    52.4  static void *kbd_layout;
    52.5  
    52.6  static int btnmap[] = {
    52.7 -	BTN_LEFT, BTN_MIDDLE, BTN_RIGHT, BTN_FORWARD, BTN_BACK
    52.8 +	BTN_LEFT, BTN_MIDDLE, BTN_RIGHT, BTN_SIDE,
    52.9 +	BTN_EXTRA, BTN_FORWARD, BTN_BACK, BTN_TASK
   52.10  };
   52.11  
   52.12  static void on_kbd_event(rfbBool down, rfbKeySym keycode, rfbClientPtr cl)
   52.13 @@ -73,11 +74,12 @@ static void on_kbd_event(rfbBool down, r
   52.14  	 */
   52.15  	rfbScreenInfoPtr server = cl->screen;
   52.16  	struct xenfb *xenfb = server->screenData;
   52.17 +	int scancode;
   52.18  
   52.19 -	if( keycode >= 'A' && keycode <= 'Z' )
   52.20 +	if (keycode >= 'A' && keycode <= 'Z')
   52.21  		keycode += 'a' - 'A';
   52.22  
   52.23 -	int scancode = keycode_table[keysym2scancode(kbd_layout, keycode)];
   52.24 +	scancode = keycode_table[keysym2scancode(kbd_layout, keycode)];
   52.25  	if (scancode == 0)
   52.26  		return;
   52.27  	if (xenfb_send_key(xenfb, down, scancode) < 0)
    53.1 --- a/tools/xentrace/xentrace_format	Fri Feb 09 14:43:22 2007 -0600
    53.2 +++ b/tools/xentrace/xentrace_format	Wed Feb 14 19:01:35 2007 +0000
    53.3 @@ -107,6 +107,9 @@ while not interrupted:
    53.4  
    53.5          (tsc, event, d1, d2, d3, d4, d5) = struct.unpack(TRCREC, line)
    53.6  
    53.7 +        # Event field is 'uint32_t', not 'long'.
    53.8 +        event &= 0xffffffff
    53.9 +
   53.10  	#tsc = (tscH<<32) | tscL
   53.11  
   53.12  	#print i, tsc
    54.1 --- a/unmodified_drivers/linux-2.6/overrides.mk	Fri Feb 09 14:43:22 2007 -0600
    54.2 +++ b/unmodified_drivers/linux-2.6/overrides.mk	Wed Feb 14 19:01:35 2007 +0000
    54.3 @@ -6,3 +6,6 @@
    54.4  # a Xen kernel to find the right headers)
    54.5  EXTRA_CFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030202
    54.6  EXTRA_CFLAGS += -I$(M)/include -I$(M)/compat-include -DHAVE_XEN_PLATFORM_COMPAT_H
    54.7 +ifeq ($(ARCH),ia64)
    54.8 +  EXTRA_CFLAGS += -DCONFIG_VMX_GUEST
    54.9 +endif
    55.1 --- a/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c	Fri Feb 09 14:43:22 2007 -0600
    55.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-compat.c	Wed Feb 14 19:01:35 2007 +0000
    55.3 @@ -13,6 +13,12 @@ static int system_state = 1;
    55.4  EXPORT_SYMBOL(system_state);
    55.5  #endif
    55.6  
    55.7 +static inline void ctrl_alt_del(void)
    55.8 +{
    55.9 +	kill_proc(1, SIGINT, 1); /* interrupt init */
   55.10 +}
   55.11 +EXPORT_SYMBOL(ctrl_alt_del);
   55.12 +
   55.13  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8)
   55.14  size_t strcspn(const char *s, const char *reject)
   55.15  {
    56.1 --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Fri Feb 09 14:43:22 2007 -0600
    56.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c	Wed Feb 14 19:01:35 2007 +0000
    56.3 @@ -118,14 +118,10 @@ unsigned long alloc_xen_mmio(unsigned lo
    56.4  {
    56.5  	unsigned long addr;
    56.6  
    56.7 -	addr = 0;
    56.8 -	if (platform_mmio_alloc + len <= platform_mmiolen)
    56.9 -	{
   56.10 -		addr = platform_mmio + platform_mmio_alloc;
   56.11 -		platform_mmio_alloc += len;
   56.12 -	} else {
   56.13 -		panic("ran out of xen mmio space");
   56.14 -	}
   56.15 +	addr = platform_mmio + platform_mmio_alloc;
   56.16 +	platform_mmio_alloc += len;
   56.17 +	BUG_ON(platform_mmio_alloc > platform_mmiolen);
   56.18 +
   56.19  	return addr;
   56.20  }
   56.21  
   56.22 @@ -181,23 +177,19 @@ static int get_hypercall_stubs(void)
   56.23  
   56.24  static uint64_t get_callback_via(struct pci_dev *pdev)
   56.25  {
   56.26 +	u8 pin;
   56.27 +	int irq;
   56.28 +
   56.29  #ifdef __ia64__
   56.30 -	int irq, rid;
   56.31  	for (irq = 0; irq < 16; irq++) {
   56.32  		if (isa_irq_to_vector(irq) == pdev->irq)
   56.33 -			return irq;
   56.34 +			return irq; /* ISA IRQ */
   56.35  	}
   56.36 -	/* use Requester-ID as callback_irq */
   56.37 -	/* RID: '<#bus(8)><#dev(5)><#func(3)>' (cf. PCI-Express spec) */
   56.38 -	rid = ((pdev->bus->number & 0xff) << 8) | pdev->devfn;
   56.39 -	printk(KERN_INFO DRV_NAME ":use Requester-ID(%04x) as callback irq\n",
   56.40 -	       rid);
   56.41 -	return rid | IA64_CALLBACK_IRQ_RID;
   56.42  #else /* !__ia64__ */
   56.43 -	u8 pin;
   56.44 -
   56.45 -	if (pdev->irq < 16)
   56.46 -		return pdev->irq; /* ISA IRQ */
   56.47 +	irq = pdev->irq;
   56.48 +	if (irq < 16)
   56.49 +		return irq; /* ISA IRQ */
   56.50 +#endif
   56.51  
   56.52  #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
   56.53  	pin = pdev->pin;
   56.54 @@ -211,7 +203,6 @@ static uint64_t get_callback_via(struct 
   56.55  		((uint64_t)pdev->bus->number << 16) |
   56.56  		((uint64_t)(pdev->devfn & 0xff) << 8) |
   56.57  		((uint64_t)(pin - 1) & 3));
   56.58 -#endif
   56.59  }
   56.60  
   56.61  /* Invalidate foreign mappings (e.g., in qemu-based device model). */
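
The get_callback_via() rework above pairs with the leave_hypervisor_tail() hunk further down in vmx_process.c: instead of a Requester-ID, the 64-bit callback value now carries a type tag in bits 63:56 (1 = PCI INTx line, 0 = plain GSI), with the PCI domain in bits 47:32, bus in 31:16, devfn in 15:8 and the INTx pin in bits 1:0. A minimal sketch of that encoding and of the device/pin extraction the hypervisor side performs; the helper names are illustrative and not part of this changeset:

    #include <stdint.h>

    /* Encode a PCI INTx callback, following the layout described in the
     * leave_hypervisor_tail() comment (pin is 1-4 as read from config space). */
    static uint64_t encode_intx_callback(uint16_t seg, uint8_t bus,
                                         uint8_t devfn, uint8_t pin)
    {
        return ((uint64_t)1 << 56) |
               ((uint64_t)seg << 32) |
               ((uint64_t)bus << 16) |
               ((uint64_t)(devfn & 0xff) << 8) |
               ((uint64_t)(pin - 1) & 3);
    }

    /* Decode as the hypervisor does: device number and INTx pin only. */
    static void decode_intx_callback(uint64_t via, int *dev, int *intx)
    {
        if ((uint8_t)(via >> 56) == 1) {
            *dev  = (int)((via >> 11) & 0x1f);   /* devfn >> 3 */
            *intx = (int)(via & 3);
        }
    }
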
    57.1 --- a/unmodified_drivers/linux-2.6/platform-pci/xen_support.c	Fri Feb 09 14:43:22 2007 -0600
    57.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/xen_support.c	Wed Feb 14 19:01:35 2007 +0000
    57.3 @@ -30,6 +30,23 @@
    57.4  #include <xen/platform-compat.h>
    57.5  #endif
    57.6  
    57.7 +#if defined (__ia64__)
    57.8 +unsigned long __hypercall(unsigned long a1, unsigned long a2,
    57.9 +			  unsigned long a3, unsigned long a4,
   57.10 +			  unsigned long a5, unsigned long cmd)
   57.11 +{
   57.12 +	unsigned long __res;
   57.13 +	__asm__ __volatile__ (";;\n"
   57.14 +		"mov r2=%1\n"
   57.15 +		"break 0x1000 ;;\n"
   57.16 +		"mov %0=r8 ;;\n"
   57.17 +		: "=r"(__res) : "r"(cmd) : "r2", "r8", "memory");
   57.18 +
   57.19 +	return __res;
   57.20 +}
   57.21 +EXPORT_SYMBOL(__hypercall);
   57.22 +#endif
   57.23 +
   57.24  void xen_machphys_update(unsigned long mfn, unsigned long pfn)
   57.25  {
   57.26  	BUG();
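
The __hypercall() stub added above implements the ia64 hypercall trap for the PV-on-HVM drivers: the hypercall number goes into r2, break 0x1000 enters Xen, and the result comes back in r8, with the five argument slots passed through untouched. A sketch of a caller-side convenience macro; the macro name and the zero-padding of unused arguments are assumptions rather than part of this changeset:

    /* Illustrative wrapper over the exported __hypercall(). */
    #define _hypercall2(cmd, a1, a2)                                   \
            __hypercall((unsigned long)(a1), (unsigned long)(a2),      \
                        0UL, 0UL, 0UL, (unsigned long)(cmd))
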
    58.1 --- a/xen/arch/ia64/Rules.mk	Fri Feb 09 14:43:22 2007 -0600
    58.2 +++ b/xen/arch/ia64/Rules.mk	Wed Feb 14 19:01:35 2007 +0000
    58.3 @@ -4,7 +4,6 @@
    58.4  HAS_ACPI := y
    58.5  HAS_VGA  := y
    58.6  xenoprof := y
    58.7 -VALIDATE_VT	?= n
    58.8  no_warns ?= n
    58.9  xen_ia64_expose_p2m	?= y
   58.10  xen_ia64_pervcpu_vhpt	?= y
   58.11 @@ -37,9 +36,6 @@ CFLAGS	+= -DIA64 -DXEN -DLINUX_2_6
   58.12  CFLAGS	+= -ffixed-r13 -mfixed-range=f2-f5,f12-f127
   58.13  CFLAGS	+= -g
   58.14  #CFLAGS  += -DVTI_DEBUG
   58.15 -ifeq ($(VALIDATE_VT),y)
   58.16 -CFLAGS  += -DVALIDATE_VT
   58.17 -endif
   58.18  ifeq ($(xen_ia64_expose_p2m),y)
   58.19  CFLAGS	+= -DCONFIG_XEN_IA64_EXPOSE_P2M
   58.20  endif
    59.1 --- a/xen/arch/ia64/asm-offsets.c	Fri Feb 09 14:43:22 2007 -0600
    59.2 +++ b/xen/arch/ia64/asm-offsets.c	Wed Feb 14 19:01:35 2007 +0000
    59.3 @@ -57,6 +57,7 @@ void foo(void)
    59.4  
    59.5  	DEFINE(IA64_VCPU_DOMAIN_OFFSET, offsetof (struct vcpu, domain));
    59.6  	DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
    59.7 +	DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
    59.8  	DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
    59.9  	DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
   59.10  	DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
   59.11 @@ -199,6 +200,7 @@ void foo(void)
   59.12  	DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
   59.13  	DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
   59.14   	DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
   59.15 +	DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
   59.16  	DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
   59.17  	DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
   59.18  
    60.1 --- a/xen/arch/ia64/asm-xsi-offsets.c	Fri Feb 09 14:43:22 2007 -0600
    60.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c	Wed Feb 14 19:01:35 2007 +0000
    60.3 @@ -61,6 +61,8 @@ void foo(void)
    60.4  	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
    60.5  	DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
    60.6  	DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
    60.7 +	DEFINE_MAPPED_REG_OFS(XSI_VPSR_DFH_OFS, vpsr_dfh);
    60.8 +	DEFINE_MAPPED_REG_OFS(XSI_HPSR_DFH_OFS, hpsr_dfh);
    60.9  	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
   60.10  	DEFINE_MAPPED_REG_OFS(XSI_VPSR_PP_OFS, vpsr_pp);
   60.11  	DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
    61.1 --- a/xen/arch/ia64/linux-xen/head.S	Fri Feb 09 14:43:22 2007 -0600
    61.2 +++ b/xen/arch/ia64/linux-xen/head.S	Wed Feb 14 19:01:35 2007 +0000
    61.3 @@ -267,13 +267,8 @@ start_ap:
    61.4  	/*
    61.5  	 * Switch into virtual mode:
    61.6  	 */
    61.7 -#if defined(XEN) && defined(VALIDATE_VT)
    61.8 -	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\
    61.9 -		  |IA64_PSR_DI)
   61.10 -#else
   61.11  	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
   61.12  		  |IA64_PSR_DI)
   61.13 -#endif
   61.14  	;;
   61.15  	mov cr.ipsr=r16
   61.16  	movl r17=1f
   61.17 @@ -292,11 +287,7 @@ 1:	// now we are in virtual mode
   61.18  	;;
   61.19  
   61.20  	// set IVT entry point---can't access I/O ports without it
   61.21 -#if defined(XEN) && defined(VALIDATE_VT)
   61.22 -	movl r3=vmx_ia64_ivt
   61.23 -#else
   61.24  	movl r3=ia64_ivt
   61.25 -#endif
   61.26  	;;
   61.27  	mov cr.iva=r3
   61.28  	movl r2=FPSR_DEFAULT
   61.29 @@ -368,15 +359,8 @@ 1:	// now we are in virtual mode
   61.30  	
   61.31  .load_current:
   61.32  	// load the "current" pointer (r13) and ar.k6 with the current task
   61.33 -#if defined(XEN) && defined(VALIDATE_VT)
   61.34 -	mov r21=r2
   61.35 -	;;
   61.36 -	bsw.1
   61.37 -	;;
   61.38 -#else
   61.39  	mov IA64_KR(CURRENT)=r2		// virtual address
   61.40  	mov IA64_KR(CURRENT_STACK)=r16
   61.41 -#endif
   61.42  	mov r13=r2
   61.43  	/*
   61.44  	 * Reserve space at the top of the stack for "struct pt_regs".  Kernel
    62.1 --- a/xen/arch/ia64/linux-xen/mca.c	Fri Feb 09 14:43:22 2007 -0600
    62.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Wed Feb 14 19:01:35 2007 +0000
    62.3 @@ -84,6 +84,7 @@
    62.4  #include <xen/event.h>
    62.5  #include <xen/softirq.h>
    62.6  #include <asm/xenmca.h>
    62.7 +#include <linux/shutdown.h>
    62.8  #endif
    62.9  
   62.10  #if defined(IA64_MCA_DEBUG_INFO)
   62.11 @@ -684,16 +685,8 @@ fetch_min_state (pal_min_state_area_t *m
   62.12  #ifdef XEN
   62.13  static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
   62.14  static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
   62.15 -
   62.16 -static void
   62.17 -save_ksp (struct unw_frame_info *info, void *arg)
   62.18 -{
   62.19 -	current->arch._thread.ksp = (__u64)(info->sw) - 16;
   62.20 -	wmb();
   62.21 -}
   62.22 -
   62.23 -/* FIXME */
   62.24 -int try_crashdump(struct pt_regs *a) { return 0; }
   62.25 +static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
   62.26 +extern void show_stack (struct task_struct *, unsigned long *);
   62.27  
   62.28  #define CPU_FLUSH_RETRY_MAX 5
   62.29  static void
   62.30 @@ -716,6 +709,35 @@ init_cache_flush (void)
   62.31  	}
   62.32  	printk("\nPAL cache flush failed. status=%ld\n",rval);
   62.33  }
   62.34 +
   62.35 +static void inline
   62.36 +save_ksp (struct unw_frame_info *info)
   62.37 +{
   62.38 +	current->arch._thread.ksp = (__u64)(info->sw) - 16;
   62.39 +	wmb();
   62.40 +	init_cache_flush();
   62.41 +}	
   62.42 +
   62.43 +static void
   62.44 +freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
   62.45 +{
   62.46 +	save_ksp(info);
   62.47 +	atomic_inc(&num_stopped_cpus);
   62.48 +	printk("%s: CPU%d init handler done\n",
   62.49 +	       __FUNCTION__, smp_processor_id());
   62.50 +	for (;;)
   62.51 +		local_irq_disable();
   62.52 +}
   62.53 +
   62.54 +/* FIXME */
   62.55 +static void
   62.56 +try_crashdump(struct unw_frame_info *info, void *arg)
   62.57 +{ 
   62.58 +	save_ksp(info);
   62.59 +	printk("\nINIT dump complete.  Please reboot now.\n");
   62.60 +	for (;;)
   62.61 +		local_irq_disable();
   62.62 +}
   62.63  #endif /* XEN */
   62.64  
   62.65  static void
   62.66 @@ -741,7 +763,8 @@ init_handler_platform (pal_min_state_are
   62.67  	show_min_state(ms);
   62.68  
   62.69  #ifdef XEN
   62.70 -	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
   62.71 +	printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
   62.72 +	       current->vcpu_id, current->domain->domain_id);
   62.73  #else
   62.74  	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
   62.75  	fetch_min_state(ms, pt, sw);
   62.76 @@ -749,20 +772,35 @@ init_handler_platform (pal_min_state_are
   62.77  	unw_init_from_interruption(&info, current, pt, sw);
   62.78  	ia64_do_show_stack(&info, NULL);
   62.79  #ifdef XEN
   62.80 -	unw_init_running(save_ksp, NULL);
   62.81  	spin_unlock(&show_stack_lock);
   62.82 -	wmb();
   62.83 -	init_cache_flush();
   62.84  
   62.85  	if (spin_trylock(&init_dump_lock)) {
   62.86 +		struct domain *d;
   62.87 +		struct vcpu *v;
   62.88  #ifdef CONFIG_SMP
   62.89 -		udelay(5*1000000);
   62.90 +		int other_cpus = num_online_cpus() - 1;
   62.91 +		int wait = 1000 * other_cpus;
   62.92 +
   62.93 +		while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
   62.94 +			udelay(1000);
   62.95 +		if (other_cpus && wait < 0)
   62.96 +			printk("timeout %d\n", atomic_read(&num_stopped_cpus));
   62.97  #endif
   62.98 -		if (try_crashdump(pt) == 0)
   62.99 -			printk("\nINIT dump complete.  Please reboot now.\n");
  62.100 +		if (opt_noreboot) {
  62.101 +			/* this route is for dump routine */
  62.102 +			unw_init_running(try_crashdump, pt);
  62.103 +		} else {
  62.104 +			for_each_domain(d) {
  62.105 +				for_each_vcpu(d, v) {
  62.106 +					printk("Backtrace of current vcpu "
  62.107 +					       "(vcpu_id %d of domid %d)\n",
  62.108 +					       v->vcpu_id, d->domain_id);
  62.109 +					show_stack(v, NULL);
  62.110 +				}
  62.111 +			}
  62.112 +		}
  62.113  	}
  62.114 -	printk("%s: CPU%d init handler done\n",
  62.115 -	       __FUNCTION__, smp_processor_id());
  62.116 +	unw_init_running(freeze_cpu_osinit, NULL);
  62.117  #else /* XEN */
  62.118  #ifdef CONFIG_SMP
  62.119  	/* read_trylock() would be handy... */
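
The reworked INIT handling above is spread across several hunks; condensed, each CPU that takes the INIT event saves its kernel stack pointer, bumps num_stopped_cpus and parks with interrupts disabled, while the one CPU that wins init_dump_lock waits roughly a second per remote CPU for that counter before either taking the crash-dump route (opt_noreboot) or dumping every vcpu's stack. A sketch of the master-CPU wait, using the names from the hunk:

    /* Master-CPU side of the INIT rendezvous (condensed from the hunk above). */
    static void wait_for_frozen_cpus(void)
    {
            int other_cpus = num_online_cpus() - 1;
            int wait = 1000 * other_cpus;           /* ~1s per remote CPU */

            while (atomic_read(&num_stopped_cpus) != other_cpus && wait--)
                    udelay(1000);
            if (other_cpus && wait < 0)
                    printk("timeout %d\n", atomic_read(&num_stopped_cpus));
    }
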
    63.1 --- a/xen/arch/ia64/vmx/optvfault.S	Fri Feb 09 14:43:22 2007 -0600
    63.2 +++ b/xen/arch/ia64/vmx/optvfault.S	Wed Feb 14 19:01:35 2007 +0000
    63.3 @@ -15,6 +15,7 @@
    63.4  #include <asm/vmx_vpd.h>
    63.5  #include <asm/vmx_pal_vsa.h>
    63.6  #include <asm/asm-offsets.h>
    63.7 +#include <asm-ia64/vmx_mm_def.h>
    63.8  
    63.9  #define ACCE_MOV_FROM_AR
   63.10  #define ACCE_MOV_FROM_RR
   63.11 @@ -22,6 +23,7 @@
   63.12  #define ACCE_RSM
   63.13  #define ACCE_SSM
   63.14  #define ACCE_MOV_TO_PSR
   63.15 +#define ACCE_THASH
   63.16  
   63.17  //mov r1=ar3
   63.18  GLOBAL_ENTRY(vmx_asm_mov_from_ar)
   63.19 @@ -192,6 +194,13 @@ GLOBAL_ENTRY(vmx_asm_rsm)
   63.20      ;;   
   63.21      st8 [r17]=r19
   63.22      and r20=r20,r28
   63.23 +    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
   63.24 +    ;;
   63.25 +    ld8 r27=[r27]
   63.26 +    ;;
   63.27 +    tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
   63.28 +    ;;
   63.29 +    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
   63.30      ;;
   63.31      mov cr.ipsr=r20
   63.32      tbit.nz p6,p0=r23,0
   63.33 @@ -360,6 +369,14 @@ vmx_asm_mov_to_psr_1:
   63.34      add r20=r19,r20
   63.35      mov b0=r24
   63.36      ;;
   63.37 +    adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
   63.38 +    ;;
   63.39 +    ld8 r27=[r27]
   63.40 +    ;;
   63.41 +    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
   63.42 +    ;;
   63.43 +    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
   63.44 +    ;;
   63.45      mov cr.ipsr=r20
   63.46      cmp.ne p6,p0=r0,r0
   63.47      ;;
   63.48 @@ -403,6 +420,64 @@ ENTRY(vmx_asm_dispatch_vexirq)
   63.49      br.many vmx_dispatch_vexirq
   63.50  END(vmx_asm_dispatch_vexirq)
   63.51  
   63.52 +// thash
   63.53 +// TODO: add support when pta.vf = 1
   63.54 +GLOBAL_ENTRY(vmx_asm_thash)
   63.55 +#ifndef ACCE_THASH
   63.56 +    br.many vmx_virtualization_fault_back
   63.57 +#endif
   63.58 +    extr.u r17=r25,20,7		// get r3 from opcode in r25 
   63.59 +    extr.u r18=r25,6,7		// get r1 from opcode in r25
   63.60 +    movl r20=asm_mov_from_reg
   63.61 +    ;;
   63.62 +    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
   63.63 +    shladd r17=r17,4,r20	// get addr of MOVE_FROM_REG(r17)
    63.64 +    adds r16=IA64_VPD_BASE_OFFSET,r21	// get vcpu.arch.privregs
   63.65 +    ;;
   63.66 +    mov r24=b0
   63.67 +    ;;
   63.68 +    ld8 r16=[r16]		// get VPD addr
   63.69 +    mov b0=r17
   63.70 +    br.many b0			// r19 return value
   63.71 +    ;;                                                     
   63.72 +vmx_asm_thash_back1:
   63.73 +    shr.u r23=r19,61		// get RR number
   63.74 +    adds r25=VCPU_VRR0_OFS,r21	// get vcpu->arch.arch_vmx.vrr[0]'s addr
   63.75 +    adds r16=IA64_VPD_VPTA_OFFSET,r16	// get vpta 
   63.76 +    ;;
   63.77 +    shladd r27=r23,3,r25	// get vcpu->arch.arch_vmx.vrr[r23]'s addr
   63.78 +    ld8 r17=[r16]		// get PTA
   63.79 +    mov r26=1
   63.80 +    ;;
   63.81 +    extr.u r29=r17,2,6		// get pta.size
   63.82 +    ld8 r25=[r27]		// get vcpu->arch.arch_vmx.vrr[r23]'s value
   63.83 +    ;;
   63.84 +    extr.u r25=r25,2,6		// get rr.ps
   63.85 +    shl r22=r26,r29		// 1UL << pta.size
   63.86 +    ;;
   63.87 +    shr.u r23=r19,r25		// vaddr >> rr.ps
   63.88 +    adds r26=3,r29		// pta.size + 3 
   63.89 +    shl r27=r17,3		// pta << 3 
   63.90 +    ;;
   63.91 +    shl r23=r23,3		// (vaddr >> rr.ps) << 3
   63.92 +    shr.u r27=r27,r26		// (pta << 3) >> (pta.size+3)
   63.93 +    movl r16=VRN_MASK
   63.94 +    ;;
   63.95 +    adds r22=-1,r22		// (1UL << pta.size) - 1
   63.96 +    shl r27=r27,r29		// ((pta<<3)>>(pta.size+3))<<pta.size
   63.97 +    and r19=r19,r16		// vaddr & VRN_MASK
   63.98 +    ;;
   63.99 +    and r22=r22,r23		// vhpt_offset 
  63.100 +    or r19=r19,r27		// (vadr&VRN_MASK) |(((pta<<3)>>(pta.size + 3))<<pta.size) 
  63.101 +    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
  63.102 +    ;;
  63.103 +    or r19=r19,r22		// calc pval
  63.104 +    shladd r17=r18,4,r26
  63.105 +    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
  63.106 +    ;;
  63.107 +    mov b0=r17
  63.108 +    br.many b0
  63.109 +END(vmx_asm_thash)
  63.110  
  63.111  #define MOV_TO_REG0	\
  63.112  {;			\
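
The new vmx_asm_thash handler computes the long-format VHPT hash address entirely in assembly; its inline comments spell out the arithmetic. A C rendering of the same computation, purely as a readability aid; the VRN_MASK value is an assumption (region-number bits 63:61, per asm-ia64/vmx_mm_def.h included above):

    #include <stdint.h>

    #define VRN_MASK 0xe000000000000000UL  /* assumed: virtual region number bits */

    /* thash(vaddr) for a guest: pta is the guest PTA, rr the matching
     * region register value (vcpu->arch.arch_vmx.vrr[vrn] in the hunk). */
    static uint64_t guest_thash(uint64_t vaddr, uint64_t pta, uint64_t rr)
    {
            uint64_t pta_size = (pta >> 2) & 0x3f;           /* pta.size */
            uint64_t rr_ps    = (rr  >> 2) & 0x3f;           /* rr.ps    */
            uint64_t vhpt_off = ((vaddr >> rr_ps) << 3) &
                                ((1UL << pta_size) - 1);
            uint64_t base     = ((pta << 3) >> (pta_size + 3)) << pta_size;

            return (vaddr & VRN_MASK) | base | vhpt_off;
    }
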
    64.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Fri Feb 09 14:43:22 2007 -0600
    64.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Wed Feb 14 19:01:35 2007 +0000
    64.3 @@ -171,7 +171,12 @@ static void vtm_timer_fn(void *data)
    64.4      } else
    64.5          vtm->pending = 1;
    64.6  
    64.7 -    update_last_itc(vtm, VCPU(vcpu, itm));  // update vITC
    64.8 +    /*
    64.9 +     * The "+ 1" works around an oops message from timer_interrupt() on a
   64.10 +     * VTI guest.  If the guest's oops check is changed to timer_after_eq(),
   64.11 +     * this adjustment should be removed.
   64.12 +     */
   64.13 +    update_last_itc(vtm, VCPU(vcpu, itm) + 1);  // update vITC
   64.14  }
   64.15  
   64.16  void vtm_init(VCPU *vcpu)
    65.1 --- a/xen/arch/ia64/vmx/vmmu.c	Fri Feb 09 14:43:22 2007 -0600
    65.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Wed Feb 14 19:01:35 2007 +0000
    65.3 @@ -129,13 +129,15 @@ purge_machine_tc_by_domid(domid_t domid)
    65.4  #endif
    65.5  }
    65.6  
    65.7 -static void init_domain_vhpt(struct vcpu *v)
    65.8 +static int init_domain_vhpt(struct vcpu *v)
    65.9  {
   65.10      struct page_info *page;
   65.11      void * vbase;
   65.12      page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
   65.13      if ( page == NULL ) {
   65.14 -        panic_domain(vcpu_regs(v),"No enough contiguous memory for init_domain_vhpt\n");
    65.15 +        printk("Not enough contiguous memory for init_domain_vhpt\n");
   65.16 +
   65.17 +        return -1;
   65.18      }
   65.19      vbase = page_to_virt(page);
   65.20      memset(vbase, 0, VCPU_VHPT_SIZE);
   65.21 @@ -147,18 +149,36 @@ static void init_domain_vhpt(struct vcpu
   65.22      VHPT(v,cch_sz) = VCPU_VHPT_SIZE - VHPT(v,hash_sz);
   65.23      thash_init(&(v->arch.vhpt),VCPU_VHPT_SHIFT-1);
   65.24      v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
   65.25 +
   65.26 +    return 0;
   65.27  }
   65.28  
   65.29  
   65.30 +static void free_domain_vhpt(struct vcpu *v)
   65.31 +{
   65.32 +    struct page_info *page;
   65.33  
   65.34 -void init_domain_tlb(struct vcpu *v)
   65.35 +    if (v->arch.vhpt.hash) {
   65.36 +        page = virt_to_page(v->arch.vhpt.hash);
   65.37 +        free_domheap_pages(page, VCPU_VHPT_ORDER);
   65.38 +    }
   65.39 +
   65.40 +    return;
   65.41 +}
   65.42 +
   65.43 +int init_domain_tlb(struct vcpu *v)
   65.44  {
   65.45      struct page_info *page;
   65.46      void * vbase;
   65.47 -    init_domain_vhpt(v);
   65.48 +
   65.49 +    if (init_domain_vhpt(v) != 0)
   65.50 +        return -1;
   65.51 +
   65.52      page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
   65.53      if ( page == NULL ) {
   65.54 -        panic_domain(vcpu_regs(v),"No enough contiguous memory for init_domain_tlb\n");
    65.55 +        printk("Not enough contiguous memory for init_domain_tlb\n");
   65.56 +        free_domain_vhpt(v);
   65.57 +        return -1;
   65.58      }
   65.59      vbase = page_to_virt(page);
   65.60      memset(vbase, 0, VCPU_VTLB_SIZE);
   65.61 @@ -169,8 +189,11 @@ void init_domain_tlb(struct vcpu *v)
   65.62      VTLB(v,cch_buf) = (void *)((u64)vbase + VTLB(v,hash_sz));
   65.63      VTLB(v,cch_sz) = VCPU_VTLB_SIZE - VTLB(v,hash_sz);
   65.64      thash_init(&(v->arch.vtlb),VCPU_VTLB_SHIFT-1);
   65.65 +    
   65.66 +    return 0;
   65.67  }
   65.68  
   65.69 +
   65.70  void free_domain_tlb(struct vcpu *v)
   65.71  {
   65.72      struct page_info *page;
   65.73 @@ -179,10 +202,8 @@ void free_domain_tlb(struct vcpu *v)
   65.74          page = virt_to_page(v->arch.vtlb.hash);
   65.75          free_domheap_pages(page, VCPU_VTLB_ORDER);
   65.76      }
   65.77 -    if ( v->arch.vhpt.hash) {
   65.78 -        page = virt_to_page(v->arch.vhpt.hash);
   65.79 -        free_domheap_pages(page, VCPU_VHPT_ORDER);
   65.80 -    }
   65.81 +
   65.82 +    free_domain_vhpt(v);
   65.83  }
   65.84  
   65.85  /*
   65.86 @@ -553,7 +574,8 @@ static void ptc_ga_remote_func (void *va
   65.87      mpta = ia64_get_pta();
   65.88      ia64_set_pta(v->arch.arch_vmx.mpta&(~1));
   65.89      ia64_srlz_d();
   65.90 -    vmx_vcpu_ptc_l(v, REGION_OFFSET(vadr), args->ps);
   65.91 +    vadr = PAGEALIGN(vadr, args->ps);
   65.92 +    thash_purge_entries_remote(v, vadr, args->ps);
   65.93      VMX(v, vrr[0]) = oldrid; 
   65.94      VMX(v, psbits[0]) = oldpsbits;
   65.95      ia64_set_rr(0x0,moldrid);
    66.1 --- a/xen/arch/ia64/vmx/vmx_entry.S	Fri Feb 09 14:43:22 2007 -0600
    66.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S	Wed Feb 14 19:01:35 2007 +0000
    66.3 @@ -190,12 +190,8 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
    66.4      PT_REGS_UNWIND_INFO(0)
    66.5      rsm psr.i
    66.6      ;;
    66.7 -    alloc loc0=ar.pfs,0,1,1,0
    66.8 -    ;;
    66.9 -    adds out0=16,r12
   66.10      br.call.sptk.many b0=leave_hypervisor_tail
   66.11      ;;
   66.12 -    mov ar.pfs=loc0
   66.13      adds r20=PT(PR)+16,r12
   66.14      adds r8=PT(EML_UNAT)+16,r12
   66.15      ;;
   66.16 @@ -302,11 +298,9 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
   66.17      ;;
   66.18      mov ar.fpsr=r19
   66.19      mov ar.ccv=r18
   66.20 -    ;;
   66.21 -//rbs_switch
   66.22 -    
   66.23      shr.u r18=r20,16
   66.24      ;;
   66.25 +vmx_rbs_switch:    
   66.26      movl r19= THIS_CPU(ia64_phys_stacked_size_p8)
   66.27      ;;
   66.28      ld4 r19=[r19]
   66.29 @@ -368,7 +362,7 @@ vmx_rse_clear_invalid:
   66.30      ;;
   66.31      mov cr.ipsr=r31
   66.32      mov cr.iip=r30
   66.33 -    mov cr.ifs=r29
   66.34 +(pNonSys) mov cr.ifs=r29
   66.35      mov ar.pfs=r27
   66.36      adds r18=IA64_VPD_BASE_OFFSET,r21
   66.37      ;;
   66.38 @@ -425,148 +419,190 @@ ia64_vmm_entry_out:
   66.39  END(ia64_vmm_entry)
   66.40  
   66.41  
   66.42 -#ifdef XEN_DBL_MAPPING  /* will be removed */
   66.43 -
   66.44 -#define VMX_PURGE_RR7	0
   66.45 -#define VMX_INSERT_RR7	1
   66.46 -/*
   66.47 - * in0: old rr7
   66.48 - * in1: virtual address of xen image
   66.49 - * in2: virtual address of vhpt table
   66.50 - */
   66.51 -GLOBAL_ENTRY(vmx_purge_double_mapping)
   66.52 -    alloc loc1 = ar.pfs,5,9,0,0
   66.53 -    mov loc0 = rp
   66.54 -    movl r8 = 1f
   66.55 -    ;;
   66.56 -    movl loc4 = KERNEL_TR_PAGE_SHIFT
   66.57 -    movl loc5 = VCPU_TLB_SHIFT
   66.58 -    mov loc6 = psr
   66.59 -    movl loc7 = XEN_RR7_SWITCH_STUB
   66.60 -    mov loc8 = (1<<VMX_PURGE_RR7)
   66.61 -    ;;
   66.62 -    srlz.i
   66.63 -    ;;
   66.64 -    rsm psr.i | psr.ic
   66.65 -    ;;
   66.66 -    srlz.i
   66.67 -    ;;
   66.68 -    mov ar.rsc = 0
   66.69 -    mov b6 = loc7
   66.70 -    mov rp = r8
   66.71 -    ;;
   66.72 -    br.sptk b6
   66.73 -1:
   66.74 -    mov ar.rsc = 3
   66.75 -    mov rp = loc0
   66.76 -    ;;
   66.77 -    mov psr.l = loc6
   66.78 -    ;;
   66.79 -    srlz.i
   66.80 -    ;;
   66.81 -    br.ret.sptk rp
   66.82 -END(vmx_purge_double_mapping)
   66.83  
   66.84  /*
   66.85 - * in0: new rr7
   66.86 - * in1: virtual address of xen image
   66.87 - * in2: virtual address of vhpt table
   66.88 - * in3: pte entry of xen image
   66.89 - * in4: pte entry of vhpt table
   66.90 - */
   66.91 -GLOBAL_ENTRY(vmx_insert_double_mapping)
   66.92 -    alloc loc1 = ar.pfs,5,9,0,0
   66.93 -    mov loc0 = rp
   66.94 -    movl loc2 = IA64_TR_XEN_IN_DOM // TR number for xen image
   66.95 -    ;;
   66.96 -    movl loc3 = IA64_TR_VHPT_IN_DOM	// TR number for vhpt table
   66.97 -    movl r8 = 1f
   66.98 -    movl loc4 = KERNEL_TR_PAGE_SHIFT
   66.99 -    ;;
  66.100 -    movl loc5 = VCPU_TLB_SHIFT
  66.101 -    mov loc6 = psr
  66.102 -    movl loc7 = XEN_RR7_SWITCH_STUB
  66.103 -    ;;
  66.104 -    srlz.i
  66.105 -    ;;
  66.106 -    rsm psr.i | psr.ic
  66.107 -    mov loc8 = (1<<VMX_INSERT_RR7)
  66.108 -    ;;
  66.109 -    srlz.i
  66.110 -    ;;
  66.111 -    mov ar.rsc = 0
  66.112 -    mov b6 = loc7
  66.113 -    mov rp = r8
  66.114 -    ;;
  66.115 -    br.sptk b6
  66.116 -1:
  66.117 -    mov ar.rsc = 3
  66.118 -    mov rp = loc0
  66.119 -    ;;
  66.120 -    mov psr.l = loc6
  66.121 -    ;;
  66.122 -    srlz.i
  66.123 -    ;;
  66.124 -    br.ret.sptk rp
  66.125 -END(vmx_insert_double_mapping)
  66.126 -
  66.127 -    .align PAGE_SIZE
  66.128 -/*
  66.129 - * Stub to add double mapping for new domain, which shouldn't
  66.130 - * access any memory when active. Before reaching this point,
  66.131 - * both psr.i/ic is cleared and rse is set in lazy mode.
  66.132 + * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  66.133 + *  need to switch to bank 0 and doesn't restore the scratch registers.
  66.134 + *  To avoid leaking kernel bits, the scratch registers are set to
  66.135 + *  the following known-to-be-safe values:
  66.136   *
  66.137 - * in0: new rr7
  66.138 - * in1: virtual address of xen image
  66.139 - * in2: virtual address of vhpt table
  66.140 - * in3: pte entry of xen image
  66.141 - * in4: pte entry of vhpt table
  66.142 - * loc2: TR number for xen image
  66.143 - * loc3: TR number for vhpt table
  66.144 - * loc4: page size for xen image
  66.145 - * loc5: page size of vhpt table
  66.146 - * loc7: free to use
  66.147 - * loc8: purge or insert
  66.148 - * r8: will contain old rid value
  66.149 + *        r1: restored (global pointer)
  66.150 + *        r2: cleared
  66.151 + *        r3: 1 (when returning to user-level)
  66.152 + *        r8-r11: restored (syscall return value(s))
  66.153 + *       r12: restored (user-level stack pointer)
  66.154 + *       r13: restored (user-level thread pointer)
  66.155 + *       r14: set to __kernel_syscall_via_epc
  66.156 + *       r15: restored (syscall #)
  66.157 + *       r16-r17: cleared
  66.158 + *       r18: user-level b6
  66.159 + *       r19: cleared
  66.160 + *       r20: user-level ar.fpsr
  66.161 + *       r21: user-level b0
  66.162 + *       r22: cleared
  66.163 + *       r23: user-level ar.bspstore
  66.164 + *       r24: user-level ar.rnat
  66.165 + *       r25: user-level ar.unat
  66.166 + *       r26: user-level ar.pfs
  66.167 + *       r27: user-level ar.rsc
  66.168 + *       r28: user-level ip
  66.169 + *       r29: user-level psr
  66.170 + *       r30: user-level cfm
  66.171 + *       r31: user-level pr
  66.172 + *        f6-f11: cleared
  66.173 + *        pr: restored (user-level pr)
  66.174 + *        b0: restored (user-level rp)
  66.175 + *        b6: restored
  66.176 + *        b7: set to __kernel_syscall_via_epc
  66.177 + *        ar.unat: restored (user-level ar.unat)
  66.178 + *        ar.pfs: restored (user-level ar.pfs)
  66.179 + *        ar.rsc: restored (user-level ar.rsc)
  66.180 + *        ar.rnat: restored (user-level ar.rnat)
  66.181 + *        ar.bspstore: restored (user-level ar.bspstore)
  66.182 + *        ar.fpsr: restored (user-level ar.fpsr)
  66.183 + *        ar.ccv: cleared
  66.184 + *        ar.csd: cleared
  66.185 + *        ar.ssd: cleared
  66.186   */
  66.187 -GLOBAL_ENTRY(vmx_switch_rr7)
  66.188 -    movl loc7 = (7<<61)
  66.189 -    dep.z loc4 = loc4, 2, 6
  66.190 -    dep.z loc5 = loc5, 2, 6
  66.191 +GLOBAL_ENTRY(ia64_leave_hypercall)
  66.192 +    PT_REGS_UNWIND_INFO(0)
  66.193 +    /*
  66.194 +     * work.need_resched etc. mustn't get changed by this CPU before it returns to
  66.195 +     * user- or fsys-mode, hence we disable interrupts early on.
  66.196 +     *
  66.197 +     * p6 controls whether current_thread_info()->flags needs to be check for
  66.198 +     * extra work.  We always check for extra work when returning to user-level.
  66.199 +     * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
  66.200 +     * is 0.  After extra work processing has been completed, execution
  66.201 +     * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
  66.202 +     * needs to be redone.
  66.203 +     */
  66.204 +(pUStk) rsm psr.i
  66.205 +    cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
  66.206 +(pUStk) cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
  66.207      ;;
  66.208 -    tbit.nz p6,p7=loc8, VMX_INSERT_RR7
  66.209 -    mov r8 = rr[loc7]
  66.210 +    br.call.sptk.many b0=leave_hypervisor_tail
  66.211 +.work_processed_syscall:
  66.212 +    //clean up bank 1 registers
  66.213 +    mov r16=r0
  66.214 +    mov r17=r0
  66.215 +    mov r18=r0
  66.216 +    mov r19=r0
  66.217 +    mov r20=r0
  66.218 +    mov r21=r0
  66.219 +    mov r22=r0
  66.220 +    mov r23=r0
  66.221 +    mov r24=r0
  66.222 +    mov r25=r0
  66.223 +    mov r26=r0
  66.224 +    mov r27=r0
  66.225 +    mov r28=r0
  66.226 +    mov r29=r0
  66.227 +    mov r30=r0
  66.228 +    mov r31=r0
  66.229 +    bsw.0
  66.230      ;;
  66.231 -    mov rr[loc7] = in0
  66.232 -(p6)mov cr.ifa = in1
  66.233 -(p6)mov cr.itir = loc4
  66.234 +    adds r2=PT(LOADRS)+16,r12
  66.235 +    adds r3=PT(AR_BSPSTORE)+16,r12
  66.236 +#ifndef XEN
  66.237 +    adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
  66.238 +    ;;
  66.239 +(p6) ld4 r31=[r18]				// load current_thread_info()->flags
  66.240 +#endif
  66.241 +    ;;
  66.242 +    ld8 r20=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
  66.243 +    nop.i 0
  66.244      ;;
  66.245 -    srlz.i
  66.246 +//  mov r16=ar.bsp				// M2  get existing backing store pointer
  66.247 +    ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
  66.248 +#ifndef XEN
  66.249 +(p6)    and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
  66.250 +#endif
  66.251      ;;
  66.252 -(p6)itr.i itr[loc2] = in3
  66.253 -(p7)ptr.i in1, loc4
  66.254 +    ld8 r24=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
  66.255 +#ifndef XEN
  66.256 +(p6)    cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
  66.257 +(p6)    br.cond.spnt .work_pending_syscall
  66.258 +#endif
  66.259 +    ;;
  66.260 +    // start restoring the state saved on the kernel stack (struct pt_regs):
  66.261 +    ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
  66.262 +    ld8 r11=[r3],PT(CR_IIP)-PT(R11)
  66.263 +//(pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
  66.264      ;;
  66.265 -(p6)itr.d dtr[loc2] = in3
  66.266 -(p7)ptr.d in1, loc4
  66.267 +    invala			// M0|1 invalidate ALAT
  66.268 +    rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
  66.269 +    cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
  66.270 +
  66.271 +    ld8 r31=[r2],32		// M0|1 load cr.ipsr
  66.272 +    ld8 r30=[r3],16		// M0|1 load cr.iip
  66.273 +    ;;
  66.274 +//  ld8 r29=[r2],16		// M0|1 load cr.ifs
  66.275 +    ld8 r28=[r3],16		// M0|1 load ar.unat
  66.276 +//(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
  66.277      ;;
  66.278 -    srlz.i
  66.279 +    ld8 r27=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
  66.280 +(pKStk) mov r22=psr			// M2   read PSR now that interrupts are disabled
  66.281 +    nop 0
  66.282 +    ;;
  66.283 +    ld8 r22=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
  66.284 +    ld8 r26=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
  66.285 +    mov f6=f0			// F    clear f6
  66.286      ;;
  66.287 -(p6)mov cr.ifa = in2
  66.288 -(p6)mov cr.itir = loc5
  66.289 +    ld8 r25=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
  66.290 +    ld8 r23=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
  66.291 +    mov f7=f0				// F    clear f7
  66.292 +    ;;
  66.293 +    ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
  66.294 +    ld8.fill r1=[r3],16			// M0|1 load r1
  66.295 +//(pUStk) mov r17=1				// A
  66.296      ;;
  66.297 -(p6)itr.d dtr[loc3] = in4
  66.298 -(p7)ptr.d in2, loc5
  66.299 +//(pUStk) st1 [r14]=r17				// M2|3
  66.300 +    ld8.fill r13=[r3],16			// M0|1
  66.301 +    mov f8=f0				// F    clear f8
  66.302      ;;
  66.303 -    srlz.i
  66.304 -    ;;
  66.305 -    mov rr[loc7] = r8
  66.306 +    ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
  66.307 +#ifdef XEN    
  66.308 +    ld8.fill r2=[r3]			// M0|1
  66.309 +#else    
  66.310 +    ld8.fill r15=[r3]			// M0|1 restore r15
  66.311 +#endif    
  66.312 +    mov b6=r18				// I0   restore b6
  66.313 +    mov ar.fpsr=r20
  66.314 +//  addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
  66.315 +    mov f9=f0					// F    clear f9
  66.316 +//(pKStk) br.cond.dpnt.many skip_rbs_switch		// B
  66.317 +
  66.318 +//  srlz.d				// M0   ensure interruption collection is off (for cover)
  66.319 +//  shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
  66.320 +    mov r3=r21
  66.321 +    cover				// B    add current frame into dirty partition & set cr.ifs
  66.322      ;;
  66.323 -    srlz.i
  66.324 -    br.sptk rp
  66.325 -END(vmx_switch_rr7)
  66.326 -    .align PAGE_SIZE
  66.327 +//(pUStk) ld4 r17=[r17]			// M0|1 r17 = cpu_data->phys_stacked_size_p8
  66.328 +    mov r19=ar.bsp			// M2   get new backing store pointer
  66.329 +    addl r18=IA64_RBS_OFFSET, r3
  66.330 +    ;;
  66.331 +    mov r3=r0
  66.332 +    sub r18=r19,r18     // get byte size of existing "dirty" partition
  66.333 +    ;;
  66.334 +    shl r20=r18,16     // set rsc.load 
  66.335 +    mov f10=f0			// F    clear f10
  66.336 +#ifdef XEN
  66.337 +    mov r14=r0
  66.338 +#else
  66.339 +    movl r14=__kernel_syscall_via_epc // X
  66.340 +#endif
  66.341 +    ;;
  66.342 +    mov.m ar.csd=r0			// M2   clear ar.csd
  66.343 +    mov.m ar.ccv=r0			// M2   clear ar.ccv
  66.344 +    mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)
  66.345  
  66.346 -#else
  66.347 +    mov.m ar.ssd=r0			// M2   clear ar.ssd
  66.348 +    mov f11=f0			// F    clear f11
  66.349 +    br.cond.sptk.many vmx_rbs_switch	// B
  66.350 +END(ia64_leave_hypercall)
  66.351 +
  66.352 +
  66.353  /*
  66.354   * in0: new rr7
  66.355   * in1: virtual address of shared_info
  66.356 @@ -707,5 +743,3 @@ 1:
  66.357     srlz.d              // seralize restoration of psr.l
  66.358     br.ret.sptk.many rp
  66.359  END(vmx_switch_rr7)
  66.360 -#endif
  66.361 -
    67.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Fri Feb 09 14:43:22 2007 -0600
    67.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Feb 14 19:01:35 2007 +0000
    67.3 @@ -290,7 +290,7 @@ static void vmx_release_assist_channel(s
    67.4   * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
    67.5   * is registered here.
    67.6   */
    67.7 -void
    67.8 +int
    67.9  vmx_final_setup_guest(struct vcpu *v)
   67.10  {
   67.11  	vpd_t *vpd;
   67.12 @@ -305,7 +305,8 @@ vmx_final_setup_guest(struct vcpu *v)
   67.13  	 * to this solution. Maybe it can be deferred until we know created
   67.14  	 * one as vmx domain */
   67.15  #ifndef HASH_VHPT
   67.16 -	init_domain_tlb(v);
   67.17 +	if (init_domain_tlb(v) != 0)
   67.18 +		return -1;
   67.19  #endif
   67.20  	vmx_create_event_channels(v);
   67.21  
   67.22 @@ -322,6 +323,8 @@ vmx_final_setup_guest(struct vcpu *v)
   67.23  
   67.24  	/* Set up guest 's indicator for VTi domain*/
   67.25  	set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
   67.26 +
   67.27 +	return 0;
   67.28  }
   67.29  
   67.30  void
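
Taken together, the vmmu.c and vmx_init.c hunks turn the VTi TLB/VHPT setup from panic-on-failure into an error return with cleanup of the partially built state. Condensed into a sketch (the VTLB allocation helper name is illustrative; the caller above vmx_final_setup_guest() is not shown in this changeset and is merely assumed to propagate the failure):

    /* vmmu.c: allocate the VHPT, then the VTLB; undo the VHPT on failure. */
    int init_domain_tlb(struct vcpu *v)
    {
        if (init_domain_vhpt(v) != 0)        /* prints a message, returns -1 */
            return -1;
        if (alloc_vtlb_pages(v) != 0) {      /* illustrative helper name */
            free_domain_vhpt(v);             /* unwind the partial allocation */
            return -1;
        }
        return 0;
    }

    /* vmx_init.c: per-vcpu VTi setup now reports the failure upwards. */
    int vmx_final_setup_guest(struct vcpu *v)
    {
        if (init_domain_tlb(v) != 0)
            return -1;
        /* ... event channels, VPD registration, ARCH_VMX_DOMAIN flag ... */
        return 0;
    }
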
    68.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Fri Feb 09 14:43:22 2007 -0600
    68.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Wed Feb 14 19:01:35 2007 +0000
    68.3 @@ -99,7 +99,7 @@ inject_guest_interruption(VCPU *vcpu, u6
    68.4      pt_isr.ir = 0;
    68.5      VMX(vcpu,cr_isr) = pt_isr.val;
    68.6      collect_interruption(vcpu);
    68.7 -
    68.8 +    vmx_ia64_set_dcr(vcpu);
    68.9      vmx_vcpu_get_iva(vcpu,&viva);
   68.10      regs->cr_iip = viva + vec;
   68.11  }
    69.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Fri Feb 09 14:43:22 2007 -0600
    69.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Wed Feb 14 19:01:35 2007 +0000
    69.3 @@ -59,6 +59,14 @@
    69.4  #include <asm/unistd.h>
    69.5  #include <asm/vhpt.h>
    69.6  #include <asm/virt_event.h>
    69.7 +#include <xen/errno.h>
    69.8 +
    69.9 +#if 1
   69.10 +# define PSR_DEFAULT_BITS   psr.ac
   69.11 +#else
   69.12 +# define PSR_DEFAULT_BITS   0
   69.13 +#endif
   69.14 +
   69.15  
   69.16  #ifdef VTI_DEBUG
   69.17    /*
   69.18 @@ -431,17 +439,152 @@ ENTRY(vmx_break_fault)
   69.19      VMX_DBG_FAULT(11)
   69.20      mov r31=pr
   69.21      mov r19=11
   69.22 -    mov r30=cr.iim
   69.23 +    mov r17=cr.iim
   69.24      ;;
   69.25  #ifdef VTI_DEBUG
   69.26      // break 0 is already handled in vmx_ia64_handle_break.
   69.27 -    cmp.eq p6,p7=r30,r0
   69.28 +    cmp.eq p6,p7=r17,r0
   69.29      (p6) br.sptk vmx_fault_11
   69.30      ;;
   69.31  #endif
   69.32 -    br.sptk.many vmx_dispatch_break_fault
   69.33 +    mov r29=cr.ipsr
   69.34 +    adds r22=IA64_VCPU_BREAKIMM_OFFSET, r21
   69.35 +    ;;
   69.36 +    ld4 r22=[r22]
   69.37 +    extr.u r24=r29,IA64_PSR_CPL0_BIT,2
   69.38 +    cmp.eq p0,p6=r0,r0
   69.39 +    ;;
   69.40 +    cmp.ne.or p6,p0=r22,r17
   69.41 +    cmp.ne.or p6,p0=r0,r24
   69.42 +(p6) br.sptk.many vmx_dispatch_break_fault
   69.43 +    ;;
   69.44 +   /*
   69.45 +    * The streamlined system call entry/exit paths only save/restore the initial part
   69.46 +    * of pt_regs.  This implies that the callers of system-calls must adhere to the
   69.47 +    * normal procedure calling conventions.
   69.48 +    *
   69.49 +    *   Registers to be saved & restored:
   69.50 +    *   CR registers: cr.ipsr, cr.iip, cr.ifs
   69.51 +    *   AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
   69.52 +    *   others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
   69.53 +    *   Registers to be restored only:
   69.54 +    *   r8-r11: output value from the system call.
   69.55 +    *
   69.56 +    * During system call exit, scratch registers (including r15) are modified/cleared
   69.57 +    * to prevent leaking bits from kernel to user level.
   69.58 +    */
   69.59 +   
   69.60 +//  mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
   69.61 +    mov r14=r21
   69.62 +    bsw.1					// B (6 cyc) regs are saved, switch to bank 1
   69.63 +    ;;   
   69.64 +    mov r29=cr.ipsr				// M2 (12 cyc)
   69.65 +    mov r31=pr				// I0 (2 cyc)
   69.66 +    mov r16=r14
   69.67 +    mov r15=r2
   69.68 +
   69.69 +    mov r17=cr.iim				// M2 (2 cyc)
   69.70 +    mov.m r27=ar.rsc			// M2 (12 cyc)
   69.71 +//  mov r18=__IA64_BREAK_SYSCALL		// A
   69.72 +
   69.73 +    mov.m ar.rsc=0				// M2
   69.74 +    mov.m r21=ar.fpsr			// M2 (12 cyc)
   69.75 +    mov r19=b6				// I0 (2 cyc)
   69.76      ;;
   69.77 -    VMX_FAULT(11);
   69.78 +    mov.m r23=ar.bspstore			// M2 (12 cyc)
   69.79 +    mov.m r24=ar.rnat			// M2 (5 cyc)
   69.80 +    mov.i r26=ar.pfs			// I0 (2 cyc)
   69.81 +
   69.82 +    invala					// M0|1
   69.83 +    nop.m 0					// M
   69.84 +    mov r20=r1				// A			save r1
   69.85 +
   69.86 +    nop.m 0
   69.87 +//  movl r30=sys_call_table			// X
   69.88 +    movl r30=ia64_hypercall_table			// X
   69.89 +
   69.90 +    mov r28=cr.iip				// M2 (2 cyc)
   69.91 +//  cmp.eq p0,p7=r18,r17			// I0 is this a system call?
   69.92 +//(p7)  br.cond.spnt non_syscall		// B  no ->
   69.93 +   //
   69.94 +   // From this point on, we are definitely on the syscall-path
   69.95 +   // and we can use (non-banked) scratch registers.
   69.96 +   //
   69.97 +///////////////////////////////////////////////////////////////////////
   69.98 +    mov r1=r16				// A    move task-pointer to "addl"-addressable reg
   69.99 +    mov r2=r16				// A    setup r2 for ia64_syscall_setup
  69.100 +//  add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags
  69.101 +
  69.102 +//  adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
  69.103 +//  adds r15=-1024,r15			// A    subtract 1024 from syscall number
  69.104 +//  mov r3=NR_syscalls - 1
  69.105 +    mov r3=NR_hypercalls - 1
  69.106 +    ;;
  69.107 +//  ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
  69.108 +//  ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
  69.109 +    mov r9=r0               // force flags = 0
  69.110 +    extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr
  69.111 +
   69.112 +    shladd r30=r15,3,r30			// A    r30 = ia64_hypercall_table + 8*hypercall_nr
  69.113 +    addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
  69.114 +    cmp.leu p6,p7=r15,r3			// A    syscall number in range?
  69.115 +    ;;
  69.116 +
  69.117 +    lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
  69.118 +(p6) ld8 r30=[r30]				// M0|1 load address of syscall entry point
  69.119 +    tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?
  69.120 +
  69.121 +    mov.m ar.bspstore=r22			// M2   switch to kernel RBS
  69.122 +    cmp.eq p8,p9=2,r8			// A    isr.ei==2?
  69.123 +    ;;
  69.124 +
  69.125 +(p8) mov r8=0				// A    clear ei to 0
  69.126 +//(p7)  movl r30=sys_ni_syscall			// X
  69.127 +(p7) movl r30=do_ni_hypercall			// X
  69.128 +
  69.129 +(p8) adds r28=16,r28				// A    switch cr.iip to next bundle
  69.130 +(p9) adds r8=1,r8				// A    increment ei to next slot
  69.131 +    nop.i 0
  69.132 +    ;;
  69.133 +
  69.134 +    mov.m r25=ar.unat			// M2 (5 cyc)
  69.135 +    dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
  69.136 +//  adds r15=1024,r15			// A    restore original syscall number
  69.137 +   //
  69.138 +   // If any of the above loads miss in L1D, we'll stall here until
  69.139 +   // the data arrives.
  69.140 +   //
  69.141 +///////////////////////////////////////////////////////////////////////
  69.142 +//    st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
  69.143 +    mov b6=r30				// I0   setup syscall handler branch reg early
  69.144 +    cmp.ne pKStk,pUStk=r0,r0		// A    were we on kernel stacks already?
  69.145 +
  69.146 +//  and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
  69.147 +    mov r18=ar.bsp				// M2 (12 cyc)
  69.148 +   ;;
  69.149 +(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
  69.150 +//  cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
  69.151 +//    br.call.sptk.many b7=ia64_syscall_setup	// B
  69.152 +    br.call.sptk.many b7=ia64_hypercall_setup	// B
  69.153 +1:
  69.154 +    mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
  69.155 +//    nop 0
  69.156 +//    bsw.1					// B (6 cyc) regs are saved, switch to bank 1
  69.157 +    ;;
  69.158 +    ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
  69.159 +//    movl r3=ia64_ret_from_syscall		// X
  69.160 +    movl r3=ia64_leave_hypercall		// X
  69.161 +    ;;
  69.162 +
  69.163 +    srlz.i					// M0   ensure interruption collection is on
  69.164 +    mov rp=r3				// I0   set the real return addr
  69.165 +    //(p10) br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
  69.166 +    (p15)   ssm psr.i				// M2   restore psr.i
   69.167 +    //(p14) br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
   69.168 +    br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
  69.169 +//  br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
  69.170 +   ;;
  69.171 +   VMX_FAULT(11)
  69.172  END(vmx_break_fault)
  69.173  
  69.174      .org vmx_ia64_ivt+0x3000
  69.175 @@ -613,6 +756,146 @@ END(vmx_virtual_exirq)
  69.176  // 0x3800 Entry 14 (size 64 bundles) Reserved
  69.177      VMX_DBG_FAULT(14)
  69.178      VMX_FAULT(14)
  69.179 +    // this code segment is from 2.6.16.13
  69.180 +    
  69.181 +    /*
  69.182 +     * There is no particular reason for this code to be here, other than that
  69.183 +     * there happens to be space here that would go unused otherwise.  If this
   69.184 +     * fault ever gets "unreserved", simply move the following code to a more
  69.185 +     * suitable spot...
  69.186 +     *
  69.187 +     * ia64_syscall_setup() is a separate subroutine so that it can
  69.188 +     *	allocate stacked registers so it can safely demine any
  69.189 +     *	potential NaT values from the input registers.
  69.190 +     *
  69.191 +     * On entry:
  69.192 +     *	- executing on bank 0 or bank 1 register set (doesn't matter)
  69.193 +     *	-  r1: stack pointer
  69.194 +     *	-  r2: current task pointer
  69.195 +     *	-  r3: preserved
  69.196 +     *	- r11: original contents (saved ar.pfs to be saved)
  69.197 +     *	- r12: original contents (sp to be saved)
  69.198 +     *	- r13: original contents (tp to be saved)
  69.199 +     *	- r15: original contents (syscall # to be saved)
  69.200 +     *	- r18: saved bsp (after switching to kernel stack)
  69.201 +     *	- r19: saved b6
  69.202 +     *	- r20: saved r1 (gp)
  69.203 +     *	- r21: saved ar.fpsr
  69.204 +     *	- r22: kernel's register backing store base (krbs_base)
  69.205 +     *	- r23: saved ar.bspstore
  69.206 +     *	- r24: saved ar.rnat
  69.207 +     *	- r25: saved ar.unat
  69.208 +     *	- r26: saved ar.pfs
  69.209 +     *	- r27: saved ar.rsc
  69.210 +     *	- r28: saved cr.iip
  69.211 +     *	- r29: saved cr.ipsr
  69.212 +     *	- r31: saved pr
  69.213 +     *	-  b0: original contents (to be saved)
  69.214 +     * On exit:
  69.215 +     *	-  p10: TRUE if syscall is invoked with more than 8 out
  69.216 +     *		registers or r15's Nat is true
  69.217 +     *	-  r1: kernel's gp
  69.218 +     *	-  r3: preserved (same as on entry)
  69.219 +     *	-  r8: -EINVAL if p10 is true
  69.220 +     *	- r12: points to kernel stack
  69.221 +     *	- r13: points to current task
  69.222 +     *	- r14: preserved (same as on entry)
  69.223 +     *	- p13: preserved
  69.224 +     *	- p15: TRUE if interrupts need to be re-enabled
  69.225 +     *	- ar.fpsr: set to kernel settings
  69.226 +     *	-  b6: preserved (same as on entry)
  69.227 +     */
  69.228 +GLOBAL_ENTRY(ia64_hypercall_setup)
  69.229 +#if PT(B6) != 0
  69.230 +# error This code assumes that b6 is the first field in pt_regs.
  69.231 +#endif
  69.232 +    st8 [r1]=r19				// save b6
  69.233 +    add r16=PT(CR_IPSR),r1			// initialize first base pointer
  69.234 +    add r17=PT(R11),r1			// initialize second base pointer
  69.235 +    ;;
  69.236 +    alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
  69.237 +    st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
  69.238 +    tnat.nz p8,p0=in0
  69.239 +
  69.240 +    st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
  69.241 +    tnat.nz p9,p0=in1
  69.242 +(pKStk) mov r18=r0				// make sure r18 isn't NaT
  69.243 +    ;;
  69.244 +
  69.245 +    st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
  69.246 +    st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
  69.247 +    mov r28=b0				// save b0 (2 cyc)
  69.248 +    ;;
  69.249 +
  69.250 +    st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
  69.251 +    dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
  69.252 +(p8)    mov in0=-1
  69.253 +    ;;
  69.254 +
  69.255 +    st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
  69.256 +    extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
  69.257 +    and r8=0x7f,r19		// A		// get sof of ar.pfs
  69.258 +
  69.259 +    st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
  69.260 +    tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
  69.261 +(p9)    mov in1=-1
  69.262 +    ;;
  69.263 +
  69.264 +(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
  69.265 +    tnat.nz p10,p0=in2
  69.266 +    add r11=8,r11
  69.267 +    ;;
  69.268 +(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
  69.269 +(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
  69.270 +    tnat.nz p11,p0=in3
  69.271 +    ;;
  69.272 +(p10)   mov in2=-1
  69.273 +    tnat.nz p12,p0=in4				// [I0]
  69.274 +(p11)   mov in3=-1
  69.275 +    ;;
  69.276 +(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
  69.277 +(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
  69.278 +    shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
  69.279 +    ;;
  69.280 +    st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
  69.281 +    st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
  69.282 +    tnat.nz p13,p0=in5				// [I0]
  69.283 +    ;;
  69.284 +    st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
  69.285 +    st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
  69.286 +(p12)   mov in4=-1
  69.287 +    ;;
  69.288 +
  69.289 +.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
  69.290 +.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
  69.291 +(p13)   mov in5=-1
  69.292 +    ;;
  69.293 +    st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
  69.294 +    tnat.nz p13,p0=in6
  69.295 +    cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
  69.296 +    ;;
  69.297 +    mov r8=1
  69.298 +(p9)    tnat.nz p10,p0=r15
  69.299 +    adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)
  69.300 +
  69.301 +    st8.spill [r17]=r15			// save r15
  69.302 +    tnat.nz p8,p0=in7
  69.303 +    nop.i 0
  69.304 +
  69.305 +    mov r13=r2				// establish `current'
  69.306 +    movl r1=__gp				// establish kernel global pointer
  69.307 +    ;;
  69.308 +    st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
  69.309 +(p13)   mov in6=-1
  69.310 +(p8)    mov in7=-1
  69.311 +
  69.312 +    cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
  69.313 +    movl r17=FPSR_DEFAULT
  69.314 +    ;;
  69.315 +    mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
  69.316 +(p10)   mov r8=-EINVAL
  69.317 +    br.ret.sptk.many b7
  69.318 +END(ia64_hypercall_setup)
  69.319  
  69.320  
  69.321      .org vmx_ia64_ivt+0x3c00
  69.322 @@ -795,12 +1078,14 @@ ENTRY(vmx_virtualization_fault)
  69.323      cmp.eq p9,p0=EVENT_RSM,r24
  69.324      cmp.eq p10,p0=EVENT_SSM,r24
  69.325      cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
  69.326 +    cmp.eq p12,p0=EVENT_THASH,r24 
  69.327      (p6) br.dptk.many vmx_asm_mov_from_ar
  69.328      (p7) br.dptk.many vmx_asm_mov_from_rr
  69.329      (p8) br.dptk.many vmx_asm_mov_to_rr
  69.330      (p9) br.dptk.many vmx_asm_rsm
  69.331      (p10) br.dptk.many vmx_asm_ssm
  69.332      (p11) br.dptk.many vmx_asm_mov_to_psr
  69.333 +    (p12) br.dptk.many vmx_asm_thash
  69.334      ;;
  69.335  vmx_virtualization_fault_back:
  69.336      mov r19=37
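
The large vmx_break_fault addition earlier in this file installs a streamlined hypercall path: a break whose immediate matches the vcpu's breakimm, taken at CPL 0, skips the full reflection and instead saves minimal state via ia64_hypercall_setup, indexes ia64_hypercall_table with r15 (out-of-range or NaT numbers fall back to do_ni_hypercall) and returns through ia64_leave_hypercall. The entry test, rendered in C for clarity (only the names that appear in the hunk are taken from it; the function itself is a sketch):

    /* Fast-path test at the top of vmx_break_fault (sketch only). */
    static int is_fast_hypercall(uint64_t iim, uint64_t ipsr, uint64_t breakimm)
    {
            uint64_t cpl = (ipsr >> IA64_PSR_CPL0_BIT) & 3;  /* ipsr.cpl */
            return iim == breakimm && cpl == 0;
            /* otherwise: br.sptk.many vmx_dispatch_break_fault */
    }
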
    70.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Fri Feb 09 14:43:22 2007 -0600
    70.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Wed Feb 14 19:01:35 2007 +0000
    70.3 @@ -174,6 +174,7 @@
    70.4      ;;                                          \
    70.5      st8 [r16]=r29,16;   /* save b0 */                           \
    70.6      st8 [r17]=r18,16;   /* save ar.rsc value for "loadrs" */                \
    70.7 +    cmp.eq pNonSys,pSys=r0,r0   /* initialize pSys=0, pNonSys=1 */          \
    70.8      ;;                                          \
    70.9  .mem.offset 0,0; st8.spill [r16]=r20,16;    /* save original r1 */              \
   70.10  .mem.offset 8,0; st8.spill [r17]=r12,16;                            \
    71.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Feb 09 14:43:22 2007 -0600
    71.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Wed Feb 14 19:01:35 2007 +0000
    71.3 @@ -188,7 +188,7 @@ vmx_load_all_rr(VCPU *vcpu)
    71.4  			(void *)vcpu->arch.privregs,
    71.5  			(void *)vcpu->arch.vhpt.hash, pal_vaddr );
    71.6  	ia64_set_pta(VMX(vcpu, mpta));
    71.7 -	ia64_set_dcr(VMX(vcpu, mdcr));
    71.8 +	vmx_ia64_set_dcr(vcpu);
    71.9  
   71.10  	ia64_srlz_d();
   71.11  	ia64_set_psr(psr);
    72.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Fri Feb 09 14:43:22 2007 -0600
    72.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Wed Feb 14 19:01:35 2007 +0000
    72.3 @@ -79,36 +79,56 @@ static u64 vec2off[68] = {0x0,0x400,0x80
    72.4  
    72.5  
    72.6  void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
    72.7 -                              u64 vector, REGS *regs)
    72.8 +                              u64 vec, REGS *regs)
    72.9  {
   72.10 -    u64 status;
   72.11 +    u64 status, vector;
   72.12      VCPU *vcpu = current;
   72.13      u64 vpsr = VCPU(vcpu, vpsr);
   72.14 -    vector=vec2off[vector];
   72.15 +    
   72.16 +    vector = vec2off[vec];
   72.17      if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
   72.18          panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
   72.19      }
   72.20 -    else{ // handle fpswa emulation
   72.21 +
   72.22 +    switch (vec) {
   72.23 +
   72.24 +    case 25:	// IA64_DISABLED_FPREG_VECTOR
   72.25 +
   72.26 +        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
   72.27 +            FP_PSR(vcpu) = IA64_PSR_MFH;
   72.28 +            if (__ia64_per_cpu_var(fp_owner) != vcpu)
   72.29 +                __ia64_load_fpu(vcpu->arch._thread.fph);
   72.30 +        }
   72.31 +        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
   72.32 +            regs->cr_ipsr &= ~IA64_PSR_DFH;
   72.33 +            return;
   72.34 +        }
   72.35 +
   72.36 +        break;       
   72.37 +        
   72.38 +    case 32:	// IA64_FP_FAULT_VECTOR
   72.39 +        // handle fpswa emulation
   72.40          // fp fault
   72.41 -        if (vector == IA64_FP_FAULT_VECTOR) {
   72.42 -            status = handle_fpu_swa(1, regs, isr);
   72.43 -            if (!status) {
   72.44 -                vcpu_increment_iip(vcpu);
   72.45 -                return;
   72.46 -            } else if (IA64_RETRY == status)
   72.47 -                return;
   72.48 -        }
   72.49 +        status = handle_fpu_swa(1, regs, isr);
   72.50 +        if (!status) {
   72.51 +            vcpu_increment_iip(vcpu);
   72.52 +            return;
   72.53 +        } else if (IA64_RETRY == status)
   72.54 +            return;
   72.55 +        break;
   72.56 +
   72.57 +    case 33:	// IA64_FP_TRAP_VECTOR
   72.58          //fp trap
   72.59 -        else if (vector == IA64_FP_TRAP_VECTOR) {
   72.60 -            status = handle_fpu_swa(0, regs, isr);
   72.61 -            if (!status)
   72.62 -                return;
   72.63 -            else if (IA64_RETRY == status) {
   72.64 -                vcpu_decrement_iip(vcpu);
   72.65 -                return;
   72.66 -            }
   72.67 +        status = handle_fpu_swa(0, regs, isr);
   72.68 +        if (!status)
   72.69 +            return;
   72.70 +        else if (IA64_RETRY == status) {
   72.71 +            vcpu_decrement_iip(vcpu);
   72.72 +            return;
   72.73          }
   72.74 -    }
   72.75 +        break;
   72.76 +    
   72.77 +    } 
   72.78      VCPU(vcpu,isr)=isr;
   72.79      VCPU(vcpu,iipa) = regs->cr_iip;
   72.80      if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   72.81 @@ -194,7 +214,7 @@ void save_banked_regs_to_vpd(VCPU *v, RE
   72.82  // ONLY gets called from ia64_leave_kernel
   72.83  // ONLY call with interrupts disabled?? (else might miss one?)
   72.84  // NEVER successful if already reflecting a trap/fault because psr.i==0
   72.85 -void leave_hypervisor_tail(struct pt_regs *regs)
   72.86 +void leave_hypervisor_tail(void)
   72.87  {
   72.88      struct domain *d = current->domain;
   72.89      struct vcpu *v = current;
   72.90 @@ -207,17 +227,23 @@ void leave_hypervisor_tail(struct pt_reg
   72.91          local_irq_disable();
   72.92  
   72.93          if (v->vcpu_id == 0) {
   72.94 -            int callback_irq =
   72.95 +            unsigned long callback_irq =
   72.96                  d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
   72.97 +            /*
   72.98 +             * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
   72.99 +             *                  Domain = val[47:32], Bus  = val[31:16],
  72.100 +             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
   72.101 +             * val[63:56] == 0: val[55:0] is delivered as a GSI
  72.102 +             */
  72.103              if (callback_irq != 0 && local_events_need_delivery()) {
  72.104                  /* change level for para-device callback irq */
  72.105                  /* use level irq to send discrete event */
  72.106 -                if (callback_irq & IA64_CALLBACK_IRQ_RID) {
  72.107 -                    /* case of using Requester-ID as callback irq */
  72.108 -                    /* RID: '<#bus(8)><#dev(5)><#func(3)>' */
  72.109 -                    int dev = (callback_irq >> 3) & 0x1f;
  72.110 -                    viosapic_set_pci_irq(d, dev, 0, 1);
  72.111 -                    viosapic_set_pci_irq(d, dev, 0, 0);
  72.112 +                if ((uint8_t)(callback_irq >> 56) == 1) {
  72.113 +                    /* case of using PCI INTx line as callback irq */
  72.114 +                    int pdev = (callback_irq >> 11) & 0x1f;
  72.115 +                    int pintx = callback_irq & 3;
  72.116 +                    viosapic_set_pci_irq(d, pdev, pintx, 1);
  72.117 +                    viosapic_set_pci_irq(d, pdev, pintx, 0);
  72.118                  } else {
  72.119                      /* case of using GSI as callback irq */
  72.120                      viosapic_set_irq(d, callback_irq, 1);
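
The comment above documents the HVM_PARAM_CALLBACK_IRQ encoding that the new code decodes with shifts and masks. A minimal stand-alone sketch of that decoding, assuming only the field layout given in the comment (the helper name and output format are illustrative, not part of this changeset):

    #include <stdint.h>
    #include <stdio.h>

    /* val[63:56] == 1 -> PCI INTx: Domain = val[47:32], Bus = val[31:16],
     *                              DevFn = val[15:8],   IntX = val[1:0]
     * val[63:56] == 0 -> val[55:0] is delivered as a GSI.
     */
    static void decode_callback_irq(uint64_t val)
    {
        if ((uint8_t)(val >> 56) == 1) {
            unsigned int dom  = (val >> 32) & 0xffff;
            unsigned int bus  = (val >> 16) & 0xffff;
            unsigned int dev  = (val >> 11) & 0x1f;  /* DevFn[7:3] */
            unsigned int func = (val >>  8) & 0x7;   /* DevFn[2:0] */
            unsigned int intx = val & 0x3;           /* 0=INTA ... 3=INTD */
            printf("PCI INTx: dom %u bus %u dev %u fn %u INT%c\n",
                   dom, bus, dev, func, 'A' + intx);
        } else {
            printf("GSI %llu\n",
                   (unsigned long long)(val & ((1ULL << 56) - 1)));
        }
    }
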
  72.121 @@ -367,7 +393,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
  72.122      
  72.123          if (!vpsr.ic)
  72.124              misr.ni = 1;
  72.125 -        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
  72.126 +        if (!vhpt_enabled(v, vadr, INST_REF)) {
  72.127              vcpu_set_isr(v, misr.val);
  72.128              alt_itlb(v, vadr);
  72.129              return IA64_FAULT;
    73.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Feb 09 14:43:22 2007 -0600
    73.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Wed Feb 14 19:01:35 2007 +0000
    73.3 @@ -78,6 +78,22 @@ struct guest_psr_bundle guest_psr_buf[10
    73.4  unsigned long guest_psr_index = 0;
    73.5  #endif
    73.6  
    73.7 +
    73.8 +void
    73.9 +vmx_ia64_set_dcr(VCPU *v)   
   73.10 +{
   73.11 +    unsigned long dcr_bits = IA64_DEFAULT_DCR_BITS;
   73.12 +
    73.13 +    // if the guest is running at cpl > 0, set dcr.dm=1
    73.14 +    // if the guest is running at cpl = 0, set dcr.dm=0
    73.15 +    // because the guest OS may ld.s on a tr-mapped page.
   73.16 +    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
   73.17 +        dcr_bits &= ~IA64_DCR_DM;
   73.18 +
   73.19 +    ia64_set_dcr(dcr_bits);
   73.20 +}
   73.21 +
   73.22 +
   73.23  void
   73.24  vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
   73.25  {
   73.26 @@ -141,6 +157,9 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
   73.27  
   73.28      regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
   73.29  
   73.30 +    if (FP_PSR(vcpu) & IA64_PSR_DFH)
   73.31 +        regs->cr_ipsr |= IA64_PSR_DFH;
   73.32 +
   73.33      check_mm_mode_switch(vcpu, old_psr, new_psr);
   73.34      return ;
   73.35  }
   73.36 @@ -258,6 +277,7 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
   73.37      else
   73.38          vcpu_bsw0(vcpu);
   73.39      vmx_vcpu_set_psr(vcpu,psr);
   73.40 +    vmx_ia64_set_dcr(vcpu);
   73.41      ifs=VCPU(vcpu,ifs);
   73.42      if(ifs>>63)
   73.43          regs->cr_ifs = ifs;
    74.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Fri Feb 09 14:43:22 2007 -0600
    74.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Wed Feb 14 19:01:35 2007 +0000
    74.3 @@ -1234,7 +1234,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
    74.4  #endif  //CHECK_FAULT
    74.5      r2 = cr_igfld_mask(inst.M32.cr3,r2);
    74.6      switch (inst.M32.cr3) {
    74.7 -        case 0: return vmx_vcpu_set_dcr(vcpu,r2);
    74.8 +        case 0: return vcpu_set_dcr(vcpu,r2);
    74.9          case 1: return vmx_vcpu_set_itm(vcpu,r2);
   74.10          case 2: return vmx_vcpu_set_iva(vcpu,r2);
   74.11          case 8: return vmx_vcpu_set_pta(vcpu,r2);
   74.12 @@ -1299,7 +1299,7 @@ IA64FAULT vmx_emul_mov_from_cr(VCPU *vcp
   74.13  
   74.14  //    from_cr_cnt[inst.M33.cr3]++;
   74.15      switch (inst.M33.cr3) {
   74.16 -        case 0: return vmx_cr_get(dcr);
   74.17 +        case 0: return cr_get(dcr);
   74.18          case 1: return vmx_cr_get(itm);
   74.19          case 2: return vmx_cr_get(iva);
   74.20          case 8: return vmx_cr_get(pta);
    75.1 --- a/xen/arch/ia64/vmx/vtlb.c	Fri Feb 09 14:43:22 2007 -0600
    75.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Wed Feb 14 19:01:35 2007 +0000
    75.3 @@ -248,6 +248,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
    75.4                    "tnat.nz p6,p7=r9;;"
    75.5                    "(p6) mov %0=1;"
    75.6                    "(p6) mov r9=r0;"
    75.7 +                  "(p7) extr.u r9=r9,0,53;;"
    75.8                    "(p7) mov %0=r0;"
    75.9                    "(p7) st8 [%2]=r9;;"
   75.10                    "ssm psr.ic;;"
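
The added extr.u keeps only the low 53 bits of the PTE fetched from the guest VHPT before it is written back through *pte, presumably to strip the software-defined high bits. The same masking step expressed in C (a sketch of the arithmetic only, not of the surrounding psr.ic/NaT handling):

    #include <stdint.h>

    /* Equivalent of "extr.u r9=r9,0,53": zero-extend bits 0..52. */
    static inline uint64_t pte_low53(uint64_t pte)
    {
        return pte & ((1ULL << 53) - 1);
    }
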
   75.11 @@ -261,7 +262,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
   75.12   *  purge software guest tlb
   75.13   */
   75.14  
   75.15 -void vtlb_purge(VCPU *v, u64 va, u64 ps)
   75.16 +static void vtlb_purge(VCPU *v, u64 va, u64 ps)
   75.17  {
   75.18      thash_data_t *cur;
   75.19      u64 start, curadr, size, psbits, tag, rr_ps, num;
   75.20 @@ -442,6 +443,15 @@ void thash_purge_entries(VCPU *v, u64 va
   75.21      vhpt_purge(v, va, ps);
   75.22  }
   75.23  
   75.24 +void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
   75.25 +{
   75.26 +    u64 old_va = va;
   75.27 +    va = REGION_OFFSET(va);
   75.28 +    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
   75.29 +        vtlb_purge(v, va, ps);
   75.30 +    vhpt_purge(v, va, ps);
   75.31 +}
   75.32 +
   75.33  u64 translate_phy_pte(VCPU *v, u64 *pte, u64 itir, u64 va)
   75.34  {
   75.35      u64 ps, ps_mask, paddr, maddr;
    76.1 --- a/xen/arch/ia64/xen/dom_fw.c	Fri Feb 09 14:43:22 2007 -0600
    76.2 +++ b/xen/arch/ia64/xen/dom_fw.c	Wed Feb 14 19:01:35 2007 +0000
    76.3 @@ -599,7 +599,17 @@ complete_dom0_memmap(struct domain *d,
    76.4  
    76.5  		case EFI_UNUSABLE_MEMORY:
    76.6  		case EFI_PAL_CODE:
    76.7 -			/* Discard.  */
    76.8 +			/*
    76.9 +			 * We don't really need these, but holes in the
   76.10 +			 * memory map may cause Linux to assume there are
   76.11 +			 * uncacheable ranges within a granule.
   76.12 +			 */
   76.13 +			dom_md->type = EFI_UNUSABLE_MEMORY;
   76.14 +			dom_md->phys_addr = start;
   76.15 +			dom_md->virt_addr = 0;
   76.16 +			dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
   76.17 +			dom_md->attribute = EFI_MEMORY_WB;
   76.18 +			num_mds++;
   76.19  			break;
   76.20  
   76.21  		default:
    77.1 --- a/xen/arch/ia64/xen/domain.c	Fri Feb 09 14:43:22 2007 -0600
    77.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Feb 14 19:01:35 2007 +0000
    77.3 @@ -42,6 +42,7 @@
    77.4  #include <asm/vmx_vpd.h>
    77.5  #include <asm/vmx_phy_mode.h>
    77.6  #include <asm/vhpt.h>
    77.7 +#include <asm/vcpu.h>
    77.8  #include <asm/tlbflush.h>
    77.9  #include <asm/regionreg.h>
   77.10  #include <asm/dom_fw.h>
   77.11 @@ -68,6 +69,8 @@ static void init_switch_stack(struct vcp
   77.12  DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
   77.13  DEFINE_PER_CPU(int *, current_psr_ic_addr);
   77.14  
   77.15 +DEFINE_PER_CPU(struct vcpu *, fp_owner);
   77.16 +
   77.17  #include <xen/sched-if.h>
   77.18  
   77.19  static void
   77.20 @@ -135,12 +138,44 @@ static void flush_vtlb_for_context_switc
   77.21  	}
   77.22  }
   77.23  
   77.24 +static void lazy_fp_switch(struct vcpu *prev, struct vcpu *next)
   77.25 +{
   77.26 +	/*
   77.27 +	 * Implement eager save, lazy restore
   77.28 +	 */
   77.29 +	if (!is_idle_vcpu(prev)) {
   77.30 +		if (VMX_DOMAIN(prev)) {
   77.31 +			if (FP_PSR(prev) & IA64_PSR_MFH) {
   77.32 +				__ia64_save_fpu(prev->arch._thread.fph);
   77.33 +				__ia64_per_cpu_var(fp_owner) = prev;
   77.34 +			}
   77.35 +		} else {
   77.36 +			if (PSCB(prev, hpsr_mfh)) {
   77.37 +				__ia64_save_fpu(prev->arch._thread.fph);
   77.38 +				__ia64_per_cpu_var(fp_owner) = prev;
   77.39 +			}
   77.40 +		}
   77.41 +	}
   77.42 +
   77.43 +	if (!is_idle_vcpu(next)) {
   77.44 +		if (VMX_DOMAIN(next)) {
   77.45 +			FP_PSR(next) = IA64_PSR_DFH;
   77.46 +			vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
   77.47 +		} else {
   77.48 +			PSCB(next, hpsr_dfh) = 1;
   77.49 +			PSCB(next, hpsr_mfh) = 0;
   77.50 +			vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
   77.51 +		}
   77.52 +	}
   77.53 +}
   77.54 +
   77.55  void schedule_tail(struct vcpu *prev)
   77.56  {
   77.57  	extern char ia64_ivt;
   77.58 -	context_saved(prev);
   77.59  
   77.60 +	context_saved(prev);
   77.61  	ia64_disable_vhpt_walker();
   77.62 +
   77.63  	if (VMX_DOMAIN(current)) {
   77.64  		vmx_do_launch(current);
   77.65  		migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
   77.66 @@ -148,7 +183,7 @@ void schedule_tail(struct vcpu *prev)
   77.67  	} else {
   77.68  		ia64_set_iva(&ia64_ivt);
   77.69  		load_region_regs(current);
   77.70 -        	ia64_set_pta(vcpu_pta(current));
   77.71 +		ia64_set_pta(vcpu_pta(current));
   77.72  		vcpu_load_kernel_regs(current);
   77.73  		__ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
   77.74  		  shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
   77.75 @@ -165,64 +200,65 @@ void context_switch(struct vcpu *prev, s
   77.76  
   77.77      local_irq_save(spsr);
   77.78  
   77.79 -    if (!is_idle_domain(prev->domain)) 
   77.80 -        __ia64_save_fpu(prev->arch._thread.fph);
   77.81 -    if (!is_idle_domain(next->domain)) 
   77.82 -        __ia64_load_fpu(next->arch._thread.fph);
   77.83 -
   77.84      if (VMX_DOMAIN(prev)) {
   77.85 -	vmx_save_state(prev);
   77.86 -	if (!VMX_DOMAIN(next)) {
   77.87 -	    /* VMX domains can change the physical cr.dcr.
   77.88 -	     * Restore default to prevent leakage. */
   77.89 -	    ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK
   77.90 -	                   | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_PP
   77.91 -	                   | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
   77.92 -	}
   77.93 +        vmx_save_state(prev);
   77.94 +        if (!VMX_DOMAIN(next)) {
   77.95 +            /* VMX domains can change the physical cr.dcr.
   77.96 +             * Restore default to prevent leakage. */
   77.97 +            ia64_setreg(_IA64_REG_CR_DCR, IA64_DEFAULT_DCR_BITS);
   77.98 +        }
   77.99      }
  77.100      if (VMX_DOMAIN(next))
  77.101 -	vmx_load_state(next);
  77.102 +        vmx_load_state(next);
  77.103  
  77.104      ia64_disable_vhpt_walker();
  77.105 -    /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
  77.106 +    lazy_fp_switch(prev, current);
  77.107 +
  77.108      prev = ia64_switch_to(next);
  77.109  
  77.110      /* Note: ia64_switch_to does not return here at vcpu initialization.  */
  77.111  
  77.112 -    //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
  77.113 - 
  77.114 -    if (VMX_DOMAIN(current)){
  77.115 -	vmx_load_all_rr(current);
  77.116 -	migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
  77.117 -	              current->processor);
  77.118 +    if (VMX_DOMAIN(current)) {
  77.119 +        vmx_load_all_rr(current);
  77.120 +        migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
  77.121 +                      current->processor);
  77.122      } else {
  77.123 -	struct domain *nd;
  77.124 -    	extern char ia64_ivt;
  77.125 +        struct domain *nd;
  77.126 +        extern char ia64_ivt;
  77.127  
  77.128 -    	ia64_set_iva(&ia64_ivt);
  77.129 +        ia64_set_iva(&ia64_ivt);
  77.130  
  77.131 -	nd = current->domain;
  77.132 -    	if (!is_idle_domain(nd)) {
  77.133 -	    	load_region_regs(current);
  77.134 -		ia64_set_pta(vcpu_pta(current));
  77.135 -	    	vcpu_load_kernel_regs(current);
  77.136 -		vcpu_set_next_timer(current);
  77.137 -		if (vcpu_timer_expired(current))
  77.138 -			vcpu_pend_timer(current);
  77.139 -		__ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
  77.140 -		  vcpu_info[current->vcpu_id].evtchn_upcall_mask;
  77.141 -		__ia64_per_cpu_var(current_psr_ic_addr) =
  77.142 -		  (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
  77.143 -    	} else {
  77.144 -		/* When switching to idle domain, only need to disable vhpt
  77.145 -		 * walker. Then all accesses happen within idle context will
  77.146 -		 * be handled by TR mapping and identity mapping.
  77.147 -		 */
  77.148 -		__ia64_per_cpu_var(current_psr_i_addr) = NULL;
  77.149 -		__ia64_per_cpu_var(current_psr_ic_addr) = NULL;
  77.150 +        nd = current->domain;
  77.151 +        if (!is_idle_domain(nd)) {
  77.152 +            load_region_regs(current);
  77.153 +            ia64_set_pta(vcpu_pta(current));
  77.154 +            vcpu_load_kernel_regs(current);
  77.155 +            vcpu_set_next_timer(current);
  77.156 +            if (vcpu_timer_expired(current))
  77.157 +                vcpu_pend_timer(current);
  77.158 +            __ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
  77.159 +                vcpu_info[current->vcpu_id].evtchn_upcall_mask;
  77.160 +            __ia64_per_cpu_var(current_psr_ic_addr) =
  77.161 +                (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
  77.162 +        } else {
  77.163 +            /* When switching to idle domain, only need to disable vhpt
   77.164 +            /* When switching to the idle domain, we only need to disable the
   77.165 +             * vhpt walker.  All accesses that happen within the idle context
   77.166 +             * will then be handled by TR mapping and identity mapping.
  77.167 +            __ia64_per_cpu_var(current_psr_i_addr) = NULL;
  77.168 +            __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
  77.169          }
  77.170      }
  77.171      local_irq_restore(spsr);
  77.172 +
  77.173 +    /* lazy fp */
  77.174 +    if (current->processor != current->arch.last_processor) {
  77.175 +        unsigned long *addr;
  77.176 +        addr = (unsigned long *)per_cpu_addr(fp_owner,
  77.177 +                                             current->arch.last_processor);
  77.178 +        ia64_cmpxchg(acq, addr, current, 0, 8);
  77.179 +    }
  77.180 +   
  77.181      flush_vtlb_for_context_switch(prev, current);
  77.182      context_saved(prev);
  77.183  }
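
The "lazy fp" block added at the end of context_switch() handles migration between physical CPUs: if the vcpu last ran elsewhere, the fp_owner slot of that CPU is atomically cleared, but only if it still names this vcpu, so the next disabled-FP fault reloads fph from the saved copy rather than trusting registers that live on another processor. A sketch of that compare-and-clear, using a GCC builtin as a stand-in for ia64_cmpxchg(acq, addr, current, 0, 8) (names are illustrative):

    struct vcpu;   /* opaque here */

    /* Clear *owner_slot only if it still points at v. */
    static inline void fp_owner_clear(struct vcpu **owner_slot, struct vcpu *v)
    {
        (void)__sync_val_compare_and_swap(owner_slot, v, (struct vcpu *)0);
    }
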
  77.184 @@ -411,9 +447,6 @@ int vcpu_late_initialise(struct vcpu *v)
  77.185  		assign_domain_page(d, IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
  77.186  		                   virt_to_maddr(v->arch.privregs + i));
  77.187  
  77.188 -	tlbflush_update_time(&v->arch.tlbflush_timestamp,
  77.189 -	                     tlbflush_current_time());
  77.190 -
  77.191  	return 0;
  77.192  }
  77.193  
  77.194 @@ -519,6 +552,12 @@ void arch_domain_destroy(struct domain *
  77.195  	deallocate_rid_range(d);
  77.196  }
  77.197  
  77.198 +int arch_vcpu_reset(struct vcpu *v)
  77.199 +{
  77.200 +	/* FIXME: Stub for now */
  77.201 +	return 0;
  77.202 +}
  77.203 +
  77.204  void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
  77.205  {
  77.206  	int i;
  77.207 @@ -542,7 +581,7 @@ void arch_get_info_guest(struct vcpu *v,
  77.208  		er->dtrs[i].rid = v->arch.dtrs[i].rid;
  77.209  	}
  77.210  	er->event_callback_ip = v->arch.event_callback_ip;
  77.211 -	er->dcr = v->arch.dcr;
  77.212 +	er->dcr = PSCB(v,dcr);
  77.213  	er->iva = v->arch.iva;
  77.214  }
  77.215  
  77.216 @@ -578,16 +617,18 @@ int arch_set_info_guest(struct vcpu *v, 
  77.217  			             er->dtrs[i].rid);
  77.218  		}
  77.219  		v->arch.event_callback_ip = er->event_callback_ip;
  77.220 -		v->arch.dcr = er->dcr;
  77.221 +		PSCB(v,dcr) = er->dcr;
  77.222  		v->arch.iva = er->iva;
  77.223  	}
  77.224  
  77.225  	if (test_bit(_VCPUF_initialised, &v->vcpu_flags))
  77.226  		return 0;
  77.227  
  77.228 -	if (d->arch.is_vti)
  77.229 -		vmx_final_setup_guest(v);
  77.230 -	else {
  77.231 +	if (d->arch.is_vti) {
  77.232 +		rc = vmx_final_setup_guest(v);
  77.233 +		if (rc != 0)
  77.234 +			return rc;
  77.235 +	} else {
  77.236  		rc = vcpu_late_initialise(v);
  77.237  		if (rc != 0)
  77.238  			return rc;
  77.239 @@ -982,12 +1023,6 @@ int construct_dom0(struct domain *d,
  77.240  	unsigned long bp_mpa;
  77.241  	struct ia64_boot_param *bp;
  77.242  
  77.243 -#ifdef VALIDATE_VT
  77.244 -	unsigned int vmx_dom0 = 0;
  77.245 -	unsigned long mfn;
  77.246 -	struct page_info *page = NULL;
  77.247 -#endif
  77.248 -
  77.249  //printk("construct_dom0: starting\n");
  77.250  
  77.251  	/* Sanity! */
  77.252 @@ -1021,23 +1056,6 @@ int construct_dom0(struct domain *d,
  77.253  		return -1;
  77.254  	}
  77.255  
  77.256 -#ifdef VALIDATE_VT
  77.257 -	/* Temp workaround */
  77.258 -	if (running_on_sim)
  77.259 -	    dsi.xen_section_string = (char *)1;
  77.260 -
  77.261 -	/* Check whether dom0 is vti domain */
  77.262 -	if ((!vmx_enabled) && !dsi.xen_section_string) {
  77.263 -	    printk("Lack of hardware support for unmodified vmx dom0\n");
  77.264 -	    panic("");
  77.265 -	}
  77.266 -
  77.267 -	if (vmx_enabled && !dsi.xen_section_string) {
  77.268 -	    printk("Dom0 is vmx domain!\n");
  77.269 -	    vmx_dom0 = 1;
  77.270 -	}
  77.271 -#endif
  77.272 -
  77.273  	p_start = parms.virt_base;
  77.274  	pkern_start = parms.virt_kstart;
  77.275  	pkern_end = parms.virt_kend;
  77.276 @@ -1131,14 +1149,6 @@ int construct_dom0(struct domain *d,
  77.277  
  77.278  	printk("Dom0: 0x%lx\n", (u64)dom0);
  77.279  
  77.280 -#ifdef VALIDATE_VT
  77.281 -	/* VMX specific construction for Dom0, if hardware supports VMX
  77.282 -	 * and Dom0 is unmodified image
  77.283 -	 */
  77.284 -	if (vmx_dom0)
  77.285 -	    vmx_final_setup_guest(v);
  77.286 -#endif
  77.287 -
  77.288  	set_bit(_VCPUF_initialised, &v->vcpu_flags);
  77.289  
  77.290  	/* Build firmware.
    78.1 --- a/xen/arch/ia64/xen/faults.c	Fri Feb 09 14:43:22 2007 -0600
    78.2 +++ b/xen/arch/ia64/xen/faults.c	Wed Feb 14 19:01:35 2007 +0000
    78.3 @@ -92,6 +92,9 @@ void reflect_interruption(unsigned long 
    78.4  	regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
    78.5  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    78.6  
    78.7 +	if (PSCB(v, hpsr_dfh))
    78.8 +		regs->cr_ipsr |= IA64_PSR_DFH;  
    78.9 +	PSCB(v, vpsr_dfh) = 0;
   78.10  	v->vcpu_info->evtchn_upcall_mask = 1;
   78.11  	PSCB(v, interrupt_collection_enabled) = 0;
   78.12  
   78.13 @@ -152,6 +155,9 @@ void reflect_event(void)
   78.14  	regs->cr_iip = v->arch.event_callback_ip;
   78.15  	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   78.16  
   78.17 +	if (PSCB(v, hpsr_dfh))
   78.18 +		regs->cr_ipsr |= IA64_PSR_DFH;
   78.19 +	PSCB(v, vpsr_dfh) = 0;
   78.20  	v->vcpu_info->evtchn_upcall_mask = 1;
   78.21  	PSCB(v, interrupt_collection_enabled) = 0;
   78.22  }
   78.23 @@ -261,6 +267,10 @@ void ia64_do_page_fault(unsigned long ad
   78.24  		    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
   78.25  		regs->cr_ipsr =
   78.26  		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
   78.27 +
   78.28 +		if (PSCB(current, hpsr_dfh))
   78.29 +			regs->cr_ipsr |= IA64_PSR_DFH;  
   78.30 +		PSCB(current, vpsr_dfh) = 0;
   78.31  		perfc_incra(slow_reflect, fault >> 8);
   78.32  		return;
   78.33  	}
   78.34 @@ -608,6 +618,16 @@ ia64_handle_reflection(unsigned long ifa
   78.35  		vector = IA64_GENEX_VECTOR;
   78.36  		break;
   78.37  	case 25:
   78.38 +		if (PSCB(v, hpsr_dfh)) {
   78.39 +			PSCB(v, hpsr_dfh) = 0;
   78.40 +			PSCB(v, hpsr_mfh) = 1;
   78.41 +			if (__ia64_per_cpu_var(fp_owner) != v)
   78.42 +				__ia64_load_fpu(v->arch._thread.fph);
   78.43 +		}
   78.44 +		if (!PSCB(v, vpsr_dfh)) {
   78.45 +			regs->cr_ipsr &= ~IA64_PSR_DFH;
   78.46 +			return;
   78.47 +		}
   78.48  		vector = IA64_DISABLED_FPREG_VECTOR;
   78.49  		break;
   78.50  	case 26:
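
Together with the hpsr_dfh/vpsr_dfh fields introduced elsewhere in this changeset, the new case 25 code implements lazy FP restore for paravirtualized vcpus: psr.dfh stays forced on after a context switch, the first disabled-FP-register fault reloads fph (unless this CPU already owns it), and the fault is reflected to the guest only if the guest itself had set psr.dfh. A condensed sketch of that decision, with the per-vcpu flags modelled as plain fields rather than the real PSCB layout:

    #include <stdbool.h>

    struct lazy_fp_state {
        bool hpsr_dfh;   /* hypervisor-forced psr.dfh (fph not restored yet) */
        bool hpsr_mfh;   /* fph modified since the last save */
        bool vpsr_dfh;   /* the guest's own view of psr.dfh */
    };

    /* Returns true if the fault must still be reflected to the guest. */
    static bool handle_disabled_fpreg(struct lazy_fp_state *s, bool cpu_owns_fph,
                                      void (*load_fph)(void), bool *ipsr_dfh)
    {
        if (s->hpsr_dfh) {
            s->hpsr_dfh = false;
            s->hpsr_mfh = true;
            if (!cpu_owns_fph)
                load_fph();        /* restore fph from the saved copy */
        }
        if (!s->vpsr_dfh) {
            *ipsr_dfh = false;     /* the guest did not disable FP itself */
            return false;          /* resume without reflecting the fault */
        }
        return true;
    }
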
    79.1 --- a/xen/arch/ia64/xen/flushtlb.c	Fri Feb 09 14:43:22 2007 -0600
    79.2 +++ b/xen/arch/ia64/xen/flushtlb.c	Wed Feb 14 19:01:35 2007 +0000
    79.3 @@ -59,44 +59,18 @@ tlbflush_clock_inc_and_return(void)
    79.4      return t2;
    79.5  }
    79.6  
    79.7 +static void
    79.8 +tlbflush_clock_local_flush(void *unused)
    79.9 +{
   79.10 +    local_vhpt_flush();
   79.11 +    local_flush_tlb_all();
   79.12 +}
   79.13 +
   79.14  void
   79.15  new_tlbflush_clock_period(void)
   79.16  {
   79.17 -    /*
   79.18 -     *XXX TODO
   79.19 -     * If flushing all vcpu's vhpt takes too long, it can be done backgroundly.
   79.20 -     * In such case tlbflush time comparison is done using only 31bit
   79.21 -     * similar to linux jiffies comparison.
   79.22 -     * vhpt should be flushed gradually before wraping 31bits.
   79.23 -     *
   79.24 -     * Sample calculation.
   79.25 -     * Currently Xen/IA64 can create up to 64 domains at the same time.
   79.26 -     * Vhpt size is currently 64KB. (This might be changed later though)
   79.27 -     * Suppose each domains have 4 vcpus (or 16 vcpus).
   79.28 -     * then the memory size which must be flushed is 16MB (64MB).
   79.29 -     */    
   79.30 -    struct domain* d;
   79.31 -    struct vcpu* v;
   79.32 -    /* flush all vhpt of vcpu of all existing domain. */
   79.33 -    read_lock(&domlist_lock);
   79.34 -    for_each_domain(d) {
   79.35 -        for_each_vcpu(d, v) {
   79.36 -            vcpu_purge_tr_entry(&PSCBX(v,dtlb));
   79.37 -            vcpu_purge_tr_entry(&PSCBX(v,itlb));
   79.38 -        }
   79.39 -    }
   79.40 -    smp_mb();
   79.41 -    for_each_domain(d) {
   79.42 -        for_each_vcpu(d, v) {
   79.43 -            if (HAS_PERVCPU_VHPT(v->domain))
   79.44 -                vcpu_vhpt_flush(v);
   79.45 -        }
   79.46 -    }
   79.47 -    read_unlock(&domlist_lock);
   79.48 -    /* unlock has release semantics */
   79.49 -
   79.50      /* flush all vhpt of physical cpu and mTLB */
   79.51 -    on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
   79.52 +    on_each_cpu(tlbflush_clock_local_flush, NULL, 1, 1);
   79.53  
   79.54      /*
   79.55       * if global TLB shootdown is finished, increment tlbflush_time
    80.1 --- a/xen/arch/ia64/xen/fw_emul.c	Fri Feb 09 14:43:22 2007 -0600
    80.2 +++ b/xen/arch/ia64/xen/fw_emul.c	Wed Feb 14 19:01:35 2007 +0000
    80.3 @@ -605,9 +605,11 @@ xen_pal_emulator(unsigned long index, u6
    80.4  			printk ("Domain0 halts the machine\n");
    80.5  			console_start_sync();
    80.6  			(*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
    80.7 +		} else {
    80.8 +			set_bit(_VCPUF_down, &current->vcpu_flags);
    80.9 +			vcpu_sleep_nosync(current);
   80.10 +			status = PAL_STATUS_SUCCESS;
   80.11  		}
   80.12 -		else
   80.13 -			domain_shutdown(current->domain, SHUTDOWN_poweroff);
   80.14  		break;
   80.15  	    case PAL_HALT_LIGHT:
   80.16  		if (VMX_DOMAIN(current)) {
   80.17 @@ -623,6 +625,9 @@ xen_pal_emulator(unsigned long index, u6
   80.18  		if (VMX_DOMAIN(current))
   80.19  			status = PAL_STATUS_SUCCESS;
   80.20  		break;
   80.21 +	    case PAL_LOGICAL_TO_PHYSICAL:
   80.22 +		/* Optional, no need to complain about being unimplemented */
   80.23 +		break;
   80.24  	    default:
   80.25  		printk("xen_pal_emulator: UNIMPLEMENTED PAL CALL %lu!!!!\n",
   80.26  				index);
    81.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Fri Feb 09 14:43:22 2007 -0600
    81.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Wed Feb 14 19:01:35 2007 +0000
    81.3 @@ -252,6 +252,10 @@ ENTRY(hyper_ssm_i)
    81.4  	movl r27=~DELIVER_PSR_CLR;;
    81.5  	or r29=r29,r28;;
    81.6  	and r29=r29,r27;;
    81.7 +	// set hpsr_dfh to ipsr
    81.8 +	adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
    81.9 +	ld1 r28=[r28];;
   81.10 +	dep r29=r28,r29,IA64_PSR_DFH_BIT,1;;
   81.11  	mov cr.ipsr=r29;;
   81.12  	// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
   81.13  	extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
   81.14 @@ -269,6 +273,12 @@ ENTRY(hyper_ssm_i)
   81.15  	movl r22=THIS_CPU(current_psr_i_addr)
   81.16  	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
   81.17  	ld8 r22=[r22]
   81.18 +	;;
   81.19 +	adds r27=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.20 +	ld1 r28=[r27];;
   81.21 +	st1 [r27]=r0
   81.22 +	dep r30=r28,r30,IA64_PSR_DFH_BIT,1
   81.23 +	;;
   81.24  	st8 [r21]=r30;;
   81.25  	// set shared_mem interrupt_delivery_enabled to 0
   81.26  	// set shared_mem interrupt_collection_enabled to 0
   81.27 @@ -607,6 +617,10 @@ ENTRY(fast_reflect)
   81.28  	movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
   81.29  	or r29=r29,r28;;
   81.30  	and r29=r29,r27;;
   81.31 +	// set hpsr_dfh to ipsr
   81.32 +	adds r28=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.33 +	ld1 r28=[r28];;
   81.34 +	dep r29=r28,r29,IA64_PSR_DFH_BIT,1;;
   81.35  	mov cr.ipsr=r29;;
   81.36  	// set shared_mem ipsr (from ipsr in r30 with ipsr.ri already set)
   81.37  	extr.u r29=r30,IA64_PSR_CPL0_BIT,2;;
   81.38 @@ -629,7 +643,13 @@ ENTRY(fast_reflect)
   81.39  (p6)	dep r30=0,r30,IA64_PSR_I_BIT,1
   81.40  (p7)	dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
   81.41  	mov r22=1
   81.42 -	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
   81.43 +	adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 
   81.44 +	;;
   81.45 +	adds r27=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.46 +	ld1 r28=[r27];;
   81.47 +	st1 [r27]=r0
   81.48 +	dep r30=r28,r30,IA64_PSR_DFH_BIT,1
   81.49 +	;;
   81.50  	st8 [r21]=r30 ;;
   81.51  	// set shared_mem interrupt_delivery_enabled to 0
   81.52  	// set shared_mem interrupt_collection_enabled to 0
   81.53 @@ -1104,6 +1124,18 @@ just_do_rfi:
   81.54  	;;
   81.55  	or r21=r21,r20
   81.56  	;;
   81.57 +	adds r20=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.58 +	tbit.z p8,p9 = r21, IA64_PSR_DFH_BIT
   81.59 +	;;
   81.60 +	(p9) mov r27=1;;
   81.61 +	(p9) st1 [r20]=r27
   81.62 +	;;
   81.63 +	(p8) st1 [r20]=r0
   81.64 +	(p8) adds r20=XSI_HPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.65 +	(p8) ld1 r27=[r20]
   81.66 +	;;
   81.67 +	(p8) dep r21=r27,r21, IA64_PSR_DFH_BIT, 1
   81.68 +	;;
   81.69  	mov cr.ipsr=r21
   81.70  	adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
   81.71  	ld4 r21=[r20];;
   81.72 @@ -1489,6 +1521,11 @@ ENTRY(hyper_get_psr)
   81.73  	ld1 r21=[r20];;
   81.74  	dep r8=r21,r8,IA64_PSR_I_BIT,1
   81.75  	;;
   81.76 +	// set vpsr.dfh
   81.77 +	adds r20=XSI_VPSR_DFH_OFS-XSI_PSR_IC_OFS,r18;;
   81.78 +	ld1 r21=[r20];;
   81.79 +	dep r8=r21,r8,IA64_PSR_DFH_BIT,1
   81.80 +	;;
   81.81  	mov r25=cr.iip
   81.82  	extr.u r26=r24,41,2 ;;
   81.83  	cmp.eq p6,p7=2,r26 ;;
    82.1 --- a/xen/arch/ia64/xen/mm.c	Fri Feb 09 14:43:22 2007 -0600
    82.2 +++ b/xen/arch/ia64/xen/mm.c	Wed Feb 14 19:01:35 2007 +0000
    82.3 @@ -525,7 +525,9 @@ u64 translate_domain_pte(u64 pteval, u64
    82.4  			   This can happen when domU tries to touch i/o
    82.5  			   port space.  Also prevents possible address
    82.6  			   aliasing issues.  */
    82.7 -			printk("Warning: UC to WB for mpaddr=%lx\n", mpaddr);
    82.8 +			if (!(mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE))
    82.9 +				gdprintk(XENLOG_WARNING, "Warning: UC to WB "
   82.10 +				         "for mpaddr=%lx\n", mpaddr);
   82.11  			pteval = (pteval & ~_PAGE_MA_MASK) | _PAGE_MA_WB;
   82.12  		}
   82.13  		break;
   82.14 @@ -690,7 +692,6 @@ unsigned long lookup_domain_mpa(struct d
   82.15      if (pte != NULL) {
   82.16          pte_t tmp_pte = *pte;// pte is volatile. copy the value.
   82.17          if (pte_present(tmp_pte)) {
   82.18 -//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
   82.19              if (entry != NULL)
   82.20                  p2m_entry_set(entry, pte, tmp_pte);
   82.21              return pte_val(tmp_pte);
   82.22 @@ -698,14 +699,20 @@ unsigned long lookup_domain_mpa(struct d
   82.23              return GPFN_INV_MASK;
   82.24      }
   82.25  
   82.26 -    printk("%s: d 0x%p id %d current 0x%p id %d\n",
   82.27 -           __func__, d, d->domain_id, current, current->vcpu_id);
   82.28 -    if (mpaddr < d->arch.convmem_end)
   82.29 -        printk("%s: non-allocated mpa 0x%lx (< 0x%lx)\n", __func__,
   82.30 -               mpaddr, d->arch.convmem_end);
   82.31 -    else
   82.32 -        printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
   82.33 -               mpaddr, d->arch.convmem_end);
   82.34 +    if (mpaddr < d->arch.convmem_end) {
   82.35 +        gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: non-allocated mpa "
   82.36 +                 "0x%lx (< 0x%lx)\n", current->vcpu_id, PSCB(current, iip),
   82.37 +                 mpaddr, d->arch.convmem_end);
   82.38 +    } else if (mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE) {
   82.39 +        /* Log I/O port probing, but complain less loudly about it */
   82.40 +        gdprintk(XENLOG_INFO, "vcpu %d iip 0x%016lx: bad I/O port access "
    82.41 +                 "0x%lx\n", current->vcpu_id, PSCB(current, iip),
   82.42 +                 IO_SPACE_SPARSE_DECODING(mpaddr - IO_PORTS_PADDR));
   82.43 +    } else {
   82.44 +        gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: bad mpa 0x%lx "
   82.45 +                 "(=> 0x%lx)\n", current->vcpu_id, PSCB(current, iip),
   82.46 +                 mpaddr, d->arch.convmem_end);
   82.47 +    }
   82.48  
   82.49      if (entry != NULL)
   82.50          p2m_entry_set(entry, NULL, __pte(0));
    83.1 --- a/xen/arch/ia64/xen/vcpu.c	Fri Feb 09 14:43:22 2007 -0600
    83.2 +++ b/xen/arch/ia64/xen/vcpu.c	Wed Feb 14 19:01:35 2007 +0000
    83.3 @@ -141,6 +141,9 @@ void vcpu_init_regs(struct vcpu *v)
    83.4  		/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
    83.5  		/* Need to be expanded as macro */
    83.6  		regs->cr_ipsr = 0x501008826008;
    83.7 +		/* lazy fp */
    83.8 +		FP_PSR(v) = IA64_PSR_DFH;
    83.9 +		regs->cr_ipsr |= IA64_PSR_DFH;
   83.10  	} else {
   83.11  		regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
   83.12  		    | IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
   83.13 @@ -148,6 +151,10 @@ void vcpu_init_regs(struct vcpu *v)
   83.14  				   | IA64_PSR_RI | IA64_PSR_IS);
   83.15  		// domain runs at PL2
   83.16  		regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
   83.17 +		// lazy fp 
   83.18 +		PSCB(v, hpsr_dfh) = 1;
   83.19 +		PSCB(v, hpsr_mfh) = 0;
   83.20 +		regs->cr_ipsr |= IA64_PSR_DFH;
   83.21  	}
   83.22  	regs->cr_ifs = 1UL << 63;	/* or clear? */
   83.23  	regs->ar_fpsr = FPSR_DEFAULT;
   83.24 @@ -265,8 +272,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
   83.25  		      IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
   83.26  		      IA64_PSR_DFL | IA64_PSR_DFH))
   83.27  		return IA64_ILLOP_FAULT;
   83.28 -	if (imm.dfh)
   83.29 -		ipsr->dfh = 0;
   83.30 +	if (imm.dfh) {
   83.31 +		ipsr->dfh = PSCB(vcpu, hpsr_dfh);
   83.32 +		PSCB(vcpu, vpsr_dfh) = 0;
   83.33 +	}
   83.34  	if (imm.dfl)
   83.35  		ipsr->dfl = 0;
   83.36  	if (imm.pp) {
   83.37 @@ -320,8 +329,10 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
   83.38  	    IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH;
   83.39  	if (imm24 & ~mask)
   83.40  		return IA64_ILLOP_FAULT;
   83.41 -	if (imm.dfh)
   83.42 +	if (imm.dfh) {
   83.43 +		PSCB(vcpu, vpsr_dfh) = 1;
   83.44  		ipsr->dfh = 1;
   83.45 +	} 
   83.46  	if (imm.dfl)
   83.47  		ipsr->dfl = 1;
   83.48  	if (imm.pp) {
   83.49 @@ -386,8 +397,13 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u6
   83.50  	//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
   83.51  	//	return IA64_ILLOP_FAULT;
   83.52  	// however trying to set other bits can't be an error as it is in ssm
   83.53 -	if (newpsr.dfh)
   83.54 +	if (newpsr.dfh) {
   83.55  		ipsr->dfh = 1;
   83.56 +		PSCB(vcpu, vpsr_dfh) = 1;
   83.57 +	} else {
   83.58 +		ipsr->dfh = PSCB(vcpu, hpsr_dfh);
   83.59 +		PSCB(vcpu, vpsr_dfh) = 0;
   83.60 +	}       
   83.61  	if (newpsr.dfl)
   83.62  		ipsr->dfl = 1;
   83.63  	if (newpsr.pp) {
   83.64 @@ -466,6 +482,8 @@ IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 
   83.65  		newpsr.pp = 1;
   83.66  	else
   83.67  		newpsr.pp = 0;
   83.68 +	newpsr.dfh = PSCB(vcpu, vpsr_dfh);
   83.69 +
   83.70  	*pval = *(unsigned long *)&newpsr;
   83.71  	*pval &= (MASK(0, 32) | MASK(35, 2));
   83.72  	return IA64_NO_FAULT;
   83.73 @@ -483,7 +501,7 @@ BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
   83.74  
   83.75  u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
   83.76  {
   83.77 -	u64 dcr = PSCBX(vcpu, dcr);
   83.78 +	u64 dcr = PSCB(vcpu, dcr);
   83.79  	PSR psr;
   83.80  
   83.81  	//printk("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
   83.82 @@ -497,6 +515,7 @@ u64 vcpu_get_ipsr_int_state(VCPU * vcpu,
   83.83  	psr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
   83.84  	psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
   83.85  	psr.ia64_psr.bn = PSCB(vcpu, banknum);
   83.86 +	psr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
   83.87  	psr.ia64_psr.dt = 1;
   83.88  	psr.ia64_psr.it = 1;
   83.89  	psr.ia64_psr.rt = 1;
   83.90 @@ -513,10 +532,7 @@ u64 vcpu_get_ipsr_int_state(VCPU * vcpu,
   83.91  
   83.92  IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
   83.93  {
   83.94 -//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
   83.95 -	// Reads of cr.dcr on Xen always have the sign bit set, so
   83.96 -	// a domain can differentiate whether it is running on SP or not
   83.97 -	*pval = PSCBX(vcpu, dcr) | 0x8000000000000000L;
   83.98 +	*pval = PSCB(vcpu, dcr);
   83.99  	return IA64_NO_FAULT;
  83.100  }
  83.101  
  83.102 @@ -632,11 +648,7 @@ IA64FAULT vcpu_get_iha(VCPU * vcpu, u64 
  83.103  
  83.104  IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
  83.105  {
  83.106 -	// Reads of cr.dcr on SP always have the sign bit set, so
  83.107 -	// a domain can differentiate whether it is running on SP or not
  83.108 -	// Thus, writes of DCR should ignore the sign bit
  83.109 -//verbose("vcpu_set_dcr: called\n");
  83.110 -	PSCBX(vcpu, dcr) = val & ~0x8000000000000000L;
  83.111 +	PSCB(vcpu, dcr) = val;
  83.112  	return IA64_NO_FAULT;
  83.113  }
  83.114  
  83.115 @@ -1343,6 +1355,12 @@ IA64FAULT vcpu_rfi(VCPU * vcpu)
  83.116  	if (psr.ia64_psr.cpl < 3)
  83.117  		psr.ia64_psr.cpl = 2;
  83.118  	int_enable = psr.ia64_psr.i;
  83.119 +	if (psr.ia64_psr.dfh) {
  83.120 +		PSCB(vcpu, vpsr_dfh) = 1;
  83.121 +	} else {
  83.122 +		psr.ia64_psr.dfh = PSCB(vcpu, hpsr_dfh);
  83.123 +		PSCB(vcpu, vpsr_dfh) = 0;
  83.124 +	}
  83.125  	if (psr.ia64_psr.ic)
  83.126  		PSCB(vcpu, interrupt_collection_enabled) = 1;
  83.127  	if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it)
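
Throughout vcpu.c the single architectural psr.dfh bit is now tracked as two flags: vpsr_dfh is what the guest last set, hpsr_dfh is the bit the hypervisor forces for lazy FP restore, and reads such as vcpu_get_psr() report only vpsr_dfh back to the guest. The value that ends up in cr.ipsr is effectively the OR of the two, as in this one-line sketch (a simplification of the per-path updates above):

    /* Effective psr.dfh loaded into cr.ipsr: the guest's setting, or the
     * hypervisor-forced bit while fph has not been restored yet. */
    static inline int effective_psr_dfh(int vpsr_dfh, int hpsr_dfh)
    {
        return vpsr_dfh | hpsr_dfh;
    }
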
    84.1 --- a/xen/arch/ia64/xen/vhpt.c	Fri Feb 09 14:43:22 2007 -0600
    84.2 +++ b/xen/arch/ia64/xen/vhpt.c	Wed Feb 14 19:01:35 2007 +0000
    84.3 @@ -54,11 +54,7 @@ local_vhpt_flush(void)
    84.4  void
    84.5  vcpu_vhpt_flush(struct vcpu* v)
    84.6  {
    84.7 -	/* increment flush clock before flush */
    84.8 -	u32 flush_time = tlbflush_clock_inc_and_return();
    84.9  	__vhpt_flush(vcpu_vhpt_maddr(v));
   84.10 -	/* this must be after flush */
   84.11 -	tlbflush_update_time(&v->arch.tlbflush_timestamp, flush_time);
   84.12  	perfc_incrc(vcpu_vhpt_flush);
   84.13  }
   84.14  
   84.15 @@ -177,7 +173,9 @@ pervcpu_vhpt_alloc(struct vcpu *v)
   84.16  void
   84.17  pervcpu_vhpt_free(struct vcpu *v)
   84.18  {
   84.19 -	free_domheap_pages(v->arch.vhpt_page, VHPT_SIZE_LOG2 - PAGE_SHIFT);
   84.20 +	if (likely(v->arch.vhpt_page != NULL))
   84.21 +		free_domheap_pages(v->arch.vhpt_page,
   84.22 +		                   VHPT_SIZE_LOG2 - PAGE_SHIFT);
   84.23  }
   84.24  #endif
   84.25  
    85.1 --- a/xen/arch/ia64/xen/xensetup.c	Fri Feb 09 14:43:22 2007 -0600
    85.2 +++ b/xen/arch/ia64/xen/xensetup.c	Wed Feb 14 19:01:35 2007 +0000
    85.3 @@ -26,6 +26,8 @@
    85.4  #include <asm/vmx.h>
    85.5  #include <linux/efi.h>
    85.6  #include <asm/iosapic.h>
    85.7 +#include <xen/softirq.h>
    85.8 +#include <xen/rcupdate.h>
    85.9  
   85.10  unsigned long xenheap_phys_end, total_pages;
   85.11  
   85.12 @@ -265,13 +267,16 @@ void start_kernel(void)
   85.13      early_setup_arch(&cmdline);
   85.14  
   85.15      /* We initialise the serial devices very early so we can get debugging. */
   85.16 -    if (running_on_sim) hpsim_serial_init();
   85.17 +    if (running_on_sim)
   85.18 +        hpsim_serial_init();
   85.19      else {
   85.20 -	ns16550_init(0, &ns16550_com1);
   85.21 -	/* Also init com2 for Tiger4. */
   85.22 -	ns16550_com2.io_base = 0x2f8;
   85.23 -	ns16550_com2.irq     = 3;
   85.24 -	ns16550_init(1, &ns16550_com2);
   85.25 +        ns16550_init(0, &ns16550_com1);
   85.26 +        if (ns16550_com1.io_base == 0x3f8) {
   85.27 +            /* Also init com2 for Tiger4. */
   85.28 +            ns16550_com2.io_base = 0x2f8;
   85.29 +            ns16550_com2.irq     = 3;
   85.30 +            ns16550_init(1, &ns16550_com2);
   85.31 +        }
   85.32      }
   85.33      serial_init_preirq();
   85.34  
   85.35 @@ -436,6 +441,12 @@ void start_kernel(void)
   85.36      init_xen_time(); /* initialise the time */
   85.37      timer_init();
   85.38  
   85.39 +    rcu_init();
   85.40 +
   85.41 +#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
   85.42 +    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);
   85.43 +#endif
   85.44 +
   85.45  #ifdef CONFIG_SMP
   85.46      if ( opt_nosmp )
   85.47      {
   85.48 @@ -464,6 +475,7 @@ printk("num_online_cpus=%d, max_cpus=%d\
   85.49          if ( num_online_cpus() >= max_cpus )
   85.50              break;
   85.51          if ( !cpu_online(i) ) {
   85.52 +            rcu_online_cpu(i);
   85.53              __cpu_up(i);
   85.54  	}
   85.55      }
    86.1 --- a/xen/arch/powerpc/machine_kexec.c	Fri Feb 09 14:43:22 2007 -0600
    86.2 +++ b/xen/arch/powerpc/machine_kexec.c	Wed Feb 14 19:01:35 2007 +0000
    86.3 @@ -19,6 +19,11 @@ void machine_reboot_kexec(xen_kexec_imag
    86.4      printk("STUB: " __FILE__ ": %s: not implemented\n", __FUNCTION__);
    86.5  }
    86.6  
    86.7 +void machine_kexec(xen_kexec_image_t *image)
    86.8 +{
    86.9 +    printk("STUB: " __FILE__ ": %s: not implemented\n", __FUNCTION__);
   86.10 +}
   86.11 +
   86.12  /*
   86.13   * Local variables:
   86.14   * mode: C
    87.1 --- a/xen/arch/x86/crash.c	Fri Feb 09 14:43:22 2007 -0600
    87.2 +++ b/xen/arch/x86/crash.c	Wed Feb 14 19:01:35 2007 +0000
    87.3 @@ -11,7 +11,6 @@
    87.4  #include <asm/atomic.h>
    87.5  #include <asm/elf.h>
    87.6  #include <asm/percpu.h>
    87.7 -#include <asm/kexec.h>
    87.8  #include <xen/types.h>
    87.9  #include <xen/irq.h>
   87.10  #include <asm/ipi.h>
    88.1 --- a/xen/arch/x86/domain.c	Fri Feb 09 14:43:22 2007 -0600
    88.2 +++ b/xen/arch/x86/domain.c	Wed Feb 14 19:01:35 2007 +0000
    88.3 @@ -37,7 +37,7 @@
    88.4  #include <asm/i387.h>
    88.5  #include <asm/mpspec.h>
    88.6  #include <asm/ldt.h>
    88.7 -#include <asm/shadow.h>
    88.8 +#include <asm/paging.h>
    88.9  #include <asm/hvm/hvm.h>
   88.10  #include <asm/hvm/support.h>
   88.11  #include <asm/msr.h>
   88.12 @@ -331,6 +331,7 @@ int vcpu_initialise(struct vcpu *v)
   88.13  
   88.14      pae_l3_cache_init(&v->arch.pae_l3_cache);
   88.15  
   88.16 +    paging_vcpu_init(v);
   88.17  
   88.18      if ( is_hvm_domain(d) )
   88.19      {
   88.20 @@ -424,7 +425,7 @@ int arch_domain_create(struct domain *d)
   88.21      HYPERVISOR_COMPAT_VIRT_START(d) = __HYPERVISOR_COMPAT_VIRT_START;
   88.22  #endif
   88.23  
   88.24 -    shadow_domain_init(d);
   88.25 +    paging_domain_init(d);
   88.26  
   88.27      if ( !is_idle_domain(d) )
   88.28      {
   88.29 @@ -464,7 +465,7 @@ void arch_domain_destroy(struct domain *
   88.30          hvm_domain_destroy(d);
   88.31      }
   88.32  
   88.33 -    shadow_final_teardown(d);
   88.34 +    paging_final_teardown(d);
   88.35  
   88.36      free_xenheap_pages(
   88.37          d->arch.mm_perdomain_pt,
   88.38 @@ -613,7 +614,7 @@ int arch_set_info_guest(
   88.39          {
   88.40              cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
   88.41  
   88.42 -            if ( shadow_mode_refcounts(d)
   88.43 +            if ( paging_mode_refcounts(d)
   88.44                   ? !get_page(mfn_to_page(cr3_pfn), d)
   88.45                   : !get_page_and_type(mfn_to_page(cr3_pfn), d,
   88.46                                        PGT_base_page_table) )
   88.47 @@ -631,7 +632,7 @@ int arch_set_info_guest(
   88.48  
   88.49              cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
   88.50  
   88.51 -            if ( shadow_mode_refcounts(d)
   88.52 +            if ( paging_mode_refcounts(d)
   88.53                   ? !get_page(mfn_to_page(cr3_pfn), d)
   88.54                   : !get_page_and_type(mfn_to_page(cr3_pfn), d,
   88.55                                      PGT_l3_page_table) )
   88.56 @@ -652,8 +653,8 @@ int arch_set_info_guest(
   88.57      /* Don't redo final setup */
   88.58      set_bit(_VCPUF_initialised, &v->vcpu_flags);
   88.59  
   88.60 -    if ( shadow_mode_enabled(d) )
   88.61 -        shadow_update_paging_modes(v);
   88.62 +    if ( paging_mode_enabled(d) )
   88.63 +        paging_update_paging_modes(v);
   88.64  
   88.65      update_cr3(v);
   88.66  
   88.67 @@ -1406,7 +1407,7 @@ static void vcpu_destroy_pagetables(stru
   88.68  
   88.69          if ( pfn != 0 )
   88.70          {
   88.71 -            if ( shadow_mode_refcounts(d) )
   88.72 +            if ( paging_mode_refcounts(d) )
   88.73                  put_page(mfn_to_page(pfn));
   88.74              else
   88.75                  put_page_and_type(mfn_to_page(pfn));
   88.76 @@ -1427,7 +1428,7 @@ static void vcpu_destroy_pagetables(stru
   88.77      pfn = pagetable_get_pfn(v->arch.guest_table);
   88.78      if ( pfn != 0 )
   88.79      {
   88.80 -        if ( shadow_mode_refcounts(d) )
   88.81 +        if ( paging_mode_refcounts(d) )
   88.82              put_page(mfn_to_page(pfn));
   88.83          else
   88.84              put_page_and_type(mfn_to_page(pfn));
   88.85 @@ -1443,7 +1444,7 @@ static void vcpu_destroy_pagetables(stru
   88.86      pfn = pagetable_get_pfn(v->arch.guest_table_user);
   88.87      if ( pfn != 0 )
   88.88      {
   88.89 -        if ( shadow_mode_refcounts(d) )
   88.90 +        if ( paging_mode_refcounts(d) )
   88.91              put_page(mfn_to_page(pfn));
   88.92          else
   88.93              put_page_and_type(mfn_to_page(pfn));
   88.94 @@ -1464,8 +1465,8 @@ void domain_relinquish_resources(struct 
   88.95      for_each_vcpu ( d, v )
   88.96          vcpu_destroy_pagetables(v);
   88.97  
   88.98 -    /* Tear down shadow mode stuff. */
   88.99 -    shadow_teardown(d);
  88.100 +    /* Tear down paging-assistance stuff. */
  88.101 +    paging_teardown(d);
  88.102  
  88.103      /*
  88.104       * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
  88.105 @@ -1484,35 +1485,12 @@ void domain_relinquish_resources(struct 
  88.106  
  88.107  void arch_dump_domain_info(struct domain *d)
  88.108  {
  88.109 -    if ( shadow_mode_enabled(d) )
  88.110 -    {
  88.111 -        printk("    shadow mode: ");
  88.112 -        if ( d->arch.shadow.mode & SHM2_enable )
  88.113 -            printk("enabled ");
  88.114 -        if ( shadow_mode_refcounts(d) )
  88.115 -            printk("refcounts ");
  88.116 -        if ( shadow_mode_log_dirty(d) )
  88.117 -            printk("log_dirty ");
  88.118 -        if ( shadow_mode_translate(d) )
  88.119 -            printk("translate ");
  88.120 -        if ( shadow_mode_external(d) )
  88.121 -            printk("external ");
  88.122 -        printk("\n");
  88.123 -    }
  88.124 +    paging_dump_domain_info(d);
  88.125  }
  88.126  
  88.127  void arch_dump_vcpu_info(struct vcpu *v)
  88.128  {
  88.129 -    if ( shadow_mode_enabled(v->domain) )
  88.130 -    {
  88.131 -        if ( v->arch.shadow.mode )
  88.132 -            printk("    shadowed %u-on-%u, %stranslated\n",
  88.133 -                   v->arch.shadow.mode->guest_levels,
  88.134 -                   v->arch.shadow.mode->shadow_levels,
  88.135 -                   shadow_vcpu_mode_translate(v) ? "" : "not ");
  88.136 -        else
  88.137 -            printk("    not shadowed\n");
  88.138 -    }
  88.139 +    paging_dump_vcpu_info(v);
  88.140  }
  88.141  
  88.142  /*
    89.1 --- a/xen/arch/x86/domain_build.c	Fri Feb 09 14:43:22 2007 -0600
    89.2 +++ b/xen/arch/x86/domain_build.c	Wed Feb 14 19:01:35 2007 +0000
    89.3 @@ -25,7 +25,7 @@
    89.4  #include <asm/processor.h>
    89.5  #include <asm/desc.h>
    89.6  #include <asm/i387.h>
    89.7 -#include <asm/shadow.h>
    89.8 +#include <asm/paging.h>
    89.9  
   89.10  #include <public/version.h>
   89.11  #include <public/libelf.h>
   89.12 @@ -777,8 +777,8 @@ int construct_dom0(struct domain *d,
   89.13          (void)alloc_vcpu(d, i, i);
   89.14  
   89.15      /* Set up CR3 value for write_ptbase */
   89.16 -    if ( shadow_mode_enabled(v->domain) )
   89.17 -        shadow_update_paging_modes(v);
   89.18 +    if ( paging_mode_enabled(v->domain) )
   89.19 +        paging_update_paging_modes(v);
   89.20      else
   89.21          update_cr3(v);
   89.22  
   89.23 @@ -918,8 +918,8 @@ int construct_dom0(struct domain *d,
   89.24      regs->eflags = X86_EFLAGS_IF;
   89.25  
   89.26      if ( opt_dom0_shadow )
   89.27 -        if ( shadow_enable(d, SHM2_enable) == 0 ) 
   89.28 -            shadow_update_paging_modes(v);
   89.29 +        if ( paging_enable(d, PG_SH_enable) == 0 ) 
   89.30 +            paging_update_paging_modes(v);
   89.31  
   89.32      if ( supervisor_mode_kernel )
   89.33      {
    90.1 --- a/xen/arch/x86/domctl.c	Fri Feb 09 14:43:22 2007 -0600
    90.2 +++ b/xen/arch/x86/domctl.c	Wed Feb 14 19:01:35 2007 +0000
    90.3 @@ -19,7 +19,7 @@
    90.4  #include <xen/trace.h>
    90.5  #include <xen/console.h>
    90.6  #include <xen/iocap.h>
    90.7 -#include <asm/shadow.h>
    90.8 +#include <asm/paging.h>
    90.9  #include <asm/irq.h>
   90.10  #include <asm/hvm/hvm.h>
   90.11  #include <asm/hvm/support.h>
   90.12 @@ -42,7 +42,7 @@ long arch_do_domctl(
   90.13          d = get_domain_by_id(domctl->domain);
   90.14          if ( d != NULL )
   90.15          {
   90.16 -            ret = shadow_domctl(d,
   90.17 +            ret = paging_domctl(d,
   90.18                                  &domctl->u.shadow_op,
   90.19                                  guest_handle_cast(u_domctl, void));
   90.20              put_domain(d);
   90.21 @@ -326,10 +326,6 @@ long arch_do_domctl(
   90.22          struct hvm_domain_context c;
   90.23          struct domain             *d;
   90.24  
   90.25 -        c.cur = 0;
   90.26 -        c.size = domctl->u.hvmcontext.size;
   90.27 -        c.data = NULL;
   90.28 -
   90.29          ret = -ESRCH;
   90.30          if ( (d = get_domain_by_id(domctl->domain)) == NULL )
   90.31              break;
   90.32 @@ -338,19 +334,38 @@ long arch_do_domctl(
   90.33          if ( !is_hvm_domain(d) ) 
   90.34              goto gethvmcontext_out;
   90.35  
   90.36 +        c.cur = 0;
   90.37 +        c.size = hvm_save_size(d);
   90.38 +        c.data = NULL;
   90.39 +
   90.40 +        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
   90.41 +        {
   90.42 +            /* Client is querying for the correct buffer size */
   90.43 +            domctl->u.hvmcontext.size = c.size;
   90.44 +            ret = 0;
   90.45 +            goto gethvmcontext_out;            
   90.46 +        }
   90.47 +
   90.48 +        /* Check that the client has a big enough buffer */
   90.49 +        ret = -ENOSPC;
   90.50 +        if ( domctl->u.hvmcontext.size < c.size ) 
   90.51 +            goto gethvmcontext_out;
   90.52 +
   90.53 +        /* Allocate our own marshalling buffer */
   90.54          ret = -ENOMEM;
   90.55          if ( (c.data = xmalloc_bytes(c.size)) == NULL )
   90.56              goto gethvmcontext_out;
   90.57  
   90.58          ret = hvm_save(d, &c);
   90.59  
   90.60 +        domctl->u.hvmcontext.size = c.cur;
   90.61          if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
   90.62              ret = -EFAULT;
   90.63  
   90.64 +    gethvmcontext_out:
   90.65          if ( copy_to_guest(u_domctl, domctl, 1) )
   90.66              ret = -EFAULT;
   90.67  
   90.68 -    gethvmcontext_out:
   90.69          if ( c.data != NULL )
   90.70              xfree(c.data);
   90.71  
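
XEN_DOMCTL_gethvmcontext now has a size-query mode: passing a NULL buffer handle returns the required size in u.hvmcontext.size, and a real fetch fails with -ENOSPC if the caller's buffer is smaller than hvm_save_size(). A toolstack caller would therefore issue the domctl twice, roughly as below; the xc_domctl_gethvmcontext() wrapper and its struct are schematic stand-ins, not the actual libxc interface:

    #include <stdlib.h>

    struct hvmctx_op { unsigned int size; void *buffer; };
    extern int xc_domctl_gethvmcontext(int domid, struct hvmctx_op *op);

    static void *fetch_hvm_context(int domid, unsigned int *size_out)
    {
        struct hvmctx_op op = { .size = 0, .buffer = NULL };
        void *buf;

        /* Pass 1: NULL buffer -> hypervisor reports the required size. */
        if (xc_domctl_gethvmcontext(domid, &op) != 0)
            return NULL;

        /* Pass 2: supply a buffer at least that large. */
        if ((buf = malloc(op.size)) == NULL)
            return NULL;
        op.buffer = buf;
        if (xc_domctl_gethvmcontext(domid, &op) != 0) {
            free(buf);
            return NULL;
        }
        *size_out = op.size;   /* bytes actually written (c.cur) */
        return buf;
    }
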
   90.72 @@ -383,6 +398,7 @@ long arch_do_domctl(
   90.73  
   90.74          put_domain(d);
   90.75      }
   90.76 +    break;
   90.77  
   90.78      case XEN_DOMCTL_get_address_size:
   90.79      {
   90.80 @@ -396,7 +412,11 @@ long arch_do_domctl(
   90.81  
   90.82          ret = 0;
   90.83          put_domain(d);
   90.84 +
   90.85 +        if ( copy_to_guest(u_domctl, domctl, 1) )
   90.86 +            ret = -EFAULT;
   90.87      }
   90.88 +    break;
   90.89  
   90.90      default:
   90.91          ret = -ENOSYS;
    91.1 --- a/xen/arch/x86/hvm/Makefile	Fri Feb 09 14:43:22 2007 -0600
    91.2 +++ b/xen/arch/x86/hvm/Makefile	Wed Feb 14 19:01:35 2007 +0000
    91.3 @@ -15,3 +15,4 @@ obj-y += vpt.o
    91.4  obj-y += vioapic.o
    91.5  obj-y += vlapic.o
    91.6  obj-y += vpic.o
    91.7 +obj-y += save.o
    92.1 --- a/xen/arch/x86/hvm/hpet.c	Fri Feb 09 14:43:22 2007 -0600
    92.2 +++ b/xen/arch/x86/hvm/hpet.c	Wed Feb 14 19:01:35 2007 +0000
    92.3 @@ -409,7 +409,7 @@ static int hpet_load(struct domain *d, h
    92.4      return 0;
    92.5  }
    92.6  
    92.7 -HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load);
    92.8 +HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
    92.9  
   92.10  void hpet_init(struct vcpu *v)
   92.11  {
    93.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Feb 09 14:43:22 2007 -0600
    93.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Feb 14 19:01:35 2007 +0000
    93.3 @@ -30,11 +30,10 @@
    93.4  #include <xen/hypercall.h>
    93.5  #include <xen/guest_access.h>
    93.6  #include <xen/event.h>
    93.7 -#include <xen/shadow.h>
    93.8  #include <asm/current.h>
    93.9  #include <asm/e820.h>
   93.10  #include <asm/io.h>
   93.11 -#include <asm/shadow.h>
   93.12 +#include <asm/paging.h>
   93.13  #include <asm/regs.h>
   93.14  #include <asm/cpufeature.h>
   93.15  #include <asm/processor.h>
   93.16 @@ -106,7 +105,6 @@ void hvm_migrate_timers(struct vcpu *v)
   93.17      pit_migrate_timers(v);
   93.18      rtc_migrate_timers(v);
   93.19      hpet_migrate_timers(v);
   93.20 -    pmtimer_migrate_timers(v);
   93.21      if ( vcpu_vlapic(v)->pt.enabled )
   93.22          migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
   93.23  }
   93.24 @@ -156,7 +154,7 @@ int hvm_domain_initialise(struct domain 
   93.25      spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
   93.26      spin_lock_init(&d->arch.hvm_domain.irq_lock);
   93.27  
   93.28 -    rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
   93.29 +    rc = paging_enable(d, PG_SH_enable|PG_refcounts|PG_translate|PG_external);
   93.30      if ( rc != 0 )
   93.31          return rc;
   93.32  
   93.33 @@ -170,7 +168,6 @@ void hvm_domain_destroy(struct domain *d
   93.34  {
   93.35      pit_deinit(d);
   93.36      rtc_deinit(d);
   93.37 -    pmtimer_deinit(d);
   93.38      hpet_deinit(d);
   93.39  
   93.40      if ( d->arch.hvm_domain.shared_page_va )
   93.41 @@ -227,7 +224,8 @@ static int hvm_load_cpu_ctxt(struct doma
   93.42      return 0;
   93.43  }
   93.44  
   93.45 -HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
   93.46 +HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
   93.47 +                          1, HVMSR_PER_VCPU);
   93.48  
   93.49  int hvm_vcpu_initialise(struct vcpu *v)
   93.50  {
   93.51 @@ -273,6 +271,24 @@ void hvm_vcpu_destroy(struct vcpu *v)
   93.52      /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
   93.53  }
   93.54  
   93.55 +
   93.56 +void hvm_vcpu_reset(struct vcpu *v)
   93.57 +{
   93.58 +    vcpu_pause(v);
   93.59 +
   93.60 +    vlapic_reset(vcpu_vlapic(v));
   93.61 +
   93.62 +    hvm_funcs.vcpu_initialise(v);
   93.63 +
   93.64 +    set_bit(_VCPUF_down, &v->vcpu_flags);
   93.65 +    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
   93.66 +    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
   93.67 +    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
   93.68 +    clear_bit(_VCPUF_blocked, &v->vcpu_flags);
   93.69 +
   93.70 +    vcpu_unpause(v);
   93.71 +}
   93.72 +
   93.73  static void hvm_vcpu_down(void)
   93.74  {
   93.75      struct vcpu *v = current;
   93.76 @@ -366,7 +382,7 @@ static int __hvm_copy(void *buf, paddr_t
   93.77          count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
   93.78  
   93.79          if ( virt )
   93.80 -            mfn = get_mfn_from_gpfn(shadow_gva_to_gfn(current, addr));
   93.81 +            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
   93.82          else
   93.83              mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);
   93.84  
   93.85 @@ -583,7 +599,7 @@ void hvm_do_hypercall(struct cpu_user_re
   93.86          return;
   93.87      }
   93.88  
   93.89 -    if ( current->arch.shadow.mode->guest_levels == 4 )
   93.90 +    if ( current->arch.paging.mode->guest_levels == 4 )
   93.91      {
   93.92          pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
   93.93                                                         pregs->rsi,
   93.94 @@ -624,20 +640,13 @@ void hvm_hypercall_page_initialise(struc
   93.95   */
   93.96  int hvm_bringup_ap(int vcpuid, int trampoline_vector)
   93.97  {
   93.98 -    struct vcpu *bsp = current, *v;
   93.99 -    struct domain *d = bsp->domain;
  93.100 +    struct vcpu *v;
  93.101 +    struct domain *d = current->domain;
  93.102      struct vcpu_guest_context *ctxt;
  93.103      int rc = 0;
  93.104  
  93.105      BUG_ON(!is_hvm_domain(d));
  93.106  
  93.107 -    if ( bsp->vcpu_id != 0 )
  93.108 -    {
  93.109 -        gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
  93.110 -        domain_crash(bsp->domain);
  93.111 -        return -EINVAL;
  93.112 -    }
  93.113 -
  93.114      if ( (v = d->vcpu[vcpuid]) == NULL )
  93.115          return -ENOENT;
  93.116  
  93.117 @@ -668,8 +677,8 @@ int hvm_bringup_ap(int vcpuid, int tramp
  93.118          goto out;
  93.119      }
  93.120  
  93.121 -    if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
  93.122 -        vcpu_wake(d->vcpu[vcpuid]);
  93.123 +    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
  93.124 +        vcpu_wake(v);
  93.125      gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
  93.126  
  93.127   out:
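
The __hvm_copy hunk above only swaps shadow_gva_to_gfn for paging_gva_to_gfn, but the surrounding loop is worth spelling out: the copy is split at guest page boundaries and each chunk is translated separately, since consecutive guest-virtual pages need not be contiguous in machine memory. A simplified sketch of that pattern (read direction only, with dirty-logging and most error handling elided; not a drop-in replacement for __hvm_copy):

    static int copy_from_guest_va_sketch(void *buf, unsigned long addr, int size)
    {
        char *dst = buf;
        int count, todo = size;

        while ( todo > 0 )
        {
            unsigned long mfn;
            void *va;

            /* Bytes remaining in the current guest page. */
            count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

            /* Translate this page only; the next chunk may map elsewhere. */
            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
            if ( mfn == INVALID_MFN )
                return todo;                  /* bytes left uncopied */

            va = map_domain_page(mfn);
            memcpy(dst, (char *)va + (addr & ~PAGE_MASK), count);
            unmap_domain_page(va);

            addr += count;
            dst  += count;
            todo -= count;
        }

        return 0;
    }
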
    94.1 --- a/xen/arch/x86/hvm/i8254.c	Fri Feb 09 14:43:22 2007 -0600
    94.2 +++ b/xen/arch/x86/hvm/i8254.c	Wed Feb 14 19:01:35 2007 +0000
    94.3 @@ -445,7 +445,7 @@ static int pit_load(struct domain *d, hv
    94.4      return 0;
    94.5  }
    94.6  
    94.7 -HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load);
    94.8 +HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
    94.9  
   94.10  static void pit_reset(void *opaque)
   94.11  {
    95.1 --- a/xen/arch/x86/hvm/intercept.c	Fri Feb 09 14:43:22 2007 -0600
    95.2 +++ b/xen/arch/x86/hvm/intercept.c	Wed Feb 14 19:01:35 2007 +0000
    95.3 @@ -29,8 +29,6 @@
    95.4  #include <asm/current.h>
    95.5  #include <io_ports.h>
    95.6  #include <xen/event.h>
    95.7 -#include <xen/compile.h>
    95.8 -#include <public/version.h>
    95.9  
   95.10  
   95.11  extern struct hvm_mmio_handler hpet_mmio_handler;
   95.12 @@ -157,180 +155,6 @@ static inline void hvm_mmio_access(struc
   95.13      }
   95.14  }
   95.15  
   95.16 -/* List of handlers for various HVM save and restore types */
   95.17 -static struct { 
   95.18 -    hvm_save_handler save;
   95.19 -    hvm_load_handler load; 
   95.20 -    const char *name;
   95.21 -} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
   95.22 -
   95.23 -/* Init-time function to add entries to that list */
   95.24 -void hvm_register_savevm(uint16_t typecode, 
   95.25 -                         const char *name,
   95.26 -                         hvm_save_handler save_state,
   95.27 -                         hvm_load_handler load_state)
   95.28 -{
   95.29 -    ASSERT(typecode <= HVM_SAVE_CODE_MAX);
   95.30 -    ASSERT(hvm_sr_handlers[typecode].save == NULL);
   95.31 -    ASSERT(hvm_sr_handlers[typecode].load == NULL);
   95.32 -    hvm_sr_handlers[typecode].save = save_state;
   95.33 -    hvm_sr_handlers[typecode].load = load_state;
   95.34 -    hvm_sr_handlers[typecode].name = name;
   95.35 -}
   95.36 -
   95.37 -
   95.38 -int hvm_save(struct domain *d, hvm_domain_context_t *h)
   95.39 -{
   95.40 -    uint32_t eax, ebx, ecx, edx;
   95.41 -    char *c;
   95.42 -    struct hvm_save_header hdr;
   95.43 -    struct hvm_save_end end;
   95.44 -    hvm_save_handler handler;
   95.45 -    uint16_t i;
   95.46 -
   95.47 -    hdr.magic = HVM_FILE_MAGIC;
   95.48 -    hdr.version = HVM_FILE_VERSION;
   95.49 -
   95.50 -    /* Save some CPUID bits */
   95.51 -    cpuid(1, &eax, &ebx, &ecx, &edx);
   95.52 -    hdr.cpuid = eax;
   95.53 -
   95.54 -    /* Save xen changeset */
   95.55 -    c = strrchr(XEN_CHANGESET, ':');
   95.56 -    if ( c )
   95.57 -        hdr.changeset = simple_strtoll(c, NULL, 16);
   95.58 -    else 
   95.59 -        hdr.changeset = -1ULL; /* Unknown */
   95.60 -
   95.61 -    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
   95.62 -    {
   95.63 -        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
   95.64 -        return -EFAULT;
   95.65 -    } 
   95.66 -
   95.67 -    /* Save all available kinds of state */
   95.68 -    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
   95.69 -    {
   95.70 -        handler = hvm_sr_handlers[i].save;
   95.71 -        if ( handler != NULL ) 
   95.72 -        {
   95.73 -            gdprintk(XENLOG_INFO, "HVM save: %s\n",  hvm_sr_handlers[i].name);
   95.74 -            if ( handler(d, h) != 0 ) 
   95.75 -            {
   95.76 -                gdprintk(XENLOG_ERR, 
   95.77 -                         "HVM save: failed to save type %"PRIu16"\n", i);
   95.78 -                return -EFAULT;
   95.79 -            } 
   95.80 -        }
   95.81 -    }
   95.82 -
   95.83 -    /* Save an end-of-file marker */
   95.84 -    if ( hvm_save_entry(END, 0, h, &end) != 0 )
   95.85 -    {
   95.86 -        /* Run out of data */
   95.87 -        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
   95.88 -        return -EFAULT;
   95.89 -    }
   95.90 -
   95.91 -    /* Save macros should not have let us overrun */
   95.92 -    ASSERT(h->cur <= h->size);
   95.93 -    return 0;
   95.94 -}
   95.95 -
   95.96 -int hvm_load(struct domain *d, hvm_domain_context_t *h)
   95.97 -{
   95.98 -    uint32_t eax, ebx, ecx, edx;
   95.99 -    char *c;
  95.100 -    uint64_t cset;
  95.101 -    struct hvm_save_header hdr;
  95.102 -    struct hvm_save_descriptor *desc;
  95.103 -    hvm_load_handler handler;
  95.104 -    struct vcpu *v;
  95.105 -    
  95.106 -    /* Read the save header, which must be first */
  95.107 -    if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 
  95.108 -        return -1;
  95.109 -
  95.110 -    if (hdr.magic != HVM_FILE_MAGIC) {
  95.111 -        gdprintk(XENLOG_ERR, 
  95.112 -                 "HVM restore: bad magic number %#"PRIx32"\n", hdr.magic);
  95.113 -        return -1;
  95.114 -    }
  95.115 -
  95.116 -    if (hdr.version != HVM_FILE_VERSION) {
  95.117 -        gdprintk(XENLOG_ERR, 
  95.118 -                 "HVM restore: unsupported version %u\n", hdr.version);
  95.119 -        return -1;
  95.120 -    }
  95.121 -
  95.122 -    cpuid(1, &eax, &ebx, &ecx, &edx);
  95.123 -    /*TODO: need to define how big a difference is acceptable */
  95.124 -    if (hdr.cpuid != eax)
  95.125 -        gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") "
  95.126 -               "does not match host (%#"PRIx32").\n", hdr.cpuid, eax);
  95.127 -
  95.128 -
  95.129 -    c = strrchr(XEN_CHANGESET, ':');
  95.130 -    if ( hdr.changeset == -1ULL )
  95.131 -        gdprintk(XENLOG_WARNING, 
  95.132 -                 "HVM restore: Xen changeset was not saved.\n");
  95.133 -    else if ( c == NULL )
  95.134 -        gdprintk(XENLOG_WARNING, 
  95.135 -                 "HVM restore: Xen changeset is not available.\n");
  95.136 -    else
  95.137 -    {
  95.138 -        cset = simple_strtoll(c, NULL, 16);
  95.139 -        if ( hdr.changeset != cset )
  95.140 -        gdprintk(XENLOG_WARNING, "HVM restore: saved Xen changeset (%#"PRIx64
  95.141 -                 ") does not match host (%#"PRIx64").\n", hdr.changeset, cset);
  95.142 -    }
  95.143 -
  95.144 -    /* Down all the vcpus: we only re-enable the ones that had state saved. */
  95.145 -    for_each_vcpu(d, v) 
  95.146 -        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
  95.147 -            vcpu_sleep_nosync(v);
  95.148 -
  95.149 -    while(1) {
  95.150 -
  95.151 -        if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
  95.152 -        {
  95.153 -            /* Run out of data */
  95.154 -            gdprintk(XENLOG_ERR, 
  95.155 -                     "HVM restore: save did not end with a null entry\n");
  95.156 -            return -1;
  95.157 -        }
  95.158 -        
  95.159 -        /* Read the typecode of the next entry  and check for the end-marker */
  95.160 -        desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
  95.161 -        if ( desc->typecode == 0 )
  95.162 -            return 0; 
  95.163 -        
  95.164 -        /* Find the handler for this entry */
  95.165 -        if ( desc->typecode > HVM_SAVE_CODE_MAX 
  95.166 -             || (handler = hvm_sr_handlers[desc->typecode].load) == NULL ) 
  95.167 -        {
  95.168 -            gdprintk(XENLOG_ERR, 
  95.169 -                     "HVM restore: unknown entry typecode %u\n", 
  95.170 -                     desc->typecode);
  95.171 -            return -1;
  95.172 -        }
  95.173 -
  95.174 -        /* Load the entry */
  95.175 -        gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",  
  95.176 -                 hvm_sr_handlers[desc->typecode].name, desc->instance);
  95.177 -        if ( handler(d, h) != 0 ) 
  95.178 -        {
  95.179 -            gdprintk(XENLOG_ERR, 
  95.180 -                     "HVM restore: failed to load entry %u/%u\n", 
  95.181 -                     desc->typecode, desc->instance);
  95.182 -            return -1;
  95.183 -        }
  95.184 -    }
  95.185 -
  95.186 -    /* Not reached */
  95.187 -}
  95.188 -
  95.189 -
  95.190  int hvm_buffered_io_intercept(ioreq_t *p)
  95.191  {
  95.192      struct vcpu *v = current;
    96.1 --- a/xen/arch/x86/hvm/io.c	Fri Feb 09 14:43:22 2007 -0600
    96.2 +++ b/xen/arch/x86/hvm/io.c	Wed Feb 14 19:01:35 2007 +0000
    96.3 @@ -32,7 +32,7 @@
    96.4  #include <asm/processor.h>
    96.5  #include <asm/msr.h>
    96.6  #include <asm/apic.h>
    96.7 -#include <asm/shadow.h>
    96.8 +#include <asm/paging.h>
    96.9  #include <asm/hvm/hvm.h>
   96.10  #include <asm/hvm/support.h>
   96.11  #include <asm/hvm/vpt.h>
    97.1 --- a/xen/arch/x86/hvm/irq.c	Fri Feb 09 14:43:22 2007 -0600
    97.2 +++ b/xen/arch/x86/hvm/irq.c	Wed Feb 14 19:01:35 2007 +0000
    97.3 @@ -480,6 +480,9 @@ static int irq_load_link(struct domain *
    97.4      return 0;
    97.5  }
    97.6  
    97.7 -HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci);
    97.8 -HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa);
    97.9 -HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link);
   97.10 +HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
   97.11 +                          1, HVMSR_PER_DOM);
   97.12 +HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, 
   97.13 +                          1, HVMSR_PER_DOM);
   97.14 +HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
   97.15 +                          1, HVMSR_PER_DOM);
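
These registrations show the new shape of HVM_REGISTER_SAVE_RESTORE used throughout the changeset: besides the save and load handlers, each device now declares how many record instances it emits and whether the record is per-domain (HVMSR_PER_DOM) or per-vcpu (HVMSR_PER_VCPU), which feed the new hvm_save_size() accounting in save.c. A hypothetical device "FOO" would plug in along these lines (HVM_SAVE_CODE(FOO) and struct hvm_save_foo are invented names for illustration):

    static int foo_save(struct domain *d, hvm_domain_context_t *h)
    {
        struct hvm_save_foo rec;

        memset(&rec, 0, sizeof(rec));
        /* ... snapshot the emulated device into rec ... */
        return hvm_save_entry(FOO, 0, h, &rec);    /* single instance, number 0 */
    }

    static int foo_load(struct domain *d, hvm_domain_context_t *h)
    {
        struct hvm_save_foo rec;

        if ( hvm_load_entry(FOO, h, &rec) )
            return -EINVAL;
        /* ... program the emulated device from rec ... */
        return 0;
    }

    /* One record for the whole domain. */
    HVM_REGISTER_SAVE_RESTORE(FOO, foo_save, foo_load, 1, HVMSR_PER_DOM);
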
    98.1 --- a/xen/arch/x86/hvm/platform.c	Fri Feb 09 14:43:22 2007 -0600
    98.2 +++ b/xen/arch/x86/hvm/platform.c	Wed Feb 14 19:01:35 2007 +0000
    98.3 @@ -21,7 +21,6 @@
    98.4  #include <xen/config.h>
    98.5  #include <xen/types.h>
    98.6  #include <xen/mm.h>
    98.7 -#include <xen/shadow.h>
    98.8  #include <xen/domain_page.h>
    98.9  #include <asm/page.h>
   98.10  #include <xen/event.h>
   98.11 @@ -29,6 +28,7 @@
   98.12  #include <xen/sched.h>
   98.13  #include <asm/regs.h>
   98.14  #include <asm/x86_emulate.h>
   98.15 +#include <asm/paging.h>
   98.16  #include <asm/hvm/hvm.h>
   98.17  #include <asm/hvm/support.h>
   98.18  #include <asm/hvm/io.h>
   98.19 @@ -513,14 +513,30 @@ static int mmio_decode(int address_bytes
   98.20          mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
   98.21  
   98.22          switch ( ins_subtype ) {
   98.23 -        case 7: /* cmp $imm, m32/16 */
   98.24 -            mmio_op->instr = INSTR_CMP;
   98.25 +        case 0: /* add $imm, m32/16 */
   98.26 +            mmio_op->instr = INSTR_ADD;
   98.27              return DECODE_success;
   98.28  
   98.29          case 1: /* or $imm, m32/16 */
   98.30              mmio_op->instr = INSTR_OR;
   98.31              return DECODE_success;
   98.32  
   98.33 +        case 4: /* and $imm, m32/16 */
   98.34 +            mmio_op->instr = INSTR_AND;
   98.35 +            return DECODE_success;
   98.36 +
   98.37 +        case 5: /* sub $imm, m32/16 */
   98.38 +            mmio_op->instr = INSTR_SUB;
   98.39 +            return DECODE_success;
   98.40 +
   98.41 +        case 6: /* xor $imm, m32/16 */
   98.42 +            mmio_op->instr = INSTR_XOR;
   98.43 +            return DECODE_success;
   98.44 +
   98.45 +        case 7: /* cmp $imm, m32/16 */
   98.46 +            mmio_op->instr = INSTR_CMP;
   98.47 +            return DECODE_success;
   98.48 +
   98.49          default:
   98.50              printk("%x/%x, This opcode isn't handled yet!\n",
   98.51                     *opcode, ins_subtype);
   98.52 @@ -674,6 +690,39 @@ static int mmio_decode(int address_bytes
   98.53          } else
   98.54              return DECODE_failure;
   98.55  
   98.56 +    case 0xFE:
   98.57 +    case 0xFF:
   98.58 +    {
   98.59 +        unsigned char ins_subtype = (opcode[1] >> 3) & 7;
   98.60 +
   98.61 +        if ( opcode[0] == 0xFE ) {
   98.62 +            *op_size = BYTE;
   98.63 +            GET_OP_SIZE_FOR_BYTE(size_reg);
   98.64 +        } else {
   98.65 +            GET_OP_SIZE_FOR_NONEBYTE(*op_size);
   98.66 +            size_reg = *op_size;
   98.67 +        }
   98.68 +
   98.69 +        mmio_op->immediate = 1;
   98.70 +        mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
   98.71 +        mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
   98.72 +
   98.73 +        switch ( ins_subtype ) {
   98.74 +        case 0: /* inc */
   98.75 +            mmio_op->instr = INSTR_ADD;
   98.76 +            return DECODE_success;
   98.77 +
   98.78 +        case 1: /* dec */
   98.79 +            mmio_op->instr = INSTR_SUB;
   98.80 +            return DECODE_success;
   98.81 +
   98.82 +        default:
   98.83 +            printk("%x/%x, This opcode isn't handled yet!\n",
   98.84 +                   *opcode, ins_subtype);
   98.85 +            return DECODE_failure;
   98.86 +        }
   98.87 +    }
   98.88 +
   98.89      case 0x0F:
   98.90          break;
   98.91  
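
The new cases in both hunks fall out of how x86 "group" opcodes work: the operation is encoded not in the opcode byte but in bits 5:3 of the ModRM byte, which is exactly what ins_subtype = (opcode[1] >> 3) & 7 extracts. For the 0x80/0x81/0x83 immediate group the mapping the decoder now covers is the standard one (adc /2 and sbb /3 remain undecoded):

    /* reg-field (/digit) to INSTR_* mapping for the 0x80/0x81/0x83 group,
     * matching the switch above; -1 marks sub-opcodes still unhandled. */
    static const int group1_instr[8] = {
        INSTR_ADD,   /* /0  add $imm, r/m */
        INSTR_OR,    /* /1  or            */
        -1,          /* /2  adc           */
        -1,          /* /3  sbb           */
        INSTR_AND,   /* /4  and           */
        INSTR_SUB,   /* /5  sub           */
        INSTR_XOR,   /* /6  xor           */
        INSTR_CMP,   /* /7  cmp           */
    };
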
   98.92 @@ -793,7 +842,7 @@ void send_pio_req(unsigned long port, un
   98.93      if ( value_is_ptr )   /* get physical address of data */
   98.94      {
   98.95          if ( hvm_paging_enabled(current) )
   98.96 -            p->data = shadow_gva_to_gpa(current, value);
   98.97 +            p->data = paging_gva_to_gpa(current, value);
   98.98          else
   98.99              p->data = value; /* guest VA == guest PA */
  98.100      }
  98.101 @@ -849,7 +898,7 @@ static void send_mmio_req(unsigned char 
  98.102      if ( value_is_ptr )
  98.103      {
  98.104          if ( hvm_paging_enabled(v) )
  98.105 -            p->data = shadow_gva_to_gpa(v, value);
  98.106 +            p->data = paging_gva_to_gpa(v, value);
  98.107          else
  98.108              p->data = value; /* guest VA == guest PA */
  98.109      }
  98.110 @@ -965,7 +1014,7 @@ void handle_mmio(unsigned long gpa)
  98.111          if ( ad_size == WORD )
  98.112              addr &= 0xFFFF;
  98.113          addr += hvm_get_segment_base(v, x86_seg_es);
  98.114 -        if ( shadow_gva_to_gpa(v, addr) == gpa )
  98.115 +        if ( paging_gva_to_gpa(v, addr) == gpa )
  98.116          {
  98.117              enum x86_segment seg;
  98.118  
    99.1 --- a/xen/arch/x86/hvm/pmtimer.c	Fri Feb 09 14:43:22 2007 -0600
    99.2 +++ b/xen/arch/x86/hvm/pmtimer.c	Wed Feb 14 19:01:35 2007 +0000
    99.3 @@ -2,17 +2,6 @@
    99.4  #include <asm/hvm/io.h>
    99.5  #include <asm/hvm/support.h>
    99.6  
    99.7 -#define TMR_STS (1 << 0)
    99.8 -static void pmt_update_status(void *opaque)
    99.9 -{
   99.10 -   PMTState *s = opaque;
   99.11 -   s->pm1_status |= TMR_STS;
   99.12 -
   99.13 -   /* TODO: When TMR_EN == 1, generate a SCI event */
   99.14 -
   99.15 -   set_timer(&s->timer, NOW() + (1000000000ULL << 31) / FREQUENCE_PMTIMER);
   99.16 -}
   99.17 -
   99.18  static int handle_pmt_io(ioreq_t *p)
   99.19  {
   99.20      struct vcpu *v = current;
   99.21 @@ -30,42 +19,62 @@ static int handle_pmt_io(ioreq_t *p)
   99.22          /* PM_TMR_BLK is read-only */
   99.23          return 1;
   99.24      } else if (p->dir == 1) { /* read */
   99.25 +        /* Set the correct value in the timer, accounting for time
   99.26 +         * elapsed since the last time we did that. */
   99.27          curr_gtime = hvm_get_guest_time(s->vcpu);
   99.28 -        s->pm1_timer += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
   99.29 -        p->data = s->pm1_timer;
   99.30 +        s->pm.timer += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
   99.31 +        p->data = s->pm.timer;
   99.32          s->last_gtime = curr_gtime;
   99.33          return 1;
   99.34      }
   99.35      return 0;
   99.36  }
   99.37  
   99.38 +static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
   99.39 +{
   99.40 +    PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
   99.41 +    uint32_t x;
   99.42 +
   99.43 +    /* Update the counter to the guest's current time.  We always save
   99.44 +     * with the domain paused, so the saved time should be after the
   99.45 +     * last_gtime, but just in case, make sure we only go forwards */
   99.46 +    x = ((s->vcpu->arch.hvm_vcpu.guest_time - s->last_gtime) * s->scale) >> 32;
   99.47 +    if ( x < 1UL<<31 )
   99.48 +        s->pm.timer += x;
   99.49 +    return hvm_save_entry(PMTIMER, 0, h, &s->pm);
   99.50 +}
   99.51 +
   99.52 +static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
   99.53 +{
   99.54 +    PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
   99.55 +
   99.56 +    /* Reload the counter */
   99.57 +    if ( hvm_load_entry(PMTIMER, h, &s->pm) )
   99.58 +        return -EINVAL;
   99.59 +
   99.60 +    /* Calculate future counter values from now. */
   99.61 +    s->last_gtime = hvm_get_guest_time(s->vcpu);
   99.62 +    
   99.63 +    return 0;
   99.64 +}
   99.65 +
   99.66 +HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load, 
   99.67 +                          1, HVMSR_PER_DOM);
   99.68 +
   99.69 +
   99.70  void pmtimer_init(struct vcpu *v, int base)
   99.71  {
   99.72      PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
   99.73  
   99.74 -    s->pm1_timer = 0;
   99.75 -    s->pm1_status = 0;
   99.76 +    s->pm.timer = 0;
   99.77      s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / ticks_per_sec(v);
   99.78      s->vcpu = v;
   99.79  
   99.80 -    init_timer(&s->timer, pmt_update_status, s, v->processor);
   99.81 -    /* ACPI supports a 32-bit power management timer */
   99.82 -    set_timer(&s->timer, NOW() + (1000000000ULL << 31) / FREQUENCE_PMTIMER);
   99.83 -    
   99.84 +    /* Not implemented: we should set TMR_STS (bit 0 of PM1a_STS) every
   99.85 +     * time the timer's top bit flips, and generate an SCI if TMR_EN
   99.86 +     * (bit 0 of PM1a_EN) is set.  For now, those registers are in
   99.87 +     * qemu-dm, and we just calculate the timer's value on demand. */  
   99.88 +
   99.89      register_portio_handler(v->domain, base, 4, handle_pmt_io);
   99.90  }
   99.91  
   99.92 -void pmtimer_migrate_timers(struct vcpu *v)
   99.93 -{
   99.94 -    struct PMTState *vpmt = &v->domain->arch.hvm_domain.pl_time.vpmt;
   99.95 -
   99.96 -    if (vpmt->vcpu == v)
   99.97 -        migrate_timer(&vpmt->timer, v->processor);
   99.98 -}
   99.99 -
  99.100 -void pmtimer_deinit(struct domain *d)
  99.101 -{
  99.102 -    PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
  99.103 -
  99.104 -    kill_timer(&s->timer);
  99.105 -}
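
The rewritten timer arithmetic is a 32.32 fixed-point conversion from guest time-stamp ticks to ACPI PM-timer ticks: pmtimer_init computes scale = (FREQUENCE_PMTIMER << 32) / ticks_per_sec(v) once, and each read then adds (elapsed_guest_ticks * scale) >> 32 to the counter. A standalone illustration with made-up numbers (the PM timer runs at roughly 3.579545 MHz; the 2 GHz guest clock is just an assumed example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t pmt_freq   = 3579545ULL;      /* ACPI PM timer, ~3.58 MHz */
        const uint64_t guest_freq = 2000000000ULL;   /* assumed 2 GHz guest clock */

        /* 32.32 fixed-point ratio, as in pmtimer_init(). */
        const uint64_t scale = (pmt_freq << 32) / guest_freq;

        /* Half a second of guest time elapses between two reads. */
        uint64_t elapsed   = guest_freq / 2;
        uint32_t pmt_ticks = (uint32_t)((elapsed * scale) >> 32);

        /* Expect roughly 3579545 / 2 = 1789772 PM-timer ticks. */
        printf("scale = %llu, pmt_ticks = %u\n",
               (unsigned long long)scale, pmt_ticks);
        return 0;
    }
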
   100.1 --- a/xen/arch/x86/hvm/rtc.c	Fri Feb 09 14:43:22 2007 -0600
   100.2 +++ b/xen/arch/x86/hvm/rtc.c	Wed Feb 14 19:01:35 2007 +0000
   100.3 @@ -417,7 +417,7 @@ static int rtc_load(struct domain *d, hv
   100.4      return 0;
   100.5  }
   100.6  
   100.7 -HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load);
   100.8 +HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
   100.9  
  100.10  
  100.11  void rtc_init(struct vcpu *v, int base)
   101.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   101.2 +++ b/xen/arch/x86/hvm/save.c	Wed Feb 14 19:01:35 2007 +0000
   101.3 @@ -0,0 +1,229 @@
   101.4 +/*
   101.5 + * hvm/save.c: Save and restore HVM guest's emulated hardware state.
   101.6 + *
   101.7 + * Copyright (c) 2004, Intel Corporation.
   101.8 + * Copyright (c) 2007, XenSource Inc.
   101.9 + *
  101.10 + * This program is free software; you can redistribute it and/or modify it
  101.11 + * under the terms and conditions of the GNU General Public License,
  101.12 + * version 2, as published by the Free Software Foundation.
  101.13 + *
  101.14 + * This program is distributed in the hope it will be useful, but WITHOUT
  101.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  101.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  101.17 + * more details.
  101.18 + *
  101.19 + * You should have received a copy of the GNU General Public License along with
  101.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  101.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
  101.22 + */
  101.23 +
  101.24 +#include <xen/config.h>
  101.25 +#include <xen/compile.h>
  101.26 +#include <xen/lib.h>
  101.27 +#include <public/version.h>
  101.28 +#include <xen/sched.h>
  101.29 +
  101.30 +#include <asm/hvm/hvm.h>
  101.31 +#include <asm/hvm/support.h>
  101.32 +#include <asm/hvm/domain.h>
  101.33 +#include <asm/current.h>
  101.34 +
  101.35 +
  101.36 +/* List of handlers for various HVM save and restore types */
  101.37 +static struct { 
  101.38 +    hvm_save_handler save;
  101.39 +    hvm_load_handler load; 
  101.40 +    const char *name;
  101.41 +    size_t size;
  101.42 +    int kind;
  101.43 +} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
  101.44 +
  101.45 +/* Init-time function to add entries to that list */
  101.46 +void hvm_register_savevm(uint16_t typecode, 
  101.47 +                         const char *name,
  101.48 +                         hvm_save_handler save_state,
  101.49 +                         hvm_load_handler load_state,
  101.50 +                         size_t size, int kind)
  101.51 +{
  101.52 +    ASSERT(typecode <= HVM_SAVE_CODE_MAX);
  101.53 +    ASSERT(hvm_sr_handlers[typecode].save == NULL);
  101.54 +    ASSERT(hvm_sr_handlers[typecode].load == NULL);
  101.55 +    hvm_sr_handlers[typecode].save = save_state;
  101.56 +    hvm_sr_handlers[typecode].load = load_state;
  101.57 +    hvm_sr_handlers[typecode].name = name;
  101.58 +    hvm_sr_handlers[typecode].size = size;
  101.59 +    hvm_sr_handlers[typecode].kind = kind;
  101.60 +}
  101.61 +
  101.62 +size_t hvm_save_size(struct domain *d) 
  101.63 +{
  101.64 +    struct vcpu *v;
  101.65 +    size_t sz;
  101.66 +    int i;
  101.67 +    
  101.68 +    /* Basic overhead for header and footer */
  101.69 +    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
  101.70 +
  101.71 +    /* Plus space for each thing we will be saving */
  101.72 +    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
  101.73 +        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
  101.74 +            for_each_vcpu(d, v)
  101.75 +                sz += hvm_sr_handlers[i].size;
  101.76 +        else 
  101.77 +            sz += hvm_sr_handlers[i].size;
  101.78 +
  101.79 +    return sz;
  101.80 +}
  101.81 +
  101.82 +
  101.83 +int hvm_save(struct domain *d, hvm_domain_context_t *h)
  101.84 +{
  101.85 +    uint32_t eax, ebx, ecx, edx;
  101.86 +    char *c;
  101.87 +    struct hvm_save_header hdr;
  101.88 +    struct hvm_save_end end;
  101.89 +    hvm_save_handler handler;
  101.90 +    uint16_t i;
  101.91 +
  101.92 +    hdr.magic = HVM_FILE_MAGIC;
  101.93 +    hdr.version = HVM_FILE_VERSION;
  101.94 +
  101.95 +    /* Save some CPUID bits */
  101.96 +    cpuid(1, &eax, &ebx, &ecx, &edx);
  101.97 +    hdr.cpuid = eax;
  101.98 +
  101.99 +    /* Save xen changeset */
 101.100 +    c = strrchr(XEN_CHANGESET, ':');
 101.101 +    if ( c )
 101.102 +        hdr.changeset = simple_strtoll(c, NULL, 16);
 101.103 +    else 
 101.104 +        hdr.changeset = -1ULL; /* Unknown */
 101.105 +
 101.106 +    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
 101.107 +    {
 101.108 +        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
 101.109 +        return -EFAULT;
 101.110 +    } 
 101.111 +
 101.112 +    /* Save all available kinds of state */
 101.113 +    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
 101.114 +    {
 101.115 +        handler = hvm_sr_handlers[i].save;
 101.116 +        if ( handler != NULL ) 
 101.117 +        {
 101.118 +            gdprintk(XENLOG_INFO, "HVM save: %s\n",  hvm_sr_handlers[i].name);
 101.119 +            if ( handler(d, h) != 0 ) 
 101.120 +            {
 101.121 +                gdprintk(XENLOG_ERR, 
 101.122 +                         "HVM save: failed to save type %"PRIu16"\n", i);
 101.123 +                return -EFAULT;
 101.124 +            } 
 101.125 +        }
 101.126 +    }
 101.127 +
 101.128 +    /* Save an end-of-file marker */
 101.129 +    if ( hvm_save_entry(END, 0, h, &end) != 0 )
 101.130 +    {
 101.131 +        /* Run out of data */
 101.132 +        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
 101.133 +        return -EFAULT;
 101.134 +    }
 101.135 +
 101.136 +    /* Save macros should not have let us overrun */
 101.137 +    ASSERT(h->cur <= h->size);
 101.138 +    return 0;
 101.139 +}
 101.140 +
 101.141 +int hvm_load(struct domain *d, hvm_domain_context_t *h)
 101.142 +{
 101.143 +    uint32_t eax, ebx, ecx, edx;
 101.144 +    char *c;
 101.145 +    uint64_t cset;
 101.146 +    struct hvm_save_header hdr;
 101.147 +    struct hvm_save_descriptor *desc;
 101.148 +    hvm_load_handler handler;
 101.149 +    struct vcpu *v;
 101.150 +    
 101.151 +    /* Read the save header, which must be first */
 101.152 +    if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 
 101.153 +        return -1;
 101.154 +
 101.155 +    if (hdr.magic != HVM_FILE_MAGIC) {
 101.156 +        gdprintk(XENLOG_ERR, 
 101.157 +                 "HVM restore: bad magic number %#"PRIx32"\n", hdr.magic);
 101.158 +        return -1;
 101.159 +    }
 101.160 +
 101.161 +    if (hdr.version != HVM_FILE_VERSION) {
 101.162 +        gdprintk(XENLOG_ERR, 
 101.163 +                 "HVM restore: unsupported version %u\n", hdr.version);
 101.164 +        return -1;
 101.165 +    }
 101.166 +
 101.167 +    cpuid(1, &eax, &ebx, &ecx, &edx);
 101.168 +    /*TODO: need to define how big a difference is acceptable */
 101.169 +    if (hdr.cpuid != eax)
 101.170 +        gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") "
 101.171 +               "does not match host (%#"PRIx32").\n", hdr.cpuid, eax);
 101.172 +
 101.173 +
 101.174 +    c = strrchr(XEN_CHANGESET, ':');
 101.175 +    if ( hdr.changeset == -1ULL )
 101.176 +        gdprintk(XENLOG_WARNING, 
 101.177 +                 "HVM restore: Xen changeset was not saved.\n");
 101.178 +    else if ( c == NULL )
 101.179 +        gdprintk(XENLOG_WARNING, 
 101.180 +                 "HVM restore: Xen changeset is not available.\n");
 101.181 +    else
 101.182 +    {
 101.183 +        cset = simple_strtoll(c, NULL, 16);
 101.184 +        if ( hdr.changeset != cset )
 101.185 +        gdprintk(XENLOG_WARNING, "HVM restore: saved Xen changeset (%#"PRIx64
 101.186 +                 ") does not match host (%#"PRIx64").\n", hdr.changeset, cset);
 101.187 +    }
 101.188 +
 101.189 +    /* Down all the vcpus: we only re-enable the ones that had state saved. */
 101.190 +    for_each_vcpu(d, v) 
 101.191 +        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
 101.192 +            vcpu_sleep_nosync(v);
 101.193 +
 101.194 +    while(1) {
 101.195 +
 101.196 +        if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
 101.197 +        {
 101.198 +            /* Run out of data */
 101.199 +            gdprintk(XENLOG_ERR, 
 101.200 +                     "HVM restore: save did not end with a null entry\n");
 101.201 +            return -1;
 101.202 +        }
 101.203 +        
 101.204 +        /* Read the typecode of the next entry  and check for the end-marker */
 101.205 +        desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
 101.206 +        if ( desc->typecode == 0 )
 101.207 +            return 0; 
 101.208 +        
 101.209 +        /* Find the handler for this entry */
 101.210 +        if ( desc->typecode > HVM_SAVE_CODE_MAX 
 101.211 +             || (handler = hvm_sr_handlers[desc->typecode].load) == NULL ) 
 101.212 +        {
 101.213 +            gdprintk(XENLOG_ERR, 
 101.214 +                     "HVM restore: unknown entry typecode %u\n", 
 101.215 +                     desc->typecode);
 101.216 +            return -1;
 101.217 +        }
 101.218 +
 101.219 +        /* Load the entry */
 101.220 +        gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",  
 101.221 +                 hvm_sr_handlers[desc->typecode].name, desc->instance);
 101.222 +        if ( handler(d, h) != 0 ) 
 101.223 +        {
 101.224 +            gdprintk(XENLOG_ERR, 
 101.225 +                     "HVM restore: failed to load entry %u/%u\n", 
 101.226 +                     desc->typecode, desc->instance);
 101.227 +            return -1;
 101.228 +        }
 101.229 +    }
 101.230 +
 101.231 +    /* Not reached */
 101.232 +}
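
The stream hvm_save() produces is self-describing: a HEADER record, then one record per registered handler and instance, each preceded by a struct hvm_save_descriptor (typecode, instance, length), and finally a descriptor with typecode 0 as the end marker that hvm_load() stops on. A tools-side consumer could walk such an image along these lines (minimal sketch; assumes the buffer was produced by hvm_save above and that the public xen/hvm/save.h header is on the include path):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <xen/hvm/save.h>            /* struct hvm_save_descriptor */

    static void dump_hvm_image(const uint8_t *buf, size_t len)
    {
        size_t off = 0;
        struct hvm_save_descriptor desc;

        while ( off + sizeof(desc) <= len )
        {
            memcpy(&desc, buf + off, sizeof(desc));

            if ( desc.typecode == 0 )                 /* end marker */
                break;

            printf("record: typecode %u instance %u length %u\n",
                   (unsigned)desc.typecode, (unsigned)desc.instance,
                   (unsigned)desc.length);

            off += sizeof(desc) + desc.length;        /* skip the payload */
        }
    }
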
   102.1 --- a/xen/arch/x86/hvm/svm/intr.c	Fri Feb 09 14:43:22 2007 -0600
   102.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Wed Feb 14 19:01:35 2007 +0000
   102.3 @@ -24,10 +24,10 @@
   102.4  #include <xen/lib.h>
   102.5  #include <xen/trace.h>
   102.6  #include <xen/errno.h>
   102.7 -#include <xen/shadow.h>
   102.8  #include <asm/cpufeature.h>
   102.9  #include <asm/processor.h>
  102.10  #include <asm/msr.h>
  102.11 +#include <asm/paging.h>
  102.12  #include <asm/hvm/hvm.h>
  102.13  #include <asm/hvm/io.h>
  102.14  #include <asm/hvm/support.h>
   103.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Feb 09 14:43:22 2007 -0600
   103.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 14 19:01:35 2007 +0000
   103.3 @@ -29,7 +29,8 @@
   103.4  #include <xen/domain_page.h>
   103.5  #include <asm/current.h>
   103.6  #include <asm/io.h>
   103.7 -#include <asm/shadow.h>
   103.8 +#include <asm/paging.h>
   103.9 +#include <asm/p2m.h>
  103.10  #include <asm/regs.h>
  103.11  #include <asm/cpufeature.h>
  103.12  #include <asm/processor.h>
  103.13 @@ -491,9 +492,6 @@ int svm_vmcb_restore(struct vcpu *v, str
  103.14          v->arch.guest_table = pagetable_from_pfn(mfn);
  103.15          if (old_base_mfn)
  103.16               put_page(mfn_to_page(old_base_mfn));
  103.17 -        /*
  103.18 -         * arch.shadow_table should now hold the next CR3 for shadow
  103.19 -         */
  103.20          v->arch.hvm_svm.cpu_cr3 = c->cr3;
  103.21      }
  103.22  
  103.23 @@ -560,7 +558,7 @@ int svm_vmcb_restore(struct vcpu *v, str
  103.24      vmcb->sysenter_esp = c->sysenter_esp;
  103.25      vmcb->sysenter_eip = c->sysenter_eip;
  103.26  
  103.27 -    shadow_update_paging_modes(v);
  103.28 +    paging_update_paging_modes(v);
  103.29      return 0;
  103.30   
  103.31   bad_cr3:
  103.32 @@ -1095,7 +1093,7 @@ static int svm_do_page_fault(unsigned lo
  103.33                  "svm_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
  103.34                  va, (unsigned long)current->arch.hvm_svm.vmcb->rip,
  103.35                  (unsigned long)regs->error_code);
  103.36 -    return shadow_fault(va, regs); 
  103.37 +    return paging_fault(va, regs); 
  103.38  }
  103.39  
  103.40  
  103.41 @@ -1199,6 +1197,8 @@ static void svm_vmexit_do_cpuid(struct v
  103.42          /* So far, we do not support 3DNow for the guest. */
  103.43          clear_bit(X86_FEATURE_3DNOW & 31, &edx);
  103.44          clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
  103.45 +        /* no FFXSR instructions feature. */
  103.46 +        clear_bit(X86_FEATURE_FFXSR & 31, &edx);
  103.47      }
  103.48      else if ( input == 0x80000007 || input == 0x8000000A )
  103.49      {
  103.50 @@ -1728,7 +1728,7 @@ static int svm_set_cr0(unsigned long val
  103.51          v->arch.guest_table = pagetable_from_pfn(mfn);
  103.52          if ( old_base_mfn )
  103.53              put_page(mfn_to_page(old_base_mfn));
  103.54 -        shadow_update_paging_modes(v);
  103.55 +        paging_update_paging_modes(v);
  103.56  
  103.57          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
  103.58                      (unsigned long) (mfn << PAGE_SHIFT));
  103.59 @@ -1751,7 +1751,7 @@ static int svm_set_cr0(unsigned long val
  103.60              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  103.61              return 0;
  103.62          }
  103.63 -        shadow_update_paging_modes(v);
  103.64 +        paging_update_paging_modes(v);
  103.65      }
  103.66      else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
  103.67      {
  103.68 @@ -1761,7 +1761,7 @@ static int svm_set_cr0(unsigned long val
  103.69              clear_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
  103.70          }
  103.71          /* we should take care of this kind of situation */
  103.72 -        shadow_update_paging_modes(v);
  103.73 +        paging_update_paging_modes(v);
  103.74      }
  103.75  
  103.76      return 1;
  103.77 @@ -1864,7 +1864,7 @@ static int mov_to_cr(int gpreg, int cr, 
  103.78              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  103.79              if (mfn != pagetable_get_pfn(v->arch.guest_table))
  103.80                  goto bad_cr3;
  103.81 -            shadow_update_cr3(v);
  103.82 +            paging_update_cr3(v);
  103.83          }
  103.84          else 
  103.85          {
  103.86 @@ -1915,7 +1915,7 @@ static int mov_to_cr(int gpreg, int cr, 
  103.87                  v->arch.guest_table = pagetable_from_pfn(mfn);
  103.88                  if ( old_base_mfn )
  103.89                      put_page(mfn_to_page(old_base_mfn));
  103.90 -                shadow_update_paging_modes(v);
  103.91 +                paging_update_paging_modes(v);
  103.92  
  103.93                  HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
  103.94                              (unsigned long) (mfn << PAGE_SHIFT));
  103.95 @@ -1944,7 +1944,7 @@ static int mov_to_cr(int gpreg, int cr, 
  103.96           * all TLB entries except global entries.
  103.97           */
  103.98          if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
  103.99 -            shadow_update_paging_modes(v);
 103.100 +            paging_update_paging_modes(v);
 103.101          break;
 103.102  
 103.103      case 8:
 103.104 @@ -2287,7 +2287,7 @@ void svm_handle_invlpg(const short invlp
 103.105          __update_guest_eip (vmcb, inst_len);
 103.106      }
 103.107  
 103.108 -    shadow_invlpg(v, g_vaddr);
 103.109 +    paging_invlpg(v, g_vaddr);
 103.110  }
 103.111  
 103.112  
 103.113 @@ -2658,7 +2658,7 @@ void walk_shadow_and_guest_pt(unsigned l
 103.114      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 103.115      paddr_t gpa;
 103.116  
 103.117 -    gpa = shadow_gva_to_gpa(current, gva);
 103.118 +    gpa = paging_gva_to_gpa(current, gva);
 103.119      printk("gva = %lx, gpa=%"PRIpaddr", gCR3=%x\n", gva, gpa, (u32)vmcb->cr3);
 103.120      if( !svm_paging_enabled(v) || mmio_space(gpa) )
 103.121          return;
 103.122 @@ -2679,7 +2679,7 @@ void walk_shadow_and_guest_pt(unsigned l
 103.123      shadow_sync_va(v, gva);
 103.124  
 103.125      gpte.l1 = 0;
 103.126 -    __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ],
 103.127 +    __copy_from_user(&gpte, &__linear_l1_table[ l1_linear_offset(gva) ],
 103.128                       sizeof(gpte) );
 103.129      printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
 103.130  
 103.131 @@ -2724,7 +2724,7 @@ asmlinkage void svm_vmexit_handler(struc
 103.132          if (svm_dbg_on && exit_reason == VMEXIT_EXCEPTION_PF) 
 103.133          {
 103.134              if (svm_paging_enabled(v) && 
 103.135 -                !mmio_space(shadow_gva_to_gpa(current, vmcb->exitinfo2)))
 103.136 +                !mmio_space(paging_gva_to_gpa(current, vmcb->exitinfo2)))
 103.137              {
 103.138                  printk("I%08ld,ExC=%s(%d),IP=%x:%"PRIx64","
 103.139                         "I1=%"PRIx64",I2=%"PRIx64",INT=%"PRIx64", "
 103.140 @@ -2734,7 +2734,7 @@ asmlinkage void svm_vmexit_handler(struc
 103.141                         (u64)vmcb->exitinfo1,
 103.142                         (u64)vmcb->exitinfo2,
 103.143                         (u64)vmcb->exitintinfo.bytes,
 103.144 -                       (u64)shadow_gva_to_gpa(current, vmcb->exitinfo2));
 103.145 +                       (u64)paging_gva_to_gpa(current, vmcb->exitinfo2));
 103.146              }
 103.147              else 
 103.148              {
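
The new FFXSR clear in svm_vmexit_do_cpuid follows the existing pattern: X86_FEATURE_* constants encode a word index plus a bit position (word * 32 + bit), so masking with 31 recovers the bit inside the single 32-bit register being edited, here the EDX word of the 0x80000001 leaf. A standalone illustration with an invented feature value:

    #include <stdint.h>
    #include <stdio.h>

    /* Example encoding only -- not copied from Xen's cpufeature.h. */
    #define EXAMPLE_FEATURE   (1 * 32 + 25)    /* word 1, bit 25 */

    int main(void)
    {
        uint32_t edx = 0xffffffffu;

        edx &= ~(1u << (EXAMPLE_FEATURE & 31));    /* clear bit 25 of this word */
        printf("edx = %#x\n", edx);                /* prints 0xfdffffff */
        return 0;
    }
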
   104.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Fri Feb 09 14:43:22 2007 -0600
   104.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 14 19:01:35 2007 +0000
   104.3 @@ -23,10 +23,10 @@
   104.4  #include <xen/mm.h>
   104.5  #include <xen/lib.h>
   104.6  #include <xen/errno.h>
   104.7 -#include <xen/shadow.h>
   104.8  #include <asm/cpufeature.h>
   104.9  #include <asm/processor.h>
  104.10  #include <asm/msr.h>
  104.11 +#include <asm/paging.h>
  104.12  #include <asm/hvm/hvm.h>
  104.13  #include <asm/hvm/io.h>
  104.14  #include <asm/hvm/support.h>
  104.15 @@ -196,7 +196,7 @@ static int construct_vmcb(struct vcpu *v
  104.16          read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
  104.17      vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK;
  104.18  
  104.19 -    shadow_update_paging_modes(v);
  104.20 +    paging_update_paging_modes(v);
  104.21      vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
  104.22  
  104.23      arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP;
  104.24 @@ -209,7 +209,8 @@ int svm_create_vmcb(struct vcpu *v)
  104.25      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
  104.26      int rc;
  104.27  
  104.28 -    if ( (arch_svm->vmcb = alloc_vmcb()) == NULL )
  104.29 +    if ( (arch_svm->vmcb == NULL) &&
  104.30 +         (arch_svm->vmcb = alloc_vmcb()) == NULL )
  104.31      {
  104.32          printk("Failed to create a new VMCB\n");
  104.33          return -ENOMEM;
   105.1 --- a/xen/arch/x86/hvm/vioapic.c	Fri Feb 09 14:43:22 2007 -0600
   105.2 +++ b/xen/arch/x86/hvm/vioapic.c	Wed Feb 14 19:01:35 2007 +0000
   105.3 @@ -514,7 +514,7 @@ static int ioapic_load(struct domain *d,
   105.4      return 0;
   105.5  }
   105.6  
   105.7 -HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
   105.8 +HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
   105.9  
  105.10  void vioapic_init(struct domain *d)
  105.11  {
   106.1 --- a/xen/arch/x86/hvm/vlapic.c	Fri Feb 09 14:43:22 2007 -0600
   106.2 +++ b/xen/arch/x86/hvm/vlapic.c	Wed Feb 14 19:01:35 2007 +0000
   106.3 @@ -22,7 +22,6 @@
   106.4  #include <xen/types.h>
   106.5  #include <xen/mm.h>
   106.6  #include <xen/xmalloc.h>
   106.7 -#include <xen/shadow.h>
   106.8  #include <xen/domain_page.h>
   106.9  #include <asm/page.h>
  106.10  #include <xen/event.h>
  106.11 @@ -83,8 +82,6 @@ static unsigned int vlapic_lvt_mask[VLAP
  106.12  #define vlapic_base_address(vlapic)                             \
  106.13      (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
  106.14  
  106.15 -static int vlapic_reset(struct vlapic *vlapic);
  106.16 -
  106.17  /*
  106.18   * Generic APIC bitmap vector update & search routines.
  106.19   */
  106.20 @@ -293,8 +290,11 @@ static int vlapic_accept_irq(struct vcpu
  106.21          break;
  106.22  
  106.23      case APIC_DM_SMI:
  106.24 +        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
  106.25 +        break;
  106.26 +
  106.27      case APIC_DM_NMI:
  106.28 -        gdprintk(XENLOG_WARNING, "Ignoring guest SMI/NMI\n");
  106.29 +        gdprintk(XENLOG_WARNING, "Ignoring guest NMI\n");
  106.30          break;
  106.31  
  106.32      case APIC_DM_INIT:
  106.33 @@ -303,10 +303,7 @@ static int vlapic_accept_irq(struct vcpu
  106.34              break;
  106.35          /* FIXME How to check the situation after vcpu reset? */
  106.36          if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  106.37 -        {
  106.38 -            gdprintk(XENLOG_ERR, "Reset hvm vcpu not supported yet\n");
  106.39 -            goto exit_and_crash;
  106.40 -        }
  106.41 +            hvm_vcpu_reset(v);
  106.42          v->arch.hvm_vcpu.init_sipi_sipi_state =
  106.43              HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
  106.44          result = 1;
  106.45 @@ -764,7 +761,7 @@ int cpu_get_apic_interrupt(struct vcpu *
  106.46  }
  106.47  
  106.48  /* Reset the VLPAIC back to its power-on/reset state. */
  106.49 -static int vlapic_reset(struct vlapic *vlapic)
  106.50 +void vlapic_reset(struct vlapic *vlapic)
  106.51  {
  106.52      struct vcpu *v = vlapic_vcpu(vlapic);
  106.53      int i;
  106.54 @@ -793,8 +790,6 @@ static int vlapic_reset(struct vlapic *v
  106.55  
  106.56      vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
  106.57      vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
  106.58 -
  106.59 -    return 1;
  106.60  }
  106.61  
  106.62  #ifdef HVM_DEBUG_SUSPEND
  106.63 @@ -908,8 +903,10 @@ static int lapic_load_regs(struct domain
  106.64      return 0;
  106.65  }
  106.66  
  106.67 -HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden);
  106.68 -HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
  106.69 +HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
  106.70 +                          1, HVMSR_PER_VCPU);
  106.71 +HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
  106.72 +                          1, HVMSR_PER_VCPU);
  106.73  
  106.74  int vlapic_init(struct vcpu *v)
  106.75  {
  106.76 @@ -922,7 +919,6 @@ int vlapic_init(struct vcpu *v)
  106.77      {
  106.78          dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
  106.79                  v->vcpu_id);
  106.80 -        xfree(vlapic);
  106.81          return -ENOMEM;
  106.82      }
  106.83  
   107.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Feb 09 14:43:22 2007 -0600
   107.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 14 19:01:35 2007 +0000
   107.3 @@ -295,6 +295,11 @@ static void construct_vmcs(struct vcpu *
   107.4  
   107.5      vmx_vmcs_enter(v);
   107.6  
   107.7 +    v->arch.hvm_vmx.cpu_cr2 = 0;
   107.8 +    v->arch.hvm_vmx.cpu_cr3 = 0;
   107.9 +    memset(&v->arch.hvm_vmx.msr_state, 0, sizeof(v->arch.hvm_vmx.msr_state));
  107.10 +    v->arch.hvm_vmx.vmxassist_enabled = 0;
  107.11 +
  107.12      /* VMCS controls. */
  107.13      __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
  107.14      __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
  107.15 @@ -443,15 +448,18 @@ static void construct_vmcs(struct vcpu *
  107.16  
  107.17      vmx_vmcs_exit(v);
  107.18  
  107.19 -    shadow_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
  107.20 +    paging_update_paging_modes(v); /* will update HOST & GUEST_CR3 as reqd */
  107.21  }
  107.22  
  107.23  int vmx_create_vmcs(struct vcpu *v)
  107.24  {
  107.25 -    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
  107.26 -        return -ENOMEM;
  107.27 - 
  107.28 -    __vmx_clear_vmcs(v);
  107.29 +    if ( v->arch.hvm_vmx.vmcs == NULL )
  107.30 +    {
  107.31 +        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
  107.32 +            return -ENOMEM;
  107.33 +
  107.34 +        __vmx_clear_vmcs(v);
  107.35 +    }
  107.36  
  107.37      construct_vmcs(v);
  107.38  
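
Both svm_create_vmcb (in the previous file) and vmx_create_vmcs now tolerate being called on a vcpu that already owns its control block: allocation happens only on first use, while the contents are rebuilt on every call. That makes the same entry point usable for initial vcpu construction and for re-initialising a vcpu whose state is about to be overwritten, for example on restore. The shape of the pattern in isolation (the names below are generic placeholders, not Xen APIs):

    #include <errno.h>

    struct ctl_block;                              /* stand-in for a VMCS/VMCB */
    struct ctl_block *alloc_ctl_block(void);       /* stand-in allocator */
    void rebuild_ctl_block(struct ctl_block *cb);  /* stand-in for construct_vmcs */

    static int create_or_reset(struct ctl_block **slot)
    {
        if ( *slot == NULL )
        {
            if ( (*slot = alloc_ctl_block()) == NULL )
                return -ENOMEM;                    /* first call only */
        }
        rebuild_ctl_block(*slot);                  /* every call */
        return 0;
    }
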
   108.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Feb 09 14:43:22 2007 -0600
   108.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 14 19:01:35 2007 +0000
   108.3 @@ -35,12 +35,13 @@
   108.4  #include <asm/types.h>
   108.5  #include <asm/msr.h>
   108.6  #include <asm/spinlock.h>
   108.7 +#include <asm/paging.h>
   108.8 +#include <asm/p2m.h>
   108.9  #include <asm/hvm/hvm.h>
  108.10  #include <asm/hvm/support.h>
  108.11  #include <asm/hvm/vmx/vmx.h>
  108.12  #include <asm/hvm/vmx/vmcs.h>
  108.13  #include <asm/hvm/vmx/cpu.h>
  108.14 -#include <asm/shadow.h>
  108.15  #include <public/sched.h>
  108.16  #include <public/hvm/ioreq.h>
  108.17  #include <asm/hvm/vpic.h>
  108.18 @@ -484,9 +485,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
  108.19          v->arch.guest_table = pagetable_from_pfn(mfn);
  108.20          if (old_base_mfn)
  108.21               put_page(mfn_to_page(old_base_mfn));
  108.22 -        /*
  108.23 -         * arch.shadow_table should now hold the next CR3 for shadow
  108.24 -         */
  108.25          v->arch.hvm_vmx.cpu_cr3 = c->cr3;
  108.26      }
  108.27  
  108.28 @@ -556,7 +554,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
  108.29  
  108.30      vmx_vmcs_exit(v);
  108.31  
  108.32 -    shadow_update_paging_modes(v);
  108.33 +    paging_update_paging_modes(v);
  108.34      return 0;
  108.35  
  108.36   bad_cr3:
  108.37 @@ -1126,7 +1124,7 @@ static int vmx_do_page_fault(unsigned lo
  108.38      }
  108.39  #endif
  108.40  
  108.41 -    result = shadow_fault(va, regs);
  108.42 +    result = paging_fault(va, regs);
  108.43  
  108.44      TRACE_VMEXIT(2, result);
  108.45  #if 0
  108.46 @@ -1277,7 +1275,7 @@ static void vmx_do_invlpg(unsigned long 
  108.47       * We do the safest things first, then try to update the shadow
  108.48       * copying from guest
  108.49       */
  108.50 -    shadow_invlpg(v, va);
  108.51 +    paging_invlpg(v, va);
  108.52  }
  108.53  
  108.54  
  108.55 @@ -1691,9 +1689,6 @@ static int vmx_world_restore(struct vcpu
  108.56          v->arch.guest_table = pagetable_from_pfn(mfn);
  108.57          if (old_base_mfn)
  108.58               put_page(mfn_to_page(old_base_mfn));
  108.59 -        /*
  108.60 -         * arch.shadow_table should now hold the next CR3 for shadow
  108.61 -         */
  108.62          v->arch.hvm_vmx.cpu_cr3 = c->cr3;
  108.63      }
  108.64  
  108.65 @@ -1753,7 +1748,7 @@ static int vmx_world_restore(struct vcpu
  108.66      __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
  108.67      __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
  108.68  
  108.69 -    shadow_update_paging_modes(v);
  108.70 +    paging_update_paging_modes(v);
  108.71      return 0;
  108.72  
  108.73   bad_cr3:
  108.74 @@ -1906,14 +1901,11 @@ static int vmx_set_cr0(unsigned long val
  108.75          v->arch.guest_table = pagetable_from_pfn(mfn);
  108.76          if (old_base_mfn)
  108.77              put_page(mfn_to_page(old_base_mfn));
  108.78 -        shadow_update_paging_modes(v);
  108.79 +        paging_update_paging_modes(v);
  108.80  
  108.81          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
  108.82                      (unsigned long) (mfn << PAGE_SHIFT));
  108.83  
  108.84 -        /*
  108.85 -         * arch->shadow_table should hold the next CR3 for shadow
  108.86 -         */
  108.87          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
  108.88                      v->arch.hvm_vmx.cpu_cr3, mfn);
  108.89      }
  108.90 @@ -1981,7 +1973,7 @@ static int vmx_set_cr0(unsigned long val
  108.91              vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
  108.92              __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
  108.93          }
  108.94 -        shadow_update_paging_modes(v);
  108.95 +        paging_update_paging_modes(v);
  108.96      }
  108.97  
  108.98      return 1;
  108.99 @@ -2070,7 +2062,7 @@ static int mov_to_cr(int gp, int cr, str
 108.100              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
 108.101              if (mfn != pagetable_get_pfn(v->arch.guest_table))
 108.102                  goto bad_cr3;
 108.103 -            shadow_update_cr3(v);
 108.104 +            paging_update_cr3(v);
 108.105          } else {
 108.106              /*
 108.107               * If different, make a shadow. Check if the PDBR is valid
 108.108 @@ -2084,9 +2076,6 @@ static int mov_to_cr(int gp, int cr, str
 108.109              v->arch.guest_table = pagetable_from_pfn(mfn);
 108.110              if (old_base_mfn)
 108.111                  put_page(mfn_to_page(old_base_mfn));
 108.112 -            /*
 108.113 -             * arch.shadow_table should now hold the next CR3 for shadow
 108.114 -             */
 108.115              v->arch.hvm_vmx.cpu_cr3 = value;
 108.116              update_cr3(v);
 108.117              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
 108.118 @@ -2120,9 +2109,6 @@ static int mov_to_cr(int gp, int cr, str
 108.119                  HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
 108.120                              (unsigned long) (mfn << PAGE_SHIFT));
 108.121  
 108.122 -                /*
 108.123 -                 * arch->shadow_table should hold the next CR3 for shadow
 108.124 -                 */
 108.125                  HVM_DBG_LOG(DBG_LEVEL_VMMU, 
 108.126                              "Update CR3 value = %lx, mfn = %lx",
 108.127                              v->arch.hvm_vmx.cpu_cr3, mfn);
 108.128 @@ -2148,7 +2134,7 @@ static int mov_to_cr(int gp, int cr, str
 108.129           * all TLB entries except global entries.
 108.130           */
 108.131          if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
 108.132 -            shadow_update_paging_modes(v);
 108.133 +            paging_update_paging_modes(v);
 108.134          break;
 108.135  
 108.136      case 8:
   109.1 --- a/xen/arch/x86/hvm/vpic.c	Fri Feb 09 14:43:22 2007 -0600
   109.2 +++ b/xen/arch/x86/hvm/vpic.c	Wed Feb 14 19:01:35 2007 +0000
   109.3 @@ -440,7 +440,7 @@ static int vpic_load(struct domain *d, h
   109.4      return 0;
   109.5  }
   109.6  
   109.7 -HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load);
   109.8 +HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
   109.9  
  109.10  void vpic_init(struct domain *d)
  109.11  {
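
The PIC is the one device registering with an instance count of 2: there are two emulated 8259s (master and slave), each saved as its own record under the same typecode but a different instance number. A plausible shape for such a handler, following the hvm_save_entry pattern used elsewhere in this changeset (the vpic[] field name is assumed for illustration):

    static int vpic_save_sketch(struct domain *d, hvm_domain_context_t *h)
    {
        int i;

        for ( i = 0; i < 2; i++ )                  /* 0 = master, 1 = slave */
        {
            struct hvm_hw_vpic *s = &d->arch.hvm_domain.vpic[i];
            if ( hvm_save_entry(PIC, i, h, s) )
                return 1;
        }

        return 0;
    }
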
   110.1 --- a/xen/arch/x86/machine_kexec.c	Fri Feb 09 14:43:22 2007 -0600
   110.2 +++ b/xen/arch/x86/machine_kexec.c	Wed Feb 14 19:01:35 2007 +0000
   110.3 @@ -15,11 +15,15 @@
   110.4  #include <xen/types.h>
   110.5  #include <xen/console.h>
   110.6  #include <xen/kexec.h>
   110.7 -#include <asm/kexec.h>
   110.8  #include <xen/domain_page.h>
   110.9  #include <asm/fixmap.h>
  110.10  #include <asm/hvm/hvm.h>
  110.11  
  110.12 +typedef void (*relocate_new_kernel_t)(
  110.13 +                unsigned long indirection_page,
  110.14 +                unsigned long *page_list,
  110.15 +                unsigned long start_address);
  110.16 +
  110.17  int machine_kexec_load(int type, int slot, xen_kexec_image_t *image)
  110.18  {
  110.19      unsigned long prev_ma = 0;
  110.20 @@ -40,8 +44,26 @@ int machine_kexec_load(int type, int slo
  110.21          else
  110.22          {
  110.23              /* Odd pages: va for previous ma. */
  110.24 -            set_fixmap(fix_base + (k >> 1), prev_ma);
  110.25 -            image->page_list[k] = fix_to_virt(fix_base + (k >> 1));
  110.26 +            if ( IS_COMPAT(dom0) )
  110.27 +            {
  110.28 +
  110.29 +                /*
  110.30 +                 * The compatability bounce code sets up a page table
  110.31 +                 * with a 1-1 mapping of the first 1G of memory so
  110.32 +                 * VA==PA here.
  110.33 +                 *
  110.34 +                 * This Linux purgatory code still sets up separate
  110.35 +                 * high and low mappings on the control page (entries
  110.36 +                 * 0 and 1) but it is harmless if they are equal since
  110.37 +                 * that PT is not live at the time.
  110.38 +                 */
  110.39 +                image->page_list[k] = prev_ma;
  110.40 +            }
  110.41 +            else
  110.42 +            {
  110.43 +                set_fixmap(fix_base + (k >> 1), prev_ma);
  110.44 +                image->page_list[k] = fix_to_virt(fix_base + (k >> 1));
  110.45 +            }
  110.46          }
  110.47      }
  110.48  
  110.49 @@ -94,6 +116,31 @@ void machine_reboot_kexec(xen_kexec_imag
  110.50      BUG();
  110.51  }
  110.52  
  110.53 +void machine_kexec(xen_kexec_image_t *image)
  110.54 +{
  110.55 +#ifdef CONFIG_COMPAT
  110.56 +    if ( IS_COMPAT(dom0) )
  110.57 +    {
  110.58 +        extern void compat_machine_kexec(unsigned long rnk,
  110.59 +                                         unsigned long indirection_page,
  110.60 +                                         unsigned long *page_list,
  110.61 +                                         unsigned long start_address);
  110.62 +        compat_machine_kexec(image->page_list[1],
  110.63 +                             image->indirection_page,
  110.64 +                             image->page_list,
  110.65 +                             image->start_address);
  110.66 +    }
  110.67 +    else
  110.68 +#endif
  110.69 +    {
  110.70 +        relocate_new_kernel_t rnk;
  110.71 +
  110.72 +        rnk = (relocate_new_kernel_t) image->page_list[1];
  110.73 +        (*rnk)(image->indirection_page, image->page_list,
  110.74 +               image->start_address);
  110.75 +    }
  110.76 +}
  110.77 +
  110.78  /*
  110.79   * Local variables:
  110.80   * mode: C
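
machine_kexec() shows two ways of reaching the purgatory trampoline whose entry is recorded in page_list[1] (a fixmap virtual address in the native case, the raw machine address for a compat dom0 since VA==PA on its 1:1 mapping): the compat path goes through compat_machine_kexec, while the native path simply casts the entry to the relocate_new_kernel_t typedef added at the top of the file and calls it. The native call, isolated for clarity (this restates the code above rather than adding new behaviour):

    static void native_kexec_jump(xen_kexec_image_t *image)
    {
        relocate_new_kernel_t rnk =
            (relocate_new_kernel_t)image->page_list[1];

        /* Does not return: the trampoline copies the new kernel into place
         * and jumps to start_address. */
        (*rnk)(image->indirection_page, image->page_list,
               image->start_address);
    }
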
   111.1 --- a/xen/arch/x86/mm.c	Fri Feb 09 14:43:22 2007 -0600
   111.2 +++ b/xen/arch/x86/mm.c	Wed Feb 14 19:01:35 2007 +0000
   111.3 @@ -99,6 +99,7 @@
   111.4  #include <xen/event.h>
   111.5  #include <xen/iocap.h>
   111.6  #include <xen/guest_access.h>
   111.7 +#include <asm/paging.h>
   111.8  #include <asm/shadow.h>
   111.9  #include <asm/page.h>
  111.10  #include <asm/flushtlb.h>
  111.11 @@ -373,9 +374,6 @@ void write_ptbase(struct vcpu *v)
  111.12  /* Should be called after CR3 is updated.
  111.13   * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
  111.14   * 
  111.15 - * Also updates other state derived from CR3 (vcpu->arch.guest_vtable,
  111.16 - * shadow_vtable, etc).
  111.17 - *
  111.18   * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
  111.19   * for HVM guests, arch.monitor_table and hvm's guest CR3.
  111.20   *
  111.21 @@ -385,9 +383,9 @@ void update_cr3(struct vcpu *v)
  111.22  {
  111.23      unsigned long cr3_mfn=0;
  111.24  
  111.25 -    if ( shadow_mode_enabled(v->domain) )
  111.26 +    if ( paging_mode_enabled(v->domain) )
  111.27      {
  111.28 -        shadow_update_cr3(v);
  111.29 +        paging_update_cr3(v);
  111.30          return;
  111.31      }
  111.32  
  111.33 @@ -615,7 +613,7 @@ get_page_from_l1e(
  111.34       * qemu-dm helper process in dom0 to map the domain's memory without
  111.35       * messing up the count of "real" writable mappings.) */
  111.36      okay = (((l1e_get_flags(l1e) & _PAGE_RW) && 
  111.37 -             !(unlikely(shadow_mode_external(d) && (d != current->domain))))
  111.38 +             !(unlikely(paging_mode_external(d) && (d != current->domain))))
  111.39              ? get_page_and_type(page, d, PGT_writable_page)
  111.40              : get_page(page, d));
  111.41      if ( !okay )
  111.42 @@ -804,9 +802,9 @@ void put_page_from_l1e(l1_pgentry_t l1e,
  111.43      }
  111.44  
  111.45      /* Remember we didn't take a type-count of foreign writable mappings
  111.46 -     * to shadow external domains */
  111.47 +     * to paging-external domains */
  111.48      if ( (l1e_get_flags(l1e) & _PAGE_RW) && 
  111.49 -         !(unlikely((e != d) && shadow_mode_external(e))) )
  111.50 +         !(unlikely((e != d) && paging_mode_external(e))) )
  111.51      {
  111.52          put_page_and_type(page);
  111.53      }
  111.54 @@ -976,6 +974,19 @@ static void pae_flush_pgd(
  111.55      l3_pgentry_t  *l3tab_ptr;
  111.56      struct pae_l3_cache *cache;
  111.57  
  111.58 +    if ( unlikely(shadow_mode_enabled(d)) )
  111.59 +    {
  111.60 +        cpumask_t m = CPU_MASK_NONE;
  111.61 +        /* Re-shadow this l3 table on any vcpus that are using it */
  111.62 +        for_each_vcpu ( d, v )
  111.63 +            if ( pagetable_get_pfn(v->arch.guest_table) == mfn )
  111.64 +            {
  111.65 +                paging_update_cr3(v);
  111.66 +                cpus_or(m, m, v->vcpu_dirty_cpumask);
  111.67 +            }
  111.68 +        flush_tlb_mask(m);
  111.69 +    }
  111.70 +
  111.71      /* If below 4GB then the pgdir is not shadowed in low memory. */
  111.72      if ( !l3tab_needs_shadow(mfn) )
  111.73          return;
  111.74 @@ -1259,20 +1270,13 @@ static inline int update_intpte(intpte_t
  111.75  {
  111.76      int rv = 1;
  111.77  #ifndef PTE_UPDATE_WITH_CMPXCHG
  111.78 -    if ( unlikely(shadow_mode_enabled(v->domain)) )
  111.79 -        rv = shadow_write_guest_entry(v, p, new, _mfn(mfn));
  111.80 -    else
  111.81 -        rv = (!__copy_to_user(p, &new, sizeof(new)));
  111.82 +    rv = paging_write_guest_entry(v, p, new, _mfn(mfn));
  111.83  #else
  111.84      {
  111.85          intpte_t t = old;
  111.86          for ( ; ; )
  111.87          {
  111.88 -            if ( unlikely(shadow_mode_enabled(v->domain)) )
  111.89 -                rv = shadow_cmpxchg_guest_entry(v, p, &t, new, _mfn(mfn));
  111.90 -            else
  111.91 -                rv = (!cmpxchg_user(p, t, new));
  111.92 -
  111.93 +            rv = paging_cmpxchg_guest_entry(v, p, &t, new, _mfn(mfn));
  111.94              if ( unlikely(rv == 0) )
  111.95              {
  111.96                  MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
  111.97 @@ -1310,7 +1314,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
  111.98      if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
  111.99          return 0;
 111.100  
 111.101 -    if ( unlikely(shadow_mode_refcounts(d)) )
 111.102 +    if ( unlikely(paging_mode_refcounts(d)) )
 111.103          return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, current);
 111.104  
 111.105      if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
 111.106 @@ -1572,7 +1576,7 @@ void free_page_type(struct page_info *pa
 111.107           */
 111.108          queue_deferred_ops(owner, DOP_FLUSH_ALL_TLBS);
 111.109  
 111.110 -        if ( unlikely(shadow_mode_enabled(owner)) )
 111.111 +        if ( unlikely(paging_mode_enabled(owner)) )
 111.112          {
 111.113              /* A page table is dirtied when its type count becomes zero. */
 111.114              mark_dirty(owner, page_to_mfn(page));
 111.115 @@ -1771,7 +1775,7 @@ int new_guest_cr3(unsigned long mfn)
 111.116  #ifdef CONFIG_COMPAT
 111.117      if ( IS_COMPAT(d) )
 111.118      {
 111.119 -        okay = shadow_mode_refcounts(d)
 111.120 +        okay = paging_mode_refcounts(d)
 111.121              ? 0 /* Old code was broken, but what should it be? */
 111.122              : mod_l4_entry(__va(pagetable_get_paddr(v->arch.guest_table)),
 111.123                             l4e_from_pfn(mfn, (_PAGE_PRESENT|_PAGE_RW|
 111.124 @@ -1788,7 +1792,7 @@ int new_guest_cr3(unsigned long mfn)
 111.125          return 1;
 111.126      }
 111.127  #endif
 111.128 -    okay = shadow_mode_refcounts(d)
 111.129 +    okay = paging_mode_refcounts(d)
 111.130          ? get_page_from_pagenr(mfn, d)
 111.131          : get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d);
 111.132      if ( unlikely(!okay) )
 111.133 @@ -1808,7 +1812,7 @@ int new_guest_cr3(unsigned long mfn)
 111.134  
 111.135      if ( likely(old_base_mfn != 0) )
 111.136      {
 111.137 -        if ( shadow_mode_refcounts(d) )
 111.138 +        if ( paging_mode_refcounts(d) )
 111.139              put_page(mfn_to_page(old_base_mfn));
 111.140          else
 111.141              put_page_and_type(mfn_to_page(old_base_mfn));
 111.142 @@ -1861,7 +1865,7 @@ static int set_foreigndom(domid_t domid)
 111.143                  d->domain_id);
 111.144          okay = 0;
 111.145      }
 111.146 -    else if ( unlikely(shadow_mode_translate(d)) )
 111.147 +    else if ( unlikely(paging_mode_translate(d)) )
 111.148      {
 111.149          MEM_LOG("Cannot mix foreign mappings with translated domains");
 111.150          okay = 0;
 111.151 @@ -2007,7 +2011,7 @@ int do_mmuext_op(
 111.152              if ( (op.cmd - MMUEXT_PIN_L1_TABLE) > (CONFIG_PAGING_LEVELS - 1) )
 111.153                  break;
 111.154  
 111.155 -            if ( shadow_mode_refcounts(FOREIGNDOM) )
 111.156 +            if ( paging_mode_refcounts(FOREIGNDOM) )
 111.157                  break;
 111.158  
 111.159              okay = get_page_and_type_from_pagenr(mfn, type, FOREIGNDOM);
 111.160 @@ -2032,7 +2036,7 @@ int do_mmuext_op(
 111.161              break;
 111.162  
 111.163          case MMUEXT_UNPIN_TABLE:
 111.164 -            if ( shadow_mode_refcounts(d) )
 111.165 +            if ( paging_mode_refcounts(d) )
 111.166                  break;
 111.167  
 111.168              if ( unlikely(!(okay = get_page_from_pagenr(mfn, d))) )
 111.169 @@ -2070,7 +2074,7 @@ int do_mmuext_op(
 111.170              }
 111.171              if (likely(mfn != 0))
 111.172              {
 111.173 -                if ( shadow_mode_refcounts(d) )
 111.174 +                if ( paging_mode_refcounts(d) )
 111.175                      okay = get_page_from_pagenr(mfn, d);
 111.176                  else
 111.177                      okay = get_page_and_type_from_pagenr(
 111.178 @@ -2087,7 +2091,7 @@ int do_mmuext_op(
 111.179                  v->arch.guest_table_user = pagetable_from_pfn(mfn);
 111.180                  if ( old_mfn != 0 )
 111.181                  {
 111.182 -                    if ( shadow_mode_refcounts(d) )
 111.183 +                    if ( paging_mode_refcounts(d) )
 111.184                          put_page(mfn_to_page(old_mfn));
 111.185                      else
 111.186                          put_page_and_type(mfn_to_page(old_mfn));
 111.187 @@ -2101,8 +2105,8 @@ int do_mmuext_op(
 111.188              break;
 111.189      
 111.190          case MMUEXT_INVLPG_LOCAL:
 111.191 -            if ( !shadow_mode_enabled(d) 
 111.192 -                 || shadow_invlpg(v, op.arg1.linear_addr) != 0 )
 111.193 +            if ( !paging_mode_enabled(d) 
 111.194 +                 || paging_invlpg(v, op.arg1.linear_addr) != 0 )
 111.195                  local_flush_tlb_one(op.arg1.linear_addr);
 111.196              break;
 111.197  
 111.198 @@ -2149,7 +2153,7 @@ int do_mmuext_op(
 111.199              unsigned long ptr  = op.arg1.linear_addr;
 111.200              unsigned long ents = op.arg2.nr_ents;
 111.201  
 111.202 -            if ( shadow_mode_external(d) )
 111.203 +            if ( paging_mode_external(d) )
 111.204              {
 111.205                  MEM_LOG("ignoring SET_LDT hypercall from external "
 111.206                          "domain %u", d->domain_id);
 111.207 @@ -2298,9 +2302,9 @@ int do_mmu_update(
 111.208              case PGT_l3_page_table:
 111.209              case PGT_l4_page_table:
 111.210              {
 111.211 -                if ( shadow_mode_refcounts(d) )
 111.212 +                if ( paging_mode_refcounts(d) )
 111.213                  {
 111.214 -                    MEM_LOG("mmu update on shadow-refcounted domain!");
 111.215 +                    MEM_LOG("mmu update on auto-refcounted domain!");
 111.216                      break;
 111.217                  }
 111.218  
 111.219 @@ -2351,13 +2355,7 @@ int do_mmu_update(
 111.220                  if ( unlikely(!get_page_type(page, PGT_writable_page)) )
 111.221                      break;
 111.222  
 111.223 -                if ( unlikely(shadow_mode_enabled(d)) )
 111.224 -                    okay = shadow_write_guest_entry(v, va, req.val, _mfn(mfn));
 111.225 -                else
 111.226 -                {
 111.227 -                    *(intpte_t *)va = req.val;
 111.228 -                    okay = 1;
 111.229 -                }
 111.230 +                okay = paging_write_guest_entry(v, va, req.val, _mfn(mfn));
 111.231  
 111.232                  put_page_type(page);
 111.233              }
 111.234 @@ -2380,9 +2378,9 @@ int do_mmu_update(
 111.235                  break;
 111.236              }
 111.237  
 111.238 -            if ( unlikely(shadow_mode_translate(FOREIGNDOM)) )
 111.239 +            if ( unlikely(paging_mode_translate(FOREIGNDOM)) )
 111.240              {
 111.241 -                MEM_LOG("Mach-phys update on shadow-translate guest");
 111.242 +                MEM_LOG("Mach-phys update on auto-translate guest");
 111.243                  break;
 111.244              }
 111.245  
 111.246 @@ -2472,7 +2470,7 @@ static int create_grant_pte_mapping(
 111.247          goto failed;
 111.248      } 
 111.249  
 111.250 -    if ( !shadow_mode_refcounts(d) )
 111.251 +    if ( !paging_mode_refcounts(d) )
 111.252          put_page_from_l1e(ol1e, d);
 111.253  
 111.254      put_page_type(page);
 111.255 @@ -2578,7 +2576,7 @@ static int create_grant_va_mapping(
 111.256      if ( !okay )
 111.257              return GNTST_general_error;
 111.258  
 111.259 -    if ( !shadow_mode_refcounts(d) )
 111.260 +    if ( !paging_mode_refcounts(d) )
 111.261          put_page_from_l1e(ol1e, d);
 111.262  
 111.263      return GNTST_okay;
 111.264 @@ -2704,7 +2702,7 @@ int do_update_va_mapping(unsigned long v
 111.265  
 111.266      perfc_incrc(calls_to_update_va);
 111.267  
 111.268 -    if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) )
 111.269 +    if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
 111.270          return -EINVAL;
 111.271  
 111.272      LOCK_BIGLOCK(d);
 111.273 @@ -2744,8 +2742,8 @@ int do_update_va_mapping(unsigned long v
 111.274          switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
 111.275          {
 111.276          case UVMF_LOCAL:
 111.277 -            if ( !shadow_mode_enabled(d) 
 111.278 -                 || (shadow_invlpg(current, va) != 0) ) 
 111.279 +            if ( !paging_mode_enabled(d) 
 111.280 +                 || (paging_invlpg(current, va) != 0) ) 
 111.281                  local_flush_tlb_one(va);
 111.282              break;
 111.283          case UVMF_ALL:
 111.284 @@ -2980,7 +2978,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
 111.285              break;
 111.286          }
 111.287  
 111.288 -        if ( !shadow_mode_translate(d) || (mfn == 0) )
 111.289 +        if ( !paging_mode_translate(d) || (mfn == 0) )
 111.290          {
 111.291              put_domain(d);
 111.292              return -EINVAL;
 111.293 @@ -3235,17 +3233,12 @@ static int ptwr_emulated_update(
 111.294      if ( do_cmpxchg )
 111.295      {
 111.296          int okay;
 111.297 +        intpte_t t = old;
 111.298          ol1e = l1e_from_intpte(old);
 111.299  
 111.300 -        if ( shadow_mode_enabled(d) )
 111.301 -        {
 111.302 -            intpte_t t = old;
 111.303 -            okay = shadow_cmpxchg_guest_entry(v, (intpte_t *) pl1e, 
 111.304 -                                              &t, val, _mfn(mfn));
 111.305 -            okay = (okay && t == old);
 111.306 -        }
 111.307 -        else 
 111.308 -            okay = (cmpxchg((intpte_t *)pl1e, old, val) == old);
 111.309 +        okay = paging_cmpxchg_guest_entry(v, (intpte_t *) pl1e, 
 111.310 +                                          &t, val, _mfn(mfn));
 111.311 +        okay = (okay && t == old);
 111.312  
 111.313          if ( !okay )
 111.314          {
   112.1 --- a/xen/arch/x86/mm/Makefile	Fri Feb 09 14:43:22 2007 -0600
   112.2 +++ b/xen/arch/x86/mm/Makefile	Wed Feb 14 19:01:35 2007 +0000
   112.3 @@ -1,1 +1,4 @@
   112.4  subdir-y += shadow
   112.5 +
   112.6 +obj-y += paging.o
   112.7 +obj-y += p2m.o
   113.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   113.2 +++ b/xen/arch/x86/mm/p2m.c	Wed Feb 14 19:01:35 2007 +0000
   113.3 @@ -0,0 +1,699 @@
   113.4 +/******************************************************************************
   113.5 + * arch/x86/mm/p2m.c
   113.6 + *
   113.7 + * physical-to-machine mappings for automatically-translated domains.
   113.8 + * 
   113.9 + * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
  113.10 + * Parts of this code are Copyright (c) 2006 by XenSource Inc.
  113.11 + * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
  113.12 + * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
  113.13 + * 
  113.14 + * This program is free software; you can redistribute it and/or modify
  113.15 + * it under the terms of the GNU General Public License as published by
  113.16 + * the Free Software Foundation; either version 2 of the License, or
  113.17 + * (at your option) any later version.
  113.18 + *
  113.19 + * This program is distributed in the hope that it will be useful,
  113.20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  113.21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  113.22 + * GNU General Public License for more details.
  113.23 + *
  113.24 + * You should have received a copy of the GNU General Public License
  113.25 + * along with this program; if not, write to the Free Software
  113.26 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  113.27 + */
  113.28 +
  113.29 +#include <asm/domain.h>
  113.30 +#include <asm/page.h>
  113.31 +#include <asm/paging.h>
  113.32 +#include <asm/p2m.h>
  113.33 +
  113.34 +/* Debugging and auditing of the P2M code? */
  113.35 +#define P2M_AUDIT     0
  113.36 +#define P2M_DEBUGGING 1
  113.37 +
  113.38 +/* The P2M lock.  This protects all updates to the p2m table.
  113.39 + * Updates are expected to be safe against concurrent reads, 
  113.40 + * which do *not* require the lock */
  113.41 +
  113.42 +#define p2m_lock_init(_d)                            \
  113.43 +    do {                                             \
  113.44 +        spin_lock_init(&(_d)->arch.p2m.lock);        \
  113.45 +        (_d)->arch.p2m.locker = -1;                  \
  113.46 +        (_d)->arch.p2m.locker_function = "nobody";   \
  113.47 +    } while (0)
  113.48 +
  113.49 +#define p2m_lock(_d)                                                \
  113.50 +    do {                                                            \
  113.51 +        if ( unlikely((_d)->arch.p2m.locker == current->processor) )\
  113.52 +        {                                                           \
  113.53 +            printk("Error: p2m lock held by %s\n",                  \
  113.54 +                   (_d)->arch.p2m.locker_function);                 \
  113.55 +            BUG();                                                  \
  113.56 +        }                                                           \
  113.57 +        spin_lock(&(_d)->arch.p2m.lock);                            \
  113.58 +        ASSERT((_d)->arch.p2m.locker == -1);                        \
  113.59 +        (_d)->arch.p2m.locker = current->processor;                 \
  113.60 +        (_d)->arch.p2m.locker_function = __func__;                  \
  113.61 +    } while (0)
  113.62 +
  113.63 +#define p2m_unlock(_d)                                              \
  113.64 +    do {                                                            \
  113.65 +        ASSERT((_d)->arch.p2m.locker == current->processor); \
  113.66 +        (_d)->arch.p2m.locker = -1;                          \
  113.67 +        (_d)->arch.p2m.locker_function = "nobody";           \
  113.68 +        spin_unlock(&(_d)->arch.p2m.lock);                   \
  113.69 +    } while (0)
  113.70 +
  113.71 +
  113.72 +
  113.73 +/* Printouts */
  113.74 +#define P2M_PRINTK(_f, _a...)                                \
  113.75 +    debugtrace_printk("p2m: %s(): " _f, __func__, ##_a)
  113.76 +#define P2M_ERROR(_f, _a...)                                 \
  113.77 +    printk("pg error: %s(): " _f, __func__, ##_a)
  113.78 +#if P2M_DEBUGGING
  113.79 +#define P2M_DEBUG(_f, _a...)                                 \
  113.80 +    debugtrace_printk("p2mdebug: %s(): " _f, __func__, ##_a)
  113.81 +#else
  113.82 +#define P2M_DEBUG(_f, _a...) do { (void)(_f); } while(0) 
  113.83 +#endif
  113.84 +
  113.85 +
  113.86 +/* Override macros from asm/page.h to make them work with mfn_t */
  113.87 +#undef mfn_to_page
  113.88 +#define mfn_to_page(_m) (frame_table + mfn_x(_m))
  113.89 +#undef mfn_valid
  113.90 +#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
  113.91 +#undef page_to_mfn
  113.92 +#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
  113.93 +
  113.94 +
  113.95 +
  113.96 +// Find the next level's P2M entry, checking for out-of-range gfn's...
  113.97 +// Returns NULL on error.
  113.98 +//
  113.99 +static l1_pgentry_t *
 113.100 +p2m_find_entry(void *table, unsigned long *gfn_remainder,
 113.101 +                   unsigned long gfn, u32 shift, u32 max)
 113.102 +{
 113.103 +    u32 index;
 113.104 +
 113.105 +    index = *gfn_remainder >> shift;
 113.106 +    if ( index >= max )
 113.107 +    {
 113.108 +        P2M_DEBUG("gfn=0x%lx out of range "
 113.109 +                  "(gfn_remainder=0x%lx shift=%d index=0x%x max=0x%x)\n",
 113.110 +                  gfn, *gfn_remainder, shift, index, max);
 113.111 +        return NULL;
 113.112 +    }
 113.113 +    *gfn_remainder &= (1 << shift) - 1;
 113.114 +    return (l1_pgentry_t *)table + index;
 113.115 +}
 113.116 +
 113.117 +// Walk one level of the P2M table, allocating a new table if required.
 113.118 +// Returns 0 on error.
 113.119 +//
 113.120 +static int
 113.121 +p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table, 
 113.122 +               unsigned long *gfn_remainder, unsigned long gfn, u32 shift, 
 113.123 +               u32 max, unsigned long type)
 113.124 +{
 113.125 +    l1_pgentry_t *p2m_entry;
 113.126 +    l1_pgentry_t new_entry;
 113.127 +    void *next;
 113.128 +    ASSERT(d->arch.p2m.alloc_page);
 113.129 +
 113.130 +    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
 113.131 +                                      shift, max)) )
 113.132 +        return 0;
 113.133 +
 113.134 +    if ( !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) )
 113.135 +    {
 113.136 +        struct page_info *pg = d->arch.p2m.alloc_page(d);
 113.137 +        if ( pg == NULL )
 113.138 +            return 0;
 113.139 +        list_add_tail(&pg->list, &d->arch.p2m.pages);
 113.140 +        pg->u.inuse.type_info = type | 1 | PGT_validated;
 113.141 +        pg->count_info = 1;
 113.142 +
 113.143 +        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
 113.144 +                                 __PAGE_HYPERVISOR|_PAGE_USER);
 113.145 +
 113.146 +        switch ( type ) {
 113.147 +        case PGT_l3_page_table:
 113.148 +            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 4);
 113.149 +            break;
 113.150 +        case PGT_l2_page_table:
 113.151 +            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 3);
 113.152 +            break;
 113.153 +        case PGT_l1_page_table:
 113.154 +            paging_write_p2m_entry(d, gfn, p2m_entry, new_entry, 2);
 113.155 +            break;
 113.156 +        default:
 113.157 +            BUG();
 113.158 +            break;
 113.159 +        }
 113.160 +    }
 113.161 +    *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
 113.162 +    next = map_domain_page(mfn_x(*table_mfn));
 113.163 +    unmap_domain_page(*table);
 113.164 +    *table = next;
 113.165 +
 113.166 +    return 1;
 113.167 +}
 113.168 +
 113.169 +// Returns 0 on error (out of memory)
 113.170 +static int
 113.171 +set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 113.172 +{
 113.173 +    // XXX -- this might be able to be faster iff current->domain == d
 113.174 +    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
  113.175 +    void *table = map_domain_page(mfn_x(table_mfn));
 113.176 +    unsigned long gfn_remainder = gfn;
 113.177 +    l1_pgentry_t *p2m_entry;
 113.178 +    l1_pgentry_t entry_content;
  113.179 +    int rv = 0;
 113.180 +
 113.181 +#if CONFIG_PAGING_LEVELS >= 4
 113.182 +    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 113.183 +                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
 113.184 +                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
 113.185 +        goto out;
 113.186 +#endif
 113.187 +#if CONFIG_PAGING_LEVELS >= 3
 113.188 +    // When using PAE Xen, we only allow 33 bits of pseudo-physical
 113.189 +    // address in translated guests (i.e. 8 GBytes).  This restriction
 113.190 +    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
 113.191 +    // in Xen's address space for translated PV guests.
 113.192 +    //
 113.193 +    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 113.194 +                         L3_PAGETABLE_SHIFT - PAGE_SHIFT,
 113.195 +                         (CONFIG_PAGING_LEVELS == 3
 113.196 +                          ? 8
 113.197 +                          : L3_PAGETABLE_ENTRIES),
 113.198 +                         PGT_l2_page_table) )
 113.199 +        goto out;
 113.200 +#endif
 113.201 +    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 113.202 +                         L2_PAGETABLE_SHIFT - PAGE_SHIFT,
 113.203 +                         L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
 113.204 +        goto out;
 113.205 +
 113.206 +    p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
 113.207 +                               0, L1_PAGETABLE_ENTRIES);
 113.208 +    ASSERT(p2m_entry);
 113.209 +
 113.210 +    /* Track the highest gfn for which we have ever had a valid mapping */
 113.211 +    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) ) 
 113.212 +        d->arch.p2m.max_mapped_pfn = gfn;
 113.213 +
 113.214 +    if ( mfn_valid(mfn) )
 113.215 +        entry_content = l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR|_PAGE_USER);
 113.216 +    else
 113.217 +        entry_content = l1e_empty();
 113.218 +
 113.219 +    /* level 1 entry */
 113.220 +    paging_write_p2m_entry(d, gfn, p2m_entry, entry_content, 1);
 113.221 +
 113.222 +    /* Success */
 113.223 +    rv = 1;
 113.224 + 
 113.225 + out:
 113.226 +    unmap_domain_page(table);
 113.227 +    return rv;
 113.228 +}
 113.229 +
 113.230 +
 113.231 +/* Init the datastructures for later use by the p2m code */
 113.232 +void p2m_init(struct domain *d)
 113.233 +{
 113.234 +    p2m_lock_init(d);
 113.235 +    INIT_LIST_HEAD(&d->arch.p2m.pages);
 113.236 +}
 113.237 +
 113.238 +
 113.239 +// Allocate a new p2m table for a domain.
 113.240 +//
 113.241 +// The structure of the p2m table is that of a pagetable for xen (i.e. it is
 113.242 +// controlled by CONFIG_PAGING_LEVELS).
 113.243 +//
 113.244 +// The alloc_page and free_page functions will be used to get memory to
  113.245 +// build the p2m, and to release it again at the end of the day.
 113.246 +//
 113.247 +// Returns 0 for success or -errno.
 113.248 +//
 113.249 +int p2m_alloc_table(struct domain *d,
 113.250 +                    struct page_info * (*alloc_page)(struct domain *d),
 113.251 +                    void (*free_page)(struct domain *d, struct page_info *pg))
 113.252 +
 113.253 +{
 113.254 +    mfn_t mfn;
 113.255 +    struct list_head *entry;
 113.256 +    struct page_info *page, *p2m_top;
 113.257 +    unsigned int page_count = 0;
 113.258 +    unsigned long gfn;
 113.259 +    
 113.260 +    p2m_lock(d);
 113.261 +
 113.262 +    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
 113.263 +    {
 113.264 +        P2M_ERROR("p2m already allocated for this domain\n");
 113.265 +        p2m_unlock(d);
 113.266 +        return -EINVAL;
 113.267 +    }
 113.268 +
 113.269 +    P2M_PRINTK("allocating p2m table\n");
 113.270 +
 113.271 +    d->arch.p2m.alloc_page = alloc_page;
 113.272 +    d->arch.p2m.free_page = free_page;
 113.273 +
 113.274 +    p2m_top = d->arch.p2m.alloc_page(d);
 113.275 +    if ( p2m_top == NULL )
 113.276 +    {
 113.277 +        p2m_unlock(d);
 113.278 +        return -ENOMEM;
 113.279 +    }
  113.280 +    list_add_tail(&p2m_top->list, &d->arch.p2m.pages);
 113.281 +
 113.282 +    p2m_top->count_info = 1;
 113.283 +    p2m_top->u.inuse.type_info = 
 113.284 +#if CONFIG_PAGING_LEVELS == 4
 113.285 +        PGT_l4_page_table
 113.286 +#elif CONFIG_PAGING_LEVELS == 3
 113.287 +        PGT_l3_page_table
 113.288 +#elif CONFIG_PAGING_LEVELS == 2
 113.289 +        PGT_l2_page_table
 113.290 +#endif
 113.291 +        | 1 | PGT_validated;
 113.292 +
 113.293 +    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
 113.294 +
 113.295 +    P2M_PRINTK("populating p2m table\n");
 113.296 + 
 113.297 +    /* Initialise physmap tables for slot zero. Other code assumes this. */
 113.298 +    gfn = 0;
  113.299 +    mfn = _mfn(INVALID_MFN);
 113.300 +    if ( !set_p2m_entry(d, gfn, mfn) )
 113.301 +        goto error;
 113.302 +
 113.303 +    for ( entry = d->page_list.next;
 113.304 +          entry != &d->page_list;
 113.305 +          entry = entry->next )
 113.306 +    {
 113.307 +        page = list_entry(entry, struct page_info, list);
 113.308 +        mfn = page_to_mfn(page);
 113.309 +        gfn = get_gpfn_from_mfn(mfn_x(mfn));
 113.310 +        page_count++;
 113.311 +        if (
 113.312 +#ifdef __x86_64__
 113.313 +            (gfn != 0x5555555555555555L)
 113.314 +#else
 113.315 +            (gfn != 0x55555555L)
 113.316 +#endif
 113.317 +             && gfn != INVALID_M2P_ENTRY
 113.318 +             && !set_p2m_entry(d, gfn, mfn) )
 113.319 +            goto error;
 113.320 +    }
 113.321 +
 113.322 +    P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
 113.323 +    p2m_unlock(d);
 113.324 +    return 0;
 113.325 +
 113.326 + error:
 113.327 +    P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
 113.328 +               PRI_mfn "\n", gfn, mfn_x(mfn));
 113.329 +    p2m_unlock(d);
 113.330 +    return -ENOMEM;
 113.331 +}
 113.332 +
 113.333 +void p2m_teardown(struct domain *d)
 113.334 +/* Return all the p2m pages to Xen.
 113.335 + * We know we don't have any extra mappings to these pages */
 113.336 +{
 113.337 +    struct list_head *entry, *n;
 113.338 +    struct page_info *pg;
 113.339 +
 113.340 +    p2m_lock(d);
 113.341 +    d->arch.phys_table = pagetable_null();
 113.342 +
 113.343 +    list_for_each_safe(entry, n, &d->arch.p2m.pages)
 113.344 +    {
 113.345 +        pg = list_entry(entry, struct page_info, list);
 113.346 +        list_del(entry);
 113.347 +        d->arch.p2m.free_page(d, pg);
 113.348 +    }
 113.349 +    p2m_unlock(d);
 113.350 +}
 113.351 +
 113.352 +mfn_t
 113.353 +gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 113.354 +/* Read another domain's p2m entries */
 113.355 +{
 113.356 +    mfn_t mfn;
 113.357 +    paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
 113.358 +    l2_pgentry_t *l2e;
 113.359 +    l1_pgentry_t *l1e;
 113.360 +    
 113.361 +    ASSERT(paging_mode_translate(d));
 113.362 +    mfn = pagetable_get_mfn(d->arch.phys_table);
 113.363 +
 113.364 +
 113.365 +    if ( gpfn > d->arch.p2m.max_mapped_pfn ) 
 113.366 +        /* This pfn is higher than the highest the p2m map currently holds */
 113.367 +        return _mfn(INVALID_MFN);
 113.368 +
 113.369 +#if CONFIG_PAGING_LEVELS >= 4
 113.370 +    { 
 113.371 +        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
 113.372 +        l4e += l4_table_offset(addr);
 113.373 +        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
 113.374 +        {
 113.375 +            unmap_domain_page(l4e);
 113.376 +            return _mfn(INVALID_MFN);
 113.377 +        }
 113.378 +        mfn = _mfn(l4e_get_pfn(*l4e));
 113.379 +        unmap_domain_page(l4e);
 113.380 +    }
 113.381 +#endif
 113.382 +#if CONFIG_PAGING_LEVELS >= 3
 113.383 +    {
 113.384 +        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
 113.385 +#if CONFIG_PAGING_LEVELS == 3
 113.386 +        /* On PAE hosts the p2m has eight l3 entries, not four (see
 113.387 +         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
 113.388 +         * Instead, just count the number of l3es from zero.  It's safe
 113.389 +         * to do this because we already checked that the gfn is within
 113.390 +         * the bounds of the p2m. */
 113.391 +        l3e += (addr >> L3_PAGETABLE_SHIFT);
 113.392 +#else
 113.393 +        l3e += l3_table_offset(addr);        
 113.394 +#endif
 113.395 +        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
 113.396 +        {
 113.397 +            unmap_domain_page(l3e);
 113.398 +            return _mfn(INVALID_MFN);
 113.399 +        }
 113.400 +        mfn = _mfn(l3e_get_pfn(*l3e));
 113.401 +        unmap_domain_page(l3e);
 113.402 +    }
 113.403 +#endif
 113.404 +
 113.405 +    l2e = map_domain_page(mfn_x(mfn));
 113.406 +    l2e += l2_table_offset(addr);
 113.407 +    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
 113.408 +    {
 113.409 +        unmap_domain_page(l2e);
 113.410 +        return _mfn(INVALID_MFN);
 113.411 +    }
 113.412 +    mfn = _mfn(l2e_get_pfn(*l2e));
 113.413 +    unmap_domain_page(l2e);
 113.414 +
 113.415 +    l1e = map_domain_page(mfn_x(mfn));
 113.416 +    l1e += l1_table_offset(addr);
 113.417 +    if ( (l1e_get_flags(*l1e) & _PAGE_PRESENT) == 0 )
 113.418 +    {
 113.419 +        unmap_domain_page(l1e);
 113.420 +        return _mfn(INVALID_MFN);
 113.421 +    }
 113.422 +    mfn = _mfn(l1e_get_pfn(*l1e));
 113.423 +    unmap_domain_page(l1e);
 113.424 +
 113.425 +    return mfn;
 113.426 +}
 113.427 +
 113.428 +#if P2M_AUDIT
 113.429 +static void audit_p2m(struct domain *d)
 113.430 +{
 113.431 +    struct list_head *entry;
 113.432 +    struct page_info *page;
 113.433 +    struct domain *od;
 113.434 +    unsigned long mfn, gfn, m2pfn, lp2mfn = 0;
 113.435 +    mfn_t p2mfn;
 113.436 +    unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
 113.437 +    int test_linear;
 113.438 +    
 113.439 +    if ( !paging_mode_translate(d) )
 113.440 +        return;
 113.441 +
 113.442 +    //P2M_PRINTK("p2m audit starts\n");
 113.443 +
 113.444 +    test_linear = ( (d == current->domain) 
 113.445 +                    && !pagetable_is_null(current->arch.monitor_table) );
 113.446 +    if ( test_linear )
 113.447 +        local_flush_tlb(); 
 113.448 +
 113.449 +    /* Audit part one: walk the domain's page allocation list, checking 
 113.450 +     * the m2p entries. */
 113.451 +    for ( entry = d->page_list.next;
 113.452 +          entry != &d->page_list;
 113.453 +          entry = entry->next )
 113.454 +    {
 113.455 +        page = list_entry(entry, struct page_info, list);
 113.456 +        mfn = mfn_x(page_to_mfn(page));
 113.457 +
 113.458 +        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn); 
 113.459 +
 113.460 +        od = page_get_owner(page);
 113.461 +
 113.462 +        if ( od != d ) 
 113.463 +        {
 113.464 +            P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
 113.465 +                       mfn, od, (od?od->domain_id:-1), d, d->domain_id);
 113.466 +            continue;
 113.467 +        }
 113.468 +
 113.469 +        gfn = get_gpfn_from_mfn(mfn);
 113.470 +        if ( gfn == INVALID_M2P_ENTRY ) 
 113.471 +        {
 113.472 +            orphans_i++;
 113.473 +            //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
 113.474 +            //               mfn); 
 113.475 +            continue;
 113.476 +        }
 113.477 +
 113.478 +        if ( gfn == 0x55555555 ) 
 113.479 +        {
 113.480 +            orphans_d++;
 113.481 +            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n", 
 113.482 +            //               mfn); 
 113.483 +            continue;
 113.484 +        }
 113.485 +
 113.486 +        p2mfn = gfn_to_mfn_foreign(d, gfn);
 113.487 +        if ( mfn_x(p2mfn) != mfn )
 113.488 +        {
 113.489 +            mpbad++;
 113.490 +            P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
 113.491 +                       " (-> gfn %#lx)\n",
 113.492 +                       mfn, gfn, mfn_x(p2mfn),
 113.493 +                       (mfn_valid(p2mfn)
 113.494 +                        ? get_gpfn_from_mfn(mfn_x(p2mfn))
 113.495 +                        : -1u));
 113.496 +            /* This m2p entry is stale: the domain has another frame in
 113.497 +             * this physical slot.  No great disaster, but for neatness,
 113.498 +             * blow away the m2p entry. */ 
 113.499 +            set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
 113.500 +        }
 113.501 +
 113.502 +        if ( test_linear && (gfn <= d->arch.p2m.max_mapped_pfn) )
 113.503 +        {
 113.504 +            lp2mfn = mfn_x(gfn_to_mfn_current(gfn));
 113.505 +            if ( lp2mfn != mfn_x(p2mfn) )
 113.506 +            {
 113.507 +                P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
 113.508 +                           "(!= mfn %#lx)\n", gfn, lp2mfn, mfn_x(p2mfn));
 113.509 +            }
 113.510 +        }
 113.511 +
 113.512 +        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n", 
 113.513 +        //                mfn, gfn, p2mfn, lp2mfn); 
 113.514 +    }   
 113.515 +
 113.516 +    /* Audit part two: walk the domain's p2m table, checking the entries. */
 113.517 +    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
 113.518 +    {
 113.519 +        l2_pgentry_t *l2e;
 113.520 +        l1_pgentry_t *l1e;
 113.521 +        int i1, i2;
 113.522 +        
 113.523 +#if CONFIG_PAGING_LEVELS == 4
 113.524 +        l4_pgentry_t *l4e;
 113.525 +        l3_pgentry_t *l3e;
 113.526 +        int i3, i4;
 113.527 +        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 113.528 +#elif CONFIG_PAGING_LEVELS == 3
 113.529 +        l3_pgentry_t *l3e;
 113.530 +        int i3;
 113.531 +        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 113.532 +#else /* CONFIG_PAGING_LEVELS == 2 */
 113.533 +        l2e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
 113.534 +#endif
 113.535 +
 113.536 +        gfn = 0;
 113.537 +#if CONFIG_PAGING_LEVELS >= 3
 113.538 +#if CONFIG_PAGING_LEVELS >= 4
 113.539 +        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
 113.540 +        {
 113.541 +            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
 113.542 +            {
 113.543 +                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
 113.544 +                continue;
 113.545 +            }
 113.546 +            l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
 113.547 +#endif /* now at levels 3 or 4... */
 113.548 +            for ( i3 = 0; 
 113.549 +                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8); 
 113.550 +                  i3++ )
 113.551 +            {
 113.552 +                if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
 113.553 +                {
 113.554 +                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
 113.555 +                    continue;
 113.556 +                }
 113.557 +                l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
 113.558 +#endif /* all levels... */
 113.559 +                for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
 113.560 +                {
 113.561 +                    if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
 113.562 +                    {
 113.563 +                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
 113.564 +                        continue;
 113.565 +                    }
 113.566 +                    l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));
 113.567 +                    
 113.568 +                    for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
 113.569 +                    {
 113.570 +                        if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
 113.571 +                            continue;
 113.572 +                        mfn = l1e_get_pfn(l1e[i1]);
 113.573 +                        ASSERT(mfn_valid(_mfn(mfn)));
 113.574 +                        m2pfn = get_gpfn_from_mfn(mfn);
 113.575 +                        if ( m2pfn != gfn )
 113.576 +                        {
 113.577 +                            pmbad++;
 113.578 +                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
 113.579 +                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
 113.580 +                            BUG();
 113.581 +                        }
 113.582 +                    }
 113.583 +                    unmap_domain_page(l1e);
 113.584 +                }
 113.585 +#if CONFIG_PAGING_LEVELS >= 3
 113.586 +                unmap_domain_page(l2e);
 113.587 +            }
 113.588 +#if CONFIG_PAGING_LEVELS >= 4
 113.589 +            unmap_domain_page(l3e);
 113.590 +        }
 113.591 +#endif
 113.592 +#endif
 113.593 +
 113.594 +#if CONFIG_PAGING_LEVELS == 4
 113.595 +        unmap_domain_page(l4e);
 113.596 +#elif CONFIG_PAGING_LEVELS == 3
 113.597 +        unmap_domain_page(l3e);
 113.598 +#else /* CONFIG_PAGING_LEVELS == 2 */
 113.599 +        unmap_domain_page(l2e);
 113.600 +#endif
 113.601 +
 113.602 +    }
 113.603 +
 113.604 +    //P2M_PRINTK("p2m audit complete\n");
 113.605 +    //if ( orphans_i | orphans_d | mpbad | pmbad ) 
 113.606 +    //    P2M_PRINTK("p2m audit found %lu orphans (%lu inval %lu debug)\n",
 113.607 +    //                   orphans_i + orphans_d, orphans_i, orphans_d,
 113.608 +    if ( mpbad | pmbad ) 
 113.609 +        P2M_PRINTK("p2m audit found %lu odd p2m, %lu bad m2p entries\n",
 113.610 +                   pmbad, mpbad);
 113.611 +}
 113.612 +#else 
 113.613 +#define audit_p2m(_d) do { (void)(_d); } while(0)
 113.614 +#endif /* P2M_AUDIT */
 113.615 +
 113.616 +
 113.617 +
 113.618 +static void
 113.619 +p2m_remove_page(struct domain *d, unsigned long gfn, unsigned long mfn)
 113.620 +{
 113.621 +    if ( !paging_mode_translate(d) )
 113.622 +        return;
 113.623 +    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);
 113.624 +
 113.625 +    ASSERT(mfn_x(gfn_to_mfn(d, gfn)) == mfn);
 113.626 +    //ASSERT(mfn_to_gfn(d, mfn) == gfn);
 113.627 +
 113.628 +    set_p2m_entry(d, gfn, _mfn(INVALID_MFN));
 113.629 +    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
 113.630 +}
 113.631 +
 113.632 +void
 113.633 +guest_physmap_remove_page(struct domain *d, unsigned long gfn,
 113.634 +                          unsigned long mfn)
 113.635 +{
 113.636 +    p2m_lock(d);
 113.637 +    audit_p2m(d);
 113.638 +    p2m_remove_page(d, gfn, mfn);
 113.639 +    audit_p2m(d);
 113.640 +    p2m_unlock(d);    
 113.641 +}
 113.642 +
 113.643 +void
 113.644 +guest_physmap_add_page(struct domain *d, unsigned long gfn,
 113.645 +                       unsigned long mfn)
 113.646 +{
 113.647 +    unsigned long ogfn;
 113.648 +    mfn_t omfn;
 113.649 +
 113.650 +    if ( !paging_mode_translate(d) )
 113.651 +        return;
 113.652 +
 113.653 +    p2m_lock(d);
 113.654 +    audit_p2m(d);
 113.655 +
 113.656 +    P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
 113.657 +
 113.658 +    omfn = gfn_to_mfn(d, gfn);
 113.659 +    if ( mfn_valid(omfn) )
 113.660 +    {
 113.661 +        set_p2m_entry(d, gfn, _mfn(INVALID_MFN));
 113.662 +        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
 113.663 +    }
 113.664 +
 113.665 +    ogfn = mfn_to_gfn(d, _mfn(mfn));
 113.666 +    if (
 113.667 +#ifdef __x86_64__
 113.668 +        (ogfn != 0x5555555555555555L)
 113.669 +#else
 113.670 +        (ogfn != 0x55555555L)
 113.671 +#endif
 113.672 +        && (ogfn != INVALID_M2P_ENTRY)
 113.673 +        && (ogfn != gfn) )
 113.674 +    {
 113.675 +        /* This machine frame is already mapped at another physical address */
 113.676 +        P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
 113.677 +                  mfn, ogfn, gfn);
 113.678 +        if ( mfn_valid(omfn = gfn_to_mfn(d, ogfn)) ) 
 113.679 +        {
 113.680 +            P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n", 
 113.681 +                      ogfn , mfn_x(omfn));
 113.682 +            if ( mfn_x(omfn) == mfn ) 
 113.683 +                p2m_remove_page(d, ogfn, mfn);
 113.684 +        }
 113.685 +    }
 113.686 +
 113.687 +    set_p2m_entry(d, gfn, _mfn(mfn));
 113.688 +    set_gpfn_from_mfn(mfn, gfn);
 113.689 +
 113.690 +    audit_p2m(d);
 113.691 +    p2m_unlock(d);
 113.692 +}
 113.693 +
 113.694 +
 113.695 +/*
 113.696 + * Local variables:
 113.697 + * mode: C
 113.698 + * c-set-style: "BSD"
 113.699 + * c-basic-offset: 4
 113.700 + * indent-tabs-mode: nil
 113.701 + * End:
 113.702 + */
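
The new p2m.c above builds the phys-to-machine table out of ordinary pagetable pages: set_p2m_entry() walks one level at a time via p2m_next_level(), allocating intermediate tables on demand, and gfn_to_mfn_foreign() performs the read-only walk down the same tree. The fragment below is a deliberately simplified, self-contained two-level model of that walk-and-allocate pattern, using fixed 10-bit indices, malloc instead of domain pages, and no locking. None of its names are Xen's; it is a sketch of the idea, not the implementation.

/* Toy two-level gfn -> mfn map, illustrating the pattern of
 * p2m_next_level()/set_p2m_entry().  All names here are invented;
 * the real entries are l1_pgentry_t etc. and the tree depth follows
 * CONFIG_PAGING_LEVELS. */
#include <stdio.h>
#include <stdlib.h>

#define LEVEL_BITS   10
#define LEVEL_SIZE   (1u << LEVEL_BITS)
#define INVALID_MFN  (~0ul)

typedef struct { unsigned long *l1[LEVEL_SIZE]; } toy_p2m;

static int toy_set_p2m_entry(toy_p2m *p2m, unsigned long gfn,
                             unsigned long mfn)
{
    unsigned long i2 = gfn >> LEVEL_BITS, i1 = gfn & (LEVEL_SIZE - 1);
    if ( i2 >= LEVEL_SIZE )
        return 0;                       /* gfn out of range */
    if ( p2m->l1[i2] == NULL )          /* allocate the next level on demand */
    {
        unsigned long *t = malloc(LEVEL_SIZE * sizeof(*t));
        if ( t == NULL )
            return 0;
        for ( unsigned long k = 0; k < LEVEL_SIZE; k++ )
            t[k] = INVALID_MFN;
        p2m->l1[i2] = t;
    }
    p2m->l1[i2][i1] = mfn;
    return 1;
}

static unsigned long toy_gfn_to_mfn(const toy_p2m *p2m, unsigned long gfn)
{
    unsigned long i2 = gfn >> LEVEL_BITS, i1 = gfn & (LEVEL_SIZE - 1);
    if ( i2 >= LEVEL_SIZE || p2m->l1[i2] == NULL )
        return INVALID_MFN;             /* not-present at some level */
    return p2m->l1[i2][i1];
}

int main(void)
{
    static toy_p2m p2m;                 /* zero-initialised top level */
    toy_set_p2m_entry(&p2m, 0x1234, 0xabcd);
    printf("gfn 0x1234 -> mfn %#lx\n", toy_gfn_to_mfn(&p2m, 0x1234));
    printf("gfn 0x9999 -> mfn %#lx\n", toy_gfn_to_mfn(&p2m, 0x9999));
    return 0;
}
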
   114.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   114.2 +++ b/xen/arch/x86/mm/paging.c	Wed Feb 14 19:01:35 2007 +0000
   114.3 @@ -0,0 +1,143 @@
   114.4 +/******************************************************************************
    114.5 + * arch/x86/mm/paging.c
   114.6 + *
   114.7 + * x86 specific paging support
   114.8 + * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
   114.9 + * Copyright (c) 2007 XenSource Inc.
  114.10 + *
  114.11 + * This program is free software; you can redistribute it and/or modify
  114.12 + * it under the terms of the GNU General Public License as published by
  114.13 + * the Free Software Foundation; either version 2 of the License, or
  114.14 + * (at your option) any later version.
  114.15 + *
  114.16 + * This program is distributed in the hope that it will be useful,
  114.17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  114.18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  114.19 + * GNU General Public License for more details.
  114.20 + *
  114.21 + * You should have received a copy of the GNU General Public License
  114.22 + * along with this program; if not, write to the Free Software
  114.23 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  114.24 + */
  114.25 +
  114.26 +#include <xen/init.h>
  114.27 +#include <asm/paging.h>
  114.28 +#include <asm/shadow.h>
  114.29 +#include <asm/p2m.h>
  114.30 +
  114.31 +/* Xen command-line option to enable hardware-assisted paging */
  114.32 +int opt_hap_enabled = 0; 
  114.33 +boolean_param("hap", opt_hap_enabled);
  114.34 +
  114.35 +/* Printouts */
  114.36 +#define PAGING_PRINTK(_f, _a...)                                     \
  114.37 +    debugtrace_printk("pg: %s(): " _f, __func__, ##_a)
  114.38 +#define PAGING_ERROR(_f, _a...)                                      \
  114.39 +    printk("pg error: %s(): " _f, __func__, ##_a)
  114.40 +#define PAGING_DEBUG(flag, _f, _a...)                                \
  114.41 +    do {                                                             \
  114.42 +        if (PAGING_DEBUG_ ## flag)                                   \
  114.43 +            debugtrace_printk("pgdebug: %s(): " _f, __func__, ##_a); \
  114.44 +    } while (0)
  114.45 +
  114.46 +
  114.47 +/* Domain paging struct initialization. */
  114.48 +void paging_domain_init(struct domain *d)
  114.49 +{
  114.50 +    p2m_init(d);
  114.51 +    shadow_domain_init(d);
  114.52 +}
  114.53 +
  114.54 +/* vcpu paging struct initialization goes here */
  114.55 +void paging_vcpu_init(struct vcpu *v)
  114.56 +{
  114.57 +    shadow_vcpu_init(v);
  114.58 +}
  114.59 +
  114.60 +
  114.61 +int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
  114.62 +                  XEN_GUEST_HANDLE(void) u_domctl)
  114.63 +{
  114.64 +    /* Here, dispatch domctl to the appropriate paging code */
  114.65 +    return shadow_domctl(d, sc, u_domctl);
  114.66 +}
  114.67 +
  114.68 +/* Call when destroying a domain */
  114.69 +void paging_teardown(struct domain *d)
  114.70 +{
  114.71 +    shadow_teardown(d);
  114.72 +    /* Call other modes' teardown code here */    
  114.73 +}
  114.74 +
  114.75 +/* Call once all of the references to the domain have gone away */
  114.76 +void paging_final_teardown(struct domain *d)
  114.77 +{
  114.78 +    shadow_teardown(d);
  114.79 +    /* Call other modes' final teardown code here */
  114.80 +}
  114.81 +
  114.82 +/* Enable an arbitrary paging-assistance mode.  Call once at domain
  114.83 + * creation. */
  114.84 +int paging_enable(struct domain *d, u32 mode)
  114.85 +{
  114.86 +    if ( mode & PG_SH_enable ) 
  114.87 +        return shadow_enable(d, mode);
  114.88 +    else
  114.89 +        /* No other modes supported yet */
  114.90 +        return -EINVAL; 
  114.91 +}
  114.92 +
  114.93 +/* Print paging-assistance info to the console */
  114.94 +void paging_dump_domain_info(struct domain *d)
  114.95 +{
  114.96 +    if ( paging_mode_enabled(d) )
  114.97 +    {
  114.98 +        printk("    paging assistance: ");
  114.99 +        if ( paging_mode_shadow(d) )
 114.100 +            printk("shadow ");
 114.101 +        if ( paging_mode_hap(d) )
 114.102 +            printk("hap ");
 114.103 +        if ( paging_mode_refcounts(d) )
 114.104 +            printk("refcounts ");
 114.105 +        if ( paging_mode_log_dirty(d) )
 114.106 +            printk("log_dirty ");
 114.107 +        if ( paging_mode_translate(d) )
 114.108 +            printk("translate ");
 114.109 +        if ( paging_mode_external(d) )
 114.110 +            printk("external ");
 114.111 +        printk("\n");
 114.112 +    }
 114.113 +}
 114.114 +
 114.115 +void paging_dump_vcpu_info(struct vcpu *v)
 114.116 +{
 114.117 +    if ( paging_mode_enabled(v->domain) )
 114.118 +    {
 114.119 +        printk("    paging assistance: ");        
 114.120 +        if ( paging_mode_shadow(v->domain) )
 114.121 +        {
 114.122 +            if ( v->arch.paging.mode )
 114.123 +                printk("shadowed %u-on-%u, %stranslated\n",
 114.124 +                       v->arch.paging.mode->guest_levels,
 114.125 +                       v->arch.paging.mode->shadow.shadow_levels,
 114.126 +                       paging_vcpu_mode_translate(v) ? "" : "not ");
 114.127 +            else
 114.128 +                printk("not shadowed\n");
 114.129 +        }
 114.130 +        else if ( paging_mode_hap(v->domain) && v->arch.paging.mode )
 114.131 +            printk("hap, %u levels\n", 
 114.132 +                   v->arch.paging.mode->guest_levels);
 114.133 +        else
 114.134 +            printk("none\n");
 114.135 +    }
 114.136 +}
 114.137 +
 114.138 +
 114.139 +/*
 114.140 + * Local variables:
 114.141 + * mode: C
 114.142 + * c-set-style: "BSD"
 114.143 + * c-basic-offset: 4
 114.144 + * indent-tabs-mode: nil
 114.145 + * End:
 114.146 + */
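
The new paging.c above is a thin dispatch layer: the generic entry points (paging_domain_init, paging_domctl, paging_teardown, paging_enable) currently forward to the shadow code, with hardware-assisted paging intended as an alternative backend selected by the "hap" boot parameter and the mode bits. The sketch below models only that dispatch shape; toy_paging_ops, shadow_ops and hap_ops are invented names, and the real per-vcpu table is the struct behind v->arch.paging.mode.

/* Sketch of a paging dispatch layer choosing between two backends.
 * Backend behaviour here is just printf stand-ins for illustration. */
#include <stdio.h>

struct toy_paging_ops {
    const char *name;
    void (*update_cr3)(int vcpu);
    int  (*invlpg)(int vcpu, unsigned long va);   /* 1 = handled */
};

static void shadow_update_cr3(int v)
    { printf("shadow: rebuild shadows for vcpu %d\n", v); }
static int  shadow_invlpg(int v, unsigned long va)
    { printf("shadow: drop shadow l1e for %#lx (vcpu %d)\n", va, v); return 1; }

static void hap_update_cr3(int v)
    { printf("hap: reload guest cr3 for vcpu %d\n", v); }
static int  hap_invlpg(int v, unsigned long va)
    { (void)v; (void)va; return 0; }              /* nothing to do */

static const struct toy_paging_ops shadow_ops =
    { "shadow", shadow_update_cr3, shadow_invlpg };
static const struct toy_paging_ops hap_ops =
    { "hap", hap_update_cr3, hap_invlpg };

/* The backend is picked once, at domain creation time... */
static const struct toy_paging_ops *toy_paging_enable(int use_hap)
{
    return use_hap ? &hap_ops : &shadow_ops;
}

int main(void)
{
    /* ...and everything else just calls through the chosen table. */
    const struct toy_paging_ops *mode = toy_paging_enable(0);
    printf("mode: %s\n", mode->name);
    mode->update_cr3(0);
    if ( !mode->invlpg(0, 0xdeadb000) )
        printf("fall back to a real TLB flush\n");
    return 0;
}
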
   115.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Feb 09 14:43:22 2007 -0600
   115.2 +++ b/xen/arch/x86/mm/shadow/common.c	Wed Feb 14 19:01:35 2007 +0000
   115.3 @@ -47,12 +47,27 @@ void shadow_domain_init(struct domain *d
   115.4      int i;
   115.5      shadow_lock_init(d);
   115.6      for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
   115.7 -        INIT_LIST_HEAD(&d->arch.shadow.freelists[i]);
   115.8 -    INIT_LIST_HEAD(&d->arch.shadow.p2m_freelist);
   115.9 -    INIT_LIST_HEAD(&d->arch.shadow.p2m_inuse);
  115.10 -    INIT_LIST_HEAD(&d->arch.shadow.pinned_shadows);
  115.11 +        INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
  115.12 +    INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
  115.13 +    INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
  115.14  }
  115.15  
   115.16 +/* Set up the shadow-specific parts of a vcpu struct. Note: the most
   115.17 + * important job is to initialize the update_paging_modes() function
   115.18 + * pointer, which is used to initialize the rest of the resources.
   115.19 + * Therefore it does not really matter which mode v->arch.paging.mode
   115.20 + * points to initially, as long as it refers to a mode that is compiled in.
  115.21 + */
  115.22 +void shadow_vcpu_init(struct vcpu *v)
  115.23 +{
  115.24 +#if CONFIG_PAGING_LEVELS == 4
  115.25 +    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
  115.26 +#elif CONFIG_PAGING_LEVELS == 3
  115.27 +    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
  115.28 +#elif CONFIG_PAGING_LEVELS == 2
  115.29 +    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
  115.30 +#endif
  115.31 +}
  115.32  
  115.33  #if SHADOW_AUDIT
  115.34  int shadow_audit_enable = 0;
  115.35 @@ -265,7 +280,7 @@ hvm_emulate_write(enum x86_segment seg,
  115.36      if ( rc )
  115.37          return rc;
  115.38  
  115.39 -    return v->arch.shadow.mode->x86_emulate_write(
  115.40 +    return v->arch.paging.mode->shadow.x86_emulate_write(
  115.41          v, addr, &val, bytes, sh_ctxt);
  115.42  }
  115.43  
  115.44 @@ -288,7 +303,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
  115.45      if ( rc )
  115.46          return rc;
  115.47  
  115.48 -    return v->arch.shadow.mode->x86_emulate_cmpxchg(
  115.49 +    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
  115.50          v, addr, old, new, bytes, sh_ctxt);
  115.51  }
  115.52  
  115.53 @@ -312,7 +327,7 @@ hvm_emulate_cmpxchg8b(enum x86_segment s
  115.54      if ( rc )
  115.55          return rc;
  115.56  
  115.57 -    return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
  115.58 +    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
  115.59          v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
  115.60  }
  115.61  
  115.62 @@ -353,7 +368,7 @@ pv_emulate_write(enum x86_segment seg,
  115.63      struct sh_emulate_ctxt *sh_ctxt =
  115.64          container_of(ctxt, struct sh_emulate_ctxt, ctxt);
  115.65      struct vcpu *v = current;
  115.66 -    return v->arch.shadow.mode->x86_emulate_write(
  115.67 +    return v->arch.paging.mode->shadow.x86_emulate_write(
  115.68          v, offset, &val, bytes, sh_ctxt);
  115.69  }
  115.70  
  115.71 @@ -368,7 +383,7 @@ pv_emulate_cmpxchg(enum x86_segment seg,
  115.72      struct sh_emulate_ctxt *sh_ctxt =
  115.73          container_of(ctxt, struct sh_emulate_ctxt, ctxt);
  115.74      struct vcpu *v = current;
  115.75 -    return v->arch.shadow.mode->x86_emulate_cmpxchg(
  115.76 +    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
  115.77          v, offset, old, new, bytes, sh_ctxt);
  115.78  }
  115.79  
  115.80 @@ -384,7 +399,7 @@ pv_emulate_cmpxchg8b(enum x86_segment se
  115.81      struct sh_emulate_ctxt *sh_ctxt =
  115.82          container_of(ctxt, struct sh_emulate_ctxt, ctxt);
  115.83      struct vcpu *v = current;
  115.84 -    return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
  115.85 +    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
  115.86          v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
  115.87  }
  115.88  
  115.89 @@ -721,7 +736,7 @@ static inline int chunk_is_available(str
  115.90      int i;
  115.91      
  115.92      for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
  115.93 -        if ( !list_empty(&d->arch.shadow.freelists[i]) )
  115.94 +        if ( !list_empty(&d->arch.paging.shadow.freelists[i]) )
  115.95              return 1;
  115.96      return 0;
  115.97  }
  115.98 @@ -783,7 +798,7 @@ void shadow_prealloc(struct domain *d, u
  115.99  
 115.100      /* Stage one: walk the list of pinned pages, unpinning them */
 115.101      perfc_incrc(shadow_prealloc_1);
 115.102 -    list_for_each_backwards_safe(l, t, &d->arch.shadow.pinned_shadows)
 115.103 +    list_for_each_backwards_safe(l, t, &d->arch.paging.shadow.pinned_shadows)
 115.104      {
 115.105          sp = list_entry(l, struct shadow_page_info, list);
 115.106          smfn = shadow_page_to_mfn(sp);
 115.107 @@ -823,9 +838,9 @@ void shadow_prealloc(struct domain *d, u
 115.108      SHADOW_PRINTK("Can't pre-allocate %i shadow pages!\n"
 115.109                     "  shadow pages total = %u, free = %u, p2m=%u\n",
 115.110                     1 << order, 
 115.111 -                   d->arch.shadow.total_pages, 
 115.112 -                   d->arch.shadow.free_pages, 
 115.113 -                   d->arch.shadow.p2m_pages);
 115.114 +                   d->arch.paging.shadow.total_pages, 
 115.115 +                   d->arch.paging.shadow.free_pages, 
 115.116 +                   d->arch.paging.shadow.p2m_pages);
 115.117      BUG();
 115.118  }
 115.119  
 115.120 @@ -840,7 +855,7 @@ static void shadow_blow_tables(struct do
 115.121      int i;
 115.122      
 115.123      /* Pass one: unpin all pinned pages */
 115.124 -    list_for_each_backwards_safe(l,t, &d->arch.shadow.pinned_shadows)
 115.125 +    list_for_each_backwards_safe(l,t, &d->arch.paging.shadow.pinned_shadows)
 115.126      {
 115.127          sp = list_entry(l, struct shadow_page_info, list);
 115.128          smfn = shadow_page_to_mfn(sp);
 115.129 @@ -905,9 +920,9 @@ mfn_t shadow_alloc(struct domain *d,
 115.130  
 115.131      /* Find smallest order which can satisfy the request. */
 115.132      for ( i = order; i <= SHADOW_MAX_ORDER; i++ )
 115.133 -        if ( !list_empty(&d->arch.shadow.freelists[i]) )
 115.134 +        if ( !list_empty(&d->arch.paging.shadow.freelists[i]) )
 115.135          {
 115.136 -            sp = list_entry(d->arch.shadow.freelists[i].next, 
 115.137 +            sp = list_entry(d->arch.paging.shadow.freelists[i].next, 
 115.138                              struct shadow_page_info, list);
 115.139              list_del(&sp->list);
 115.140              
 115.141 @@ -916,10 +931,10 @@ mfn_t shadow_alloc(struct domain *d,
 115.142              {
 115.143                  i--;
 115.144                  sp->order = i;
 115.145 -                list_add_tail(&sp->list, &d->arch.shadow.freelists[i]);
 115.146 +                list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[i]);
 115.147                  sp += 1 << i;
 115.148              }
 115.149 -            d->arch.shadow.free_pages -= 1 << order;
 115.150 +            d->arch.paging.shadow.free_pages -= 1 << order;
 115.151  
 115.152              /* Init page info fields and clear the pages */
 115.153              for ( i = 0; i < 1<<order ; i++ ) 
 115.154 @@ -976,7 +991,7 @@ void shadow_free(struct domain *d, mfn_t
 115.155      ASSERT(shadow_type != SH_type_p2m_table);
 115.156      order = shadow_order(shadow_type);
 115.157  
 115.158 -    d->arch.shadow.free_pages += 1 << order;
 115.159 +    d->arch.paging.shadow.free_pages += 1 << order;
 115.160  
 115.161      for ( i = 0; i < 1<<order; i++ ) 
 115.162      {
 115.163 @@ -985,8 +1000,8 @@ void shadow_free(struct domain *d, mfn_t
 115.164          for_each_vcpu(d, v) 
 115.165          {
 115.166              /* No longer safe to look for a writeable mapping in this shadow */
 115.167 -            if ( v->arch.shadow.last_writeable_pte_smfn == mfn_x(smfn) + i ) 
 115.168 -                v->arch.shadow.last_writeable_pte_smfn = 0;
 115.169 +            if ( v->arch.paging.shadow.last_writeable_pte_smfn == mfn_x(smfn) + i ) 
 115.170 +                v->arch.paging.shadow.last_writeable_pte_smfn = 0;
 115.171          }
 115.172  #endif
 115.173          /* Strip out the type: this is now a free shadow page */
 115.174 @@ -1019,7 +1034,7 @@ void shadow_free(struct domain *d, mfn_t
 115.175      }
 115.176  
 115.177      sp->order = order;
 115.178 -    list_add_tail(&sp->list, &d->arch.shadow.freelists[order]);
 115.179 +    list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
 115.180  }
 115.181  
 115.182  /* Divert some memory from the pool to be used by the p2m mapping.
 115.183 @@ -1033,19 +1048,19 @@ void shadow_free(struct domain *d, mfn_t
 115.184   * returns non-zero on success.
 115.185   */
 115.186  static int
 115.187 -shadow_alloc_p2m_pages(struct domain *d)
 115.188 +sh_alloc_p2m_pages(struct domain *d)
 115.189  {
 115.190      struct page_info *pg;
 115.191      u32 i;
 115.192      ASSERT(shadow_locked_by_me(d));
 115.193      
 115.194 -    if ( d->arch.shadow.total_pages 
 115.195 +    if ( d->arch.paging.shadow.total_pages 
 115.196           < (shadow_min_acceptable_pages(d) + (1<<SHADOW_MAX_ORDER)) )
 115.197          return 0; /* Not enough shadow memory: need to increase it first */
 115.198      
 115.199      pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
 115.200 -    d->arch.shadow.p2m_pages += (1<<SHADOW_MAX_ORDER);
 115.201 -    d->arch.shadow.total_pages -= (1<<SHADOW_MAX_ORDER);
 115.202 +    d->arch.paging.shadow.p2m_pages += (1<<SHADOW_MAX_ORDER);
 115.203 +    d->arch.paging.shadow.total_pages -= (1<<SHADOW_MAX_ORDER);
 115.204      for (i = 0; i < (1<<SHADOW_MAX_ORDER); i++)
 115.205      {
 115.206          /* Unlike shadow pages, mark p2m pages as owned by the domain.
 115.207 @@ -1055,34 +1070,59 @@ shadow_alloc_p2m_pages(struct domain *d)
 115.208           * believed to be a concern.
 115.209           */
 115.210          page_set_owner(&pg[i], d);
 115.211 -        list_add_tail(&pg[i].list, &d->arch.shadow.p2m_freelist);
  115.212 +        pg[i].count_info = 1;
 115.213 +        list_add_tail(&pg[i].list, &d->arch.paging.shadow.p2m_freelist);
 115.214      }
 115.215      return 1;
 115.216  }
 115.217  
 115.218  // Returns 0 if no memory is available...
 115.219 -mfn_t
 115.220 +struct page_info * 
 115.221  shadow_alloc_p2m_page(struct domain *d)
 115.222  {
 115.223      struct list_head *entry;
 115.224      struct page_info *pg;
 115.225      mfn_t mfn;
 115.226      void *p;
 115.227 -
 115.228 -    if ( list_empty(&d->arch.shadow.p2m_freelist) &&
 115.229 -         !shadow_alloc_p2m_pages(d) )
 115.230 -        return _mfn(0);
 115.231 -    entry = d->arch.shadow.p2m_freelist.next;
 115.232 +    
 115.233 +    shadow_lock(d);
 115.234 +
 115.235 +    if ( list_empty(&d->arch.paging.shadow.p2m_freelist) &&
 115.236 +         !sh_alloc_p2m_pages(d) )
 115.237 +    {
 115.238 +        shadow_unlock(d);
 115.239 +        return NULL;
 115.240 +    }
 115.241 +    entry = d->arch.paging.shadow.p2m_freelist.next;
 115.242      list_del(entry);
 115.243 -    list_add_tail(entry, &d->arch.shadow.p2m_inuse);
 115.244 +
 115.245 +    shadow_unlock(d);
 115.246 +
 115.247      pg = list_entry(entry, struct page_info, list);
 115.248 -    pg->count_info = 1;
 115.249      mfn = page_to_mfn(pg);
 115.250      p = sh_map_domain_page(mfn);
 115.251      clear_page(p);
 115.252      sh_unmap_domain_page(p);
 115.253  
 115.254 -    return mfn;
 115.255 +    return pg;
 115.256 +}
 115.257 +
 115.258 +void
 115.259 +shadow_free_p2m_page(struct domain *d, struct page_info *pg)
 115.260 +{
 115.261 +    ASSERT(page_get_owner(pg) == d);
 115.262 +    /* Should have just the one ref we gave it in alloc_p2m_page() */
 115.263 +    if ( (pg->count_info & PGC_count_mask) != 1 )
 115.264 +    {
 115.265 +        SHADOW_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
 115.266 +                     pg->count_info, pg->u.inuse.type_info);
 115.267 +    }
 115.268 +    /* Free should not decrement domain's total allocation, since 
 115.269 +     * these pages were allocated without an owner. */
 115.270 +    page_set_owner(pg, NULL); 
 115.271 +    free_domheap_pages(pg, 0);
 115.272 +    d->arch.paging.shadow.p2m_pages--;
 115.273 +    perfc_decr(shadow_alloc_count);
 115.274  }
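
The pair of routines above defines the contract for p2m pages after this change: shadow_alloc_p2m_page() returns a struct page_info * carrying a single reference, and shadow_free_p2m_page() warns if the page comes back with anything other than that one reference before returning it to the domheap. A minimal caller sketch, assuming only the two functions shown in this hunk (the helper name try_p2m_page is hypothetical):

    /* Hypothetical caller, for illustration only: borrow a p2m page and
     * return it immediately.  Relies only on the alloc/free pair above. */
    static int try_p2m_page(struct domain *d)
    {
        struct page_info *pg = shadow_alloc_p2m_page(d);

        if ( pg == NULL )
            return -ENOMEM;              /* pool exhausted; caller must cope */

        /* ... a real caller would install page_to_mfn(pg) somewhere ... */

        shadow_free_p2m_page(d, pg);     /* gives back the single reference */
        return 0;
    }
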
 115.275  
 115.276  #if CONFIG_PAGING_LEVELS == 3
 115.277 @@ -1130,344 +1170,6 @@ static void p2m_install_entry_in_monitor
 115.278  }
 115.279  #endif
 115.280  
 115.281 -// Find the next level's P2M entry, checking for out-of-range gfn's...
 115.282 -// Returns NULL on error.
 115.283 -//
 115.284 -static l1_pgentry_t *
 115.285 -p2m_find_entry(void *table, unsigned long *gfn_remainder,
 115.286 -                   unsigned long gfn, u32 shift, u32 max)
 115.287 -{
 115.288 -    u32 index;
 115.289 -
 115.290 -    index = *gfn_remainder >> shift;
 115.291 -    if ( index >= max )
 115.292 -    {
 115.293 -        SHADOW_DEBUG(P2M, "gfn=0x%lx out of range "
 115.294 -                      "(gfn_remainder=0x%lx shift=%d index=0x%x max=0x%x)\n",
 115.295 -                       gfn, *gfn_remainder, shift, index, max);
 115.296 -        return NULL;
 115.297 -    }
 115.298 -    *gfn_remainder &= (1 << shift) - 1;
 115.299 -    return (l1_pgentry_t *)table + index;
 115.300 -}
 115.301 -
 115.302 -// Walk one level of the P2M table, allocating a new table if required.
 115.303 -// Returns 0 on error.
 115.304 -//
 115.305 -static int
 115.306 -p2m_next_level(struct domain *d, mfn_t *table_mfn, void **table, 
 115.307 -               unsigned long *gfn_remainder, unsigned long gfn, u32 shift, 
 115.308 -               u32 max, unsigned long type)
 115.309 -{
 115.310 -    l1_pgentry_t *p2m_entry;
 115.311 -    void *next;
 115.312 -
 115.313 -    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
 115.314 -                                      shift, max)) )
 115.315 -        return 0;
 115.316 -
 115.317 -    if ( !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) )
 115.318 -    {
 115.319 -        mfn_t mfn = shadow_alloc_p2m_page(d);
 115.320 -        if ( mfn_x(mfn) == 0 )
 115.321 -            return 0;
 115.322 -        *p2m_entry = l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR|_PAGE_USER);
 115.323 -        mfn_to_page(mfn)->u.inuse.type_info = type | 1 | PGT_validated;
 115.324 -        mfn_to_page(mfn)->count_info = 1;
 115.325 -#if CONFIG_PAGING_LEVELS == 3
 115.326 -        if (type == PGT_l2_page_table)
 115.327 -        {
 115.328 -            struct vcpu *v;
 115.329 -            /* We have written to the p2m l3: need to sync the per-vcpu
 115.330 -             * copies of it in the monitor tables */
 115.331 -            p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p2m_entry);
 115.332 -            /* Also, any vcpus running on shadows of the p2m need to 
 115.333 -             * reload their CR3s so the change propagates to the shadow */
 115.334 -            ASSERT(shadow_locked_by_me(d));
 115.335 -            for_each_vcpu(d, v) 
 115.336 -            {
 115.337 -                if ( pagetable_get_pfn(v->arch.guest_table) 
 115.338 -                     == pagetable_get_pfn(d->arch.phys_table) 
 115.339 -                     && v->arch.shadow.mode != NULL )
 115.340 -                    v->arch.shadow.mode->update_cr3(v, 0);
 115.341 -            }
 115.342 -        }
 115.343 -#endif
 115.344 -        /* The P2M can be shadowed: keep the shadows synced */
 115.345 -        if ( d->vcpu[0] != NULL )
 115.346 -            (void)sh_validate_guest_entry(d->vcpu[0], *table_mfn,
 115.347 -                                          p2m_entry, sizeof *p2m_entry);
 115.348 -    }
 115.349 -    *table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
 115.350 -    next = sh_map_domain_page(*table_mfn);
 115.351 -    sh_unmap_domain_page(*table);
 115.352 -    *table = next;
 115.353 -
 115.354 -    return 1;
 115.355 -}
 115.356 -
 115.357 -// Returns 0 on error (out of memory)
 115.358 -int
 115.359 -shadow_set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 115.360 -{
 115.361 -    // XXX -- this might be able to be faster iff current->domain == d
 115.362 -    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
 115.363 -    void *table = sh_map_domain_page(table_mfn);
 115.364 -    unsigned long gfn_remainder = gfn;
 115.365 -    l1_pgentry_t *p2m_entry;
 115.366 -    int rv=0;
 115.367 -
 115.368 -#if CONFIG_PAGING_LEVELS >= 4
 115.369 -    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 115.370 -                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
 115.371 -                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
 115.372 -        goto out;
 115.373 -#endif
 115.374 -#if CONFIG_PAGING_LEVELS >= 3
 115.375 -    // When using PAE Xen, we only allow 33 bits of pseudo-physical
 115.376 -    // address in translated guests (i.e. 8 GBytes).  This restriction
 115.377 -    // comes from wanting to map the P2M table into the 16MB RO_MPT hole
 115.378 -    // in Xen's address space for translated PV guests.
 115.379 -    //
 115.380 -    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 115.381 -                         L3_PAGETABLE_SHIFT - PAGE_SHIFT,
 115.382 -                         (CONFIG_PAGING_LEVELS == 3
 115.383 -                          ? 8
 115.384 -                          : L3_PAGETABLE_ENTRIES),
 115.385 -                         PGT_l2_page_table) )
 115.386 -        goto out;
 115.387 -#endif
 115.388 -    if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
 115.389 -                         L2_PAGETABLE_SHIFT - PAGE_SHIFT,
 115.390 -                         L2_PAGETABLE_ENTRIES, PGT_l1_page_table) )
 115.391 -        goto out;
 115.392 -
 115.393 -    p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
 115.394 -                               0, L1_PAGETABLE_ENTRIES);
 115.395 -    ASSERT(p2m_entry);
 115.396 -    if ( mfn_valid(mfn) )
 115.397 -        *p2m_entry = l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR|_PAGE_USER);
 115.398 -    else
 115.399 -        *p2m_entry = l1e_empty();
 115.400 -
 115.401 -    /* Track the highest gfn for which we have ever had a valid mapping */
 115.402 -    if ( mfn_valid(mfn) && (gfn > d->arch.max_mapped_pfn) ) 
 115.403 -        d->arch.max_mapped_pfn = gfn;
 115.404 -
 115.405 -    /* The P2M can be shadowed: keep the shadows synced */
 115.406 -    if ( d->vcpu[0] != NULL )
 115.407 -        (void)sh_validate_guest_entry(d->vcpu[0], table_mfn, 
 115.408 -                                      p2m_entry, sizeof(*p2m_entry));
 115.409 -
 115.410 -    /* Success */
 115.411 -    rv = 1;
 115.412 - 
 115.413 - out:
 115.414 -    sh_unmap_domain_page(table);
 115.415 -    return rv;
 115.416 -}
 115.417 -
 115.418 -// Allocate a new p2m table for a domain.
 115.419 -//
 115.420 -// The structure of the p2m table is that of a pagetable for xen (i.e. it is
 115.421 -// controlled by CONFIG_PAGING_LEVELS).
 115.422 -//
 115.423 -// Returns 0 if p2m table could not be initialized
 115.424 -//
 115.425 -static int
 115.426 -shadow_alloc_p2m_table(struct domain *d)
 115.427 -{
 115.428 -    mfn_t p2m_top, mfn;
 115.429 -    struct list_head *entry;
 115.430 -    struct page_info *page;
 115.431 -    unsigned int page_count = 0;
 115.432 -    unsigned long gfn;
 115.433 -    
 115.434 -    SHADOW_PRINTK("allocating p2m table\n");
 115.435 -    ASSERT(pagetable_get_pfn(d->arch.phys_table) == 0);
 115.436 -
 115.437 -    p2m_top = shadow_alloc_p2m_page(d);
 115.438 -    mfn_to_page(p2m_top)->count_info = 1;
 115.439 -    mfn_to_page(p2m_top)->u.inuse.type_info = 
 115.440 -#if CONFIG_PAGING_LEVELS == 4
 115.441 -        PGT_l4_page_table
 115.442 -#elif CONFIG_PAGING_LEVELS == 3
 115.443 -        PGT_l3_page_table
 115.444 -#elif CONFIG_PAGING_LEVELS == 2
 115.445 -        PGT_l2_page_table
 115.446 -#endif
 115.447 -        | 1 | PGT_validated;
 115.448 -   
 115.449 -    if ( mfn_x(p2m_top) == 0 )
 115.450 -        return 0;
 115.451 -
 115.452 -    d->arch.phys_table = pagetable_from_mfn(p2m_top);
 115.453 -
 115.454 -    SHADOW_PRINTK("populating p2m table\n");
 115.455 - 
 115.456 -    /* Initialise physmap tables for slot zero. Other code assumes this. */
 115.457 -    gfn = 0;
 115.458 -    mfn = _mfn(INVALID_MFN);
 115.459 -    if ( !shadow_set_p2m_entry(d, gfn, mfn) )
 115.460 -        goto error;
 115.461 -
 115.462 -    /* Build a p2m map that matches the m2p entries for this domain's
 115.463 -     * allocated pages.  Skip any pages that have an explicitly invalid
 115.464 -     * or obviously bogus m2p entry. */
 115.465 -    for ( entry = d->page_list.next;
 115.466 -          entry != &d->page_list;
 115.467 -          entry = entry->next )
 115.468 -    {
 115.469 -        page = list_entry(entry, struct page_info, list);
 115.470 -        mfn = page_to_mfn(page);
 115.471 -        gfn = get_gpfn_from_mfn(mfn_x(mfn));
 115.472 -        page_count++;
 115.473 -        if (
 115.474 -#ifdef __x86_64__
 115.475 -            (gfn != 0x5555555555555555L)
 115.476 -#else
 115.477 -            (gfn != 0x55555555L)
 115.478 -#endif
 115.479 -             && gfn != INVALID_M2P_ENTRY
 115.480 -             && (gfn < 
 115.481 -                 (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof (l1_pgentry_t))
 115.482 -             && !shadow_set_p2m_entry(d, gfn, mfn) )
 115.483 -            goto error;
 115.484 -    }
 115.485 -
 115.486 -    SHADOW_PRINTK("p2m table initialised (%u pages)\n", page_count);
 115.487 -    return 1;
 115.488 -
 115.489 - error:
 115.490 -    SHADOW_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
 115.491 -                  SH_PRI_mfn "\n", gfn, mfn_x(mfn));
 115.492 -    return 0;
 115.493 -}
 115.494 -
 115.495 -mfn_t
 115.496 -sh_gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
 115.497 -/* Read another domain's p2m entries */
 115.498 -{
 115.499 -    mfn_t mfn;
 115.500 -    paddr_t addr = ((paddr_t)gpfn) << PAGE_SHIFT;
 115.501 -    l2_pgentry_t *l2e;
 115.502 -    l1_pgentry_t *l1e;
 115.503 -    
 115.504 -    ASSERT(shadow_mode_translate(d));
 115.505 -    mfn = pagetable_get_mfn(d->arch.phys_table);
 115.506 -
 115.507 -
 115.508 -    if ( gpfn > d->arch.max_mapped_pfn ) 
 115.509 -        /* This pfn is higher than the highest the p2m map currently holds */
 115.510 -        return _mfn(INVALID_MFN);
 115.511 -
 115.512 -#if CONFIG_PAGING_LEVELS >= 4
 115.513 -    { 
 115.514 -        l4_pgentry_t *l4e = sh_map_domain_page(mfn);
 115.515 -        l4e += l4_table_offset(addr);
 115.516 -        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
 115.517 -        {
 115.518 -            sh_unmap_domain_page(l4e);
 115.519 -            return _mfn(INVALID_MFN);
 115.520 -        }
 115.521 -        mfn = _mfn(l4e_get_pfn(*l4e));
 115.522 -        sh_unmap_domain_page(l4e);
 115.523 -    }
 115.524 -#endif
 115.525 -#if CONFIG_PAGING_LEVELS >= 3
 115.526 -    {
 115.527 -        l3_pgentry_t *l3e = sh_map_domain_page(mfn);
 115.528 -#if CONFIG_PAGING_LEVELS == 3
 115.529 -        /* On PAE hosts the p2m has eight l3 entries, not four (see
 115.530 -         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
 115.531 -         * Instead, just count the number of l3es from zero.  It's safe
 115.532 -         * to do this because we already checked that the gfn is within
 115.533 -         * the bounds of the p2m. */
 115.534 -        l3e += (addr >> L3_PAGETABLE_SHIFT);
 115.535 -#else
 115.536 -        l3e += l3_table_offset(addr);        
 115.537 -#endif
 115.538 -        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
 115.539 -        {
 115.540 -            sh_unmap_domain_page(l3e);
 115.541 -            return _mfn(INVALID_MFN);
 115.542 -        }
 115.543 -        mfn = _mfn(l3e_get_pfn(*l3e));
 115.544 -        sh_unmap_domain_page(l3e);
 115.545 -    }
 115.546 -#endif
 115.547 -
 115.548 -    l2e = sh_map_domain_page(mfn);
 115.549 -    l2e += l2_table_offset(addr);
 115.550 -    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
 115.551 -    {
 115.552 -        sh_unmap_domain_page(l2e);
 115.553 -        return _mfn(INVALID_MFN);
 115.554 -    }
 115.555 -    mfn = _mfn(l2e_get_pfn(*l2e));
 115.556 -    sh_unmap_domain_page(l2e);
 115.557 -
 115.558 -    l1e = sh_map_domain_page(mfn);
 115.559 -    l1e += l1_table_offset(addr);
 115.560 -    if ( (l1e_get_flags(*l1e) & _PAGE_PRESENT) == 0 )
 115.561 -    {
 115.562 -        sh_unmap_domain_page(l1e);
 115.563 -        return _mfn(INVALID_MFN);
 115.564 -    }
 115.565 -    mfn = _mfn(l1e_get_pfn(*l1e));
 115.566 -    sh_unmap_domain_page(l1e);
 115.567 -
 115.568 -    return mfn;
 115.569 -}
 115.570 -
 115.571 -unsigned long
 115.572 -shadow_gfn_to_mfn_foreign(unsigned long gpfn)
 115.573 -{
 115.574 -    return mfn_x(sh_gfn_to_mfn_foreign(current->domain, gpfn));
 115.575 -}
 115.576 -
 115.577 -
 115.578 -static void shadow_p2m_teardown(struct domain *d)
 115.579 -/* Return all the p2m pages to Xen.
 115.580 - * We know we don't have any extra mappings to these pages */
 115.581 -{
 115.582 -    struct list_head *entry, *n;
 115.583 -    struct page_info *pg;
 115.584 -
 115.585 -    d->arch.phys_table = pagetable_null();
 115.586 -
 115.587 -    list_for_each_safe(entry, n, &d->arch.shadow.p2m_inuse)
 115.588 -    {
 115.589 -        pg = list_entry(entry, struct page_info, list);
 115.590 -        list_del(entry);
 115.591 -        /* Should have just the one ref we gave it in alloc_p2m_page() */
 115.592 -        if ( (pg->count_info & PGC_count_mask) != 1 )
 115.593 -        {
 115.594 -            SHADOW_PRINTK("Odd p2m page count c=%#x t=%"PRtype_info"\n",
 115.595 -                           pg->count_info, pg->u.inuse.type_info);
 115.596 -        }
 115.597 -        ASSERT(page_get_owner(pg) == d);
 115.598 -        /* Free should not decrement domain's total allocation, since 
 115.599 -         * these pages were allocated without an owner. */
 115.600 -        page_set_owner(pg, NULL); 
 115.601 -        free_domheap_pages(pg, 0);
 115.602 -        d->arch.shadow.p2m_pages--;
 115.603 -        perfc_decr(shadow_alloc_count);
 115.604 -    }
 115.605 -    list_for_each_safe(entry, n, &d->arch.shadow.p2m_freelist)
 115.606 -    {
 115.607 -        list_del(entry);
 115.608 -        pg = list_entry(entry, struct page_info, list);
 115.609 -        ASSERT(page_get_owner(pg) == d);
 115.610 -        /* Free should not decrement domain's total allocation. */
 115.611 -        page_set_owner(pg, NULL); 
 115.612 -        free_domheap_pages(pg, 0);
 115.613 -        d->arch.shadow.p2m_pages--;
 115.614 -        perfc_decr(shadow_alloc_count);
 115.615 -    }
 115.616 -    ASSERT(d->arch.shadow.p2m_pages == 0);
 115.617 -}
 115.618 -
 115.619  /* Set the pool of shadow pages to the required number of pages.
 115.620   * Input will be rounded up to at least shadow_min_acceptable_pages(),
 115.621   * plus space for the p2m table.
 115.622 @@ -1491,11 +1193,11 @@ static unsigned int sh_set_allocation(st
 115.623      pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);
 115.624  
 115.625      SHADOW_PRINTK("current %i target %i\n", 
 115.626 -                   d->arch.shadow.total_pages, pages);
 115.627 -
 115.628 -    while ( d->arch.shadow.total_pages != pages ) 
 115.629 +                   d->arch.paging.shadow.total_pages, pages);
 115.630 +
 115.631 +    while ( d->arch.paging.shadow.total_pages != pages ) 
 115.632      {
 115.633 -        if ( d->arch.shadow.total_pages < pages ) 
 115.634 +        if ( d->arch.paging.shadow.total_pages < pages ) 
 115.635          {
 115.636              /* Need to allocate more memory from domheap */
 115.637              sp = (struct shadow_page_info *)
 115.638 @@ -1505,8 +1207,8 @@ static unsigned int sh_set_allocation(st
 115.639                  SHADOW_PRINTK("failed to allocate shadow pages.\n");
 115.640                  return -ENOMEM;
 115.641              }
 115.642 -            d->arch.shadow.free_pages += 1<<SHADOW_MAX_ORDER;
 115.643 -            d->arch.shadow.total_pages += 1<<SHADOW_MAX_ORDER;
 115.644 +            d->arch.paging.shadow.free_pages += 1<<SHADOW_MAX_ORDER;
 115.645 +            d->arch.paging.shadow.total_pages += 1<<SHADOW_MAX_ORDER;
 115.646              for ( j = 0; j < 1<<SHADOW_MAX_ORDER; j++ ) 
 115.647              {
 115.648                  sp[j].type = 0;  
 115.649 @@ -1518,18 +1220,18 @@ static unsigned int sh_set_allocation(st
 115.650              }
 115.651              sp->order = SHADOW_MAX_ORDER;
 115.652              list_add_tail(&sp->list, 
 115.653 -                          &d->arch.shadow.freelists[SHADOW_MAX_ORDER]);
 115.654 +                          &d->arch.paging.shadow.freelists[SHADOW_MAX_ORDER]);
 115.655          } 
 115.656 -        else if ( d->arch.shadow.total_pages > pages ) 
 115.657 +        else if ( d->arch.paging.shadow.total_pages > pages ) 
 115.658          {
 115.659              /* Need to return memory to domheap */
 115.660              shadow_prealloc(d, SHADOW_MAX_ORDER);
 115.661 -            ASSERT(!list_empty(&d->arch.shadow.freelists[SHADOW_MAX_ORDER]));
 115.662 -            sp = list_entry(d->arch.shadow.freelists[SHADOW_MAX_ORDER].next, 
 115.663 +            ASSERT(!list_empty(&d->arch.paging.shadow.freelists[SHADOW_MAX_ORDER]));
 115.664 +            sp = list_entry(d->arch.paging.shadow.freelists[SHADOW_MAX_ORDER].next, 
 115.665                              struct shadow_page_info, list);
 115.666              list_del(&sp->list);
 115.667 -            d->arch.shadow.free_pages -= 1<<SHADOW_MAX_ORDER;
 115.668 -            d->arch.shadow.total_pages -= 1<<SHADOW_MAX_ORDER;
 115.669 +            d->arch.paging.shadow.free_pages -= 1<<SHADOW_MAX_ORDER;
 115.670 +            d->arch.paging.shadow.total_pages -= 1<<SHADOW_MAX_ORDER;
 115.671              free_domheap_pages((struct page_info *)sp, SHADOW_MAX_ORDER);
 115.672          }
 115.673  
 115.674 @@ -1547,7 +1249,7 @@ static unsigned int sh_set_allocation(st
 115.675  /* Return the size of the shadow pool, rounded up to the nearest MB */
 115.676  static unsigned int shadow_get_allocation(struct domain *d)
 115.677  {
 115.678 -    unsigned int pg = d->arch.shadow.total_pages;
 115.679 +    unsigned int pg = d->arch.paging.shadow.total_pages;
 115.680      return ((pg >> (20 - PAGE_SHIFT))
 115.681              + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 115.682  }
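
shadow_get_allocation() rounds the page count up to whole megabytes: with 4kB pages (PAGE_SHIFT == 12), 20 - PAGE_SHIFT == 8, so 256 pages make one MB and any remainder adds one more. sh_set_allocation() above uses the same trick in the other direction, rounding the request up to a multiple of 1<<SHADOW_MAX_ORDER. A stand-alone sketch of both roundings, assuming 4kB pages and an order of 2 purely for illustration:

    /* Illustration of the two round-up calculations, outside Xen. */
    #include <stdio.h>

    #define PAGE_SHIFT        12   /* assumed: 4kB pages */
    #define SHADOW_MAX_ORDER   2   /* assumed value, for the example only */

    static unsigned int pages_to_mb(unsigned int pg)
    {
        return (pg >> (20 - PAGE_SHIFT))
               + ((pg & ((1u << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0);
    }

    static unsigned int round_to_order(unsigned int pages)
    {
        return (pages + ((1u << SHADOW_MAX_ORDER) - 1))
               & ~((1u << SHADOW_MAX_ORDER) - 1);
    }

    int main(void)
    {
        printf("%u\n", pages_to_mb(256));    /* exactly 1MB  -> 1 */
        printf("%u\n", pages_to_mb(257));    /* 1MB + 1 page -> 2 */
        printf("%u\n", round_to_order(5));   /* rounds up to 8 */
        return 0;
    }
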
 115.683 @@ -1583,7 +1285,7 @@ static void sh_hash_audit_bucket(struct 
 115.684      if ( !(SHADOW_AUDIT_ENABLE) )
 115.685          return;
 115.686  
 115.687 -    sp = d->arch.shadow.hash_table[bucket];
 115.688 +    sp = d->arch.paging.shadow.hash_table[bucket];
 115.689      while ( sp )
 115.690      {
 115.691          /* Not a shadow? */
 115.692 @@ -1608,7 +1310,7 @@ static void sh_hash_audit_bucket(struct 
 115.693              if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page 
 115.694                   && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
 115.695              {
 115.696 -                SHADOW_ERROR("MFN %#lx shadowed (by %#"SH_PRI_mfn")"
 115.697 +                SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
 115.698                               " but has typecount %#lx\n",
 115.699                               sp->backpointer, mfn_x(shadow_page_to_mfn(sp)), 
 115.700                               gpg->u.inuse.type_info);
 115.701 @@ -1652,13 +1354,13 @@ static int shadow_hash_alloc(struct doma
 115.702      struct shadow_page_info **table;
 115.703  
 115.704      ASSERT(shadow_locked_by_me(d));
 115.705 -    ASSERT(!d->arch.shadow.hash_table);
 115.706 +    ASSERT(!d->arch.paging.shadow.hash_table);
 115.707  
 115.708      table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
 115.709      if ( !table ) return 1;
 115.710      memset(table, 0, 
 115.711             SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
 115.712 -    d->arch.shadow.hash_table = table;
 115.713 +    d->arch.paging.shadow.hash_table = table;
 115.714      return 0;
 115.715  }
 115.716  
 115.717 @@ -1667,10 +1369,10 @@ static int shadow_hash_alloc(struct doma
 115.718  static void shadow_hash_teardown(struct domain *d)
 115.719  {
 115.720      ASSERT(shadow_locked_by_me(d));
 115.721 -    ASSERT(d->arch.shadow.hash_table);
 115.722 -
 115.723 -    xfree(d->arch.shadow.hash_table);
 115.724 -    d->arch.shadow.hash_table = NULL;
 115.725 +    ASSERT(d->arch.paging.shadow.hash_table);
 115.726 +
 115.727 +    xfree(d->arch.paging.shadow.hash_table);
 115.728 +    d->arch.paging.shadow.hash_table = NULL;
 115.729  }
 115.730  
 115.731  
 115.732 @@ -1683,7 +1385,7 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
 115.733      key_t key;
 115.734  
 115.735      ASSERT(shadow_locked_by_me(d));
 115.736 -    ASSERT(d->arch.shadow.hash_table);
 115.737 +    ASSERT(d->arch.paging.shadow.hash_table);
 115.738      ASSERT(t);
 115.739  
 115.740      sh_hash_audit(d);
 115.741 @@ -1692,16 +1394,16 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
 115.742      key = sh_hash(n, t);
 115.743      sh_hash_audit_bucket(d, key);
 115.744  
 115.745 -    sp = d->arch.shadow.hash_table[key];
 115.746 +    sp = d->arch.paging.shadow.hash_table[key];
 115.747      prev = NULL;
 115.748      while(sp)
 115.749      {
 115.750          if ( sp->backpointer == n && sp->type == t )
 115.751          {
 115.752              /* Pull-to-front if 'sp' isn't already the head item */
 115.753 -            if ( unlikely(sp != d->arch.shadow.hash_table[key]) )
 115.754 +            if ( unlikely(sp != d->arch.paging.shadow.hash_table[key]) )
 115.755              {
 115.756 -                if ( unlikely(d->arch.shadow.hash_walking != 0) )
 115.757 +                if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
 115.758                      /* Can't reorder: someone is walking the hash chains */
 115.759                      return shadow_page_to_mfn(sp);
 115.760                  else 
 115.761 @@ -1710,8 +1412,8 @@ mfn_t shadow_hash_lookup(struct vcpu *v,
 115.762                      /* Delete sp from the list */
 115.763                      prev->next_shadow = sp->next_shadow;                    
 115.764                      /* Re-insert it at the head of the list */
 115.765 -                    sp->next_shadow = d->arch.shadow.hash_table[key];
 115.766 -                    d->arch.shadow.hash_table[key] = sp;
 115.767 +                    sp->next_shadow = d->arch.paging.shadow.hash_table[key];
 115.768 +                    d->arch.paging.shadow.hash_table[key] = sp;
 115.769                  }
 115.770              }
 115.771              else
 115.772 @@ -1737,7 +1439,7 @@ void shadow_hash_insert(struct vcpu *v, 
 115.773      key_t key;
 115.774      
 115.775      ASSERT(shadow_locked_by_me(d));
 115.776 -    ASSERT(d->arch.shadow.hash_table);
 115.777 +    ASSERT(d->arch.paging.shadow.hash_table);
 115.778      ASSERT(t);
 115.779  
 115.780      sh_hash_audit(d);
 115.781 @@ -1748,8 +1450,8 @@ void shadow_hash_insert(struct vcpu *v, 
 115.782      
 115.783      /* Insert this shadow at the top of the bucket */
 115.784      sp = mfn_to_shadow_page(smfn);
 115.785 -    sp->next_shadow = d->arch.shadow.hash_table[key];
 115.786 -    d->arch.shadow.hash_table[key] = sp;
 115.787 +    sp->next_shadow = d->arch.paging.shadow.hash_table[key];
 115.788 +    d->arch.paging.shadow.hash_table[key] = sp;
 115.789      
 115.790      sh_hash_audit_bucket(d, key);
 115.791  }
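
Both the lookup and the insert above manage each hash bucket as a singly linked chain through next_shadow: insert pushes at the head, and a successful lookup pulls its hit to the front (unless hash_walking forbids reordering). The same move-to-front scheme on a generic list, as a minimal sketch independent of struct shadow_page_info:

    /* Generic move-to-front lookup on a singly linked bucket (illustrative). */
    struct node { int key; struct node *next; };

    static struct node *lookup_mtf(struct node **head, int key)
    {
        struct node *prev = NULL, *cur = *head;

        while ( cur != NULL )
        {
            if ( cur->key == key )
            {
                if ( prev != NULL )          /* not already the head item */
                {
                    prev->next = cur->next;  /* delete cur from the chain */
                    cur->next = *head;       /* re-insert at the head */
                    *head = cur;
                }
                return cur;
            }
            prev = cur;
            cur = cur->next;
        }
        return NULL;
    }
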
 115.792 @@ -1763,7 +1465,7 @@ void shadow_hash_delete(struct vcpu *v, 
 115.793      key_t key;
 115.794  
 115.795      ASSERT(shadow_locked_by_me(d));
 115.796 -    ASSERT(d->arch.shadow.hash_table);
 115.797 +    ASSERT(d->arch.paging.shadow.hash_table);
 115.798      ASSERT(t);
 115.799  
 115.800      sh_hash_audit(d);
 115.801 @@ -1773,13 +1475,13 @@ void shadow_hash_delete(struct vcpu *v, 
 115.802      sh_hash_audit_bucket(d, key);
 115.803      
 115.804      sp = mfn_to_shadow_page(smfn);
 115.805 -    if ( d->arch.shadow.hash_table[key] == sp ) 
 115.806 +    if ( d->arch.paging.shadow.hash_table[key] == sp ) 
 115.807          /* Easy case: we're deleting the head item. */
 115.808 -        d->arch.shadow.hash_table[key] = sp->next_shadow;
 115.809 +        d->arch.paging.shadow.hash_table[key] = sp->next_shadow;
 115.810      else 
 115.811      {
 115.812          /* Need to search for the one we want */
 115.813 -        x = d->arch.shadow.hash_table[key];
 115.814 +        x = d->arch.paging.shadow.hash_table[key];
 115.815          while ( 1 )
 115.816          {
 115.817              ASSERT(x); /* We can't have hit the end, since our target is
 115.818 @@ -1818,15 +1520,15 @@ static void hash_foreach(struct vcpu *v,
 115.819  
 115.820      /* Say we're here, to stop hash-lookups reordering the chains */
 115.821      ASSERT(shadow_locked_by_me(d));
 115.822 -    ASSERT(d->arch.shadow.hash_walking == 0);
 115.823 -    d->arch.shadow.hash_walking = 1;
 115.824 +    ASSERT(d->arch.paging.shadow.hash_walking == 0);
 115.825 +    d->arch.paging.shadow.hash_walking = 1;
 115.826  
 115.827      for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ ) 
 115.828      {
 115.829          /* WARNING: This is not safe against changes to the hash table.
 115.830           * The callback *must* return non-zero if it has inserted or
 115.831           * deleted anything from the hash (lookups are OK, though). */
 115.832 -        for ( x = d->arch.shadow.hash_table[i]; x; x = x->next_shadow )
 115.833 +        for ( x = d->arch.paging.shadow.hash_table[i]; x; x = x->next_shadow )
 115.834          {
 115.835              if ( callback_mask & (1 << x->type) ) 
 115.836              {
 115.837 @@ -1839,7 +1541,7 @@ static void hash_foreach(struct vcpu *v,
 115.838          }
 115.839          if ( done ) break; 
 115.840      }
 115.841 -    d->arch.shadow.hash_walking = 0; 
 115.842 +    d->arch.paging.shadow.hash_walking = 0; 
 115.843  }
 115.844  
 115.845  
 115.846 @@ -2008,27 +1710,27 @@ int sh_remove_write_access(struct vcpu *
 115.847           * and that mapping is likely to be in the current pagetable,
 115.848           * in the guest's linear map (on non-HIGHPTE linux and windows)*/
 115.849  
 115.850 -#define GUESS(_a, _h) do {                                              \
 115.851 -            if ( v->arch.shadow.mode->guess_wrmap(v, (_a), gmfn) )      \
 115.852 -                perfc_incrc(shadow_writeable_h_ ## _h);                 \
 115.853 -            if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )        \
 115.854 -                return 1;                                               \
 115.855 +#define GUESS(_a, _h) do {                                                \
 115.856 +            if ( v->arch.paging.mode->shadow.guess_wrmap(v, (_a), gmfn) ) \
 115.857 +                perfc_incrc(shadow_writeable_h_ ## _h);                   \
 115.858 +            if ( (pg->u.inuse.type_info & PGT_count_mask) == 0 )          \
 115.859 +                return 1;                                                 \
 115.860          } while (0)
 115.861  
 115.862          
 115.863 -        if ( v->arch.shadow.mode->guest_levels == 2 )
 115.864 +        if ( v->arch.paging.mode->guest_levels == 2 )
 115.865          {
 115.866              if ( level == 1 )
 115.867                  /* 32bit non-PAE w2k3: linear map at 0xC0000000 */
 115.868                  GUESS(0xC0000000UL + (fault_addr >> 10), 1);
 115.869  
 115.870              /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
 115.871 -            if ((gfn = sh_mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
 115.872 +            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
 115.873                  GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
 115.874  
 115.875          }
 115.876  #if CONFIG_PAGING_LEVELS >= 3
 115.877 -        else if ( v->arch.shadow.mode->guest_levels == 3 )
 115.878 +        else if ( v->arch.paging.mode->guest_levels == 3 )
 115.879          {
 115.880              /* 32bit PAE w2k3: linear map at 0xC0000000 */
 115.881              switch ( level ) 
 115.882 @@ -2038,11 +1740,11 @@ int sh_remove_write_access(struct vcpu *
 115.883              }
 115.884  
 115.885              /* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
 115.886 -            if ((gfn = sh_mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
 115.887 +            if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 ) 
 115.888                  GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
 115.889          }
 115.890  #if CONFIG_PAGING_LEVELS >= 4
 115.891 -        else if ( v->arch.shadow.mode->guest_levels == 4 )
 115.892 +        else if ( v->arch.paging.mode->guest_levels == 4 )
 115.893          {
 115.894              /* 64bit w2k3: linear map at 0x0000070000000000 */
 115.895              switch ( level ) 
 115.896 @@ -2054,7 +1756,7 @@ int sh_remove_write_access(struct vcpu *
 115.897  
 115.898              /* 64bit Linux direct map at 0xffff810000000000; older kernels 
 115.899               * had it at 0x0000010000000000UL */
 115.900 -            gfn = sh_mfn_to_gfn(v->domain, gmfn); 
 115.901 +            gfn = mfn_to_gfn(v->domain, gmfn); 
 115.902              GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4); 
 115.903              GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4); 
 115.904          }
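
The GUESS() macro redefined above wraps its two conditional statements in do { ... } while (0) so that each expansion behaves as a single statement, which keeps it safe in the unbraced if and switch arms that use it below. A tiny stand-alone illustration of why the wrapper matters (stmt_a/stmt_b are hypothetical helpers):

    /* Why multi-statement macros are wrapped in do { ... } while (0). */
    void stmt_a(int x);   /* hypothetical helpers, declarations only */
    void stmt_b(int x);

    #define PAIR(x) do { stmt_a(x); stmt_b(x); } while (0)

    void example(int cond, int x)
    {
        if ( cond )
            PAIR(x);      /* expands to one statement, so the following
                           * 'else' still binds to this 'if'; a bare
                           * "stmt_a(x); stmt_b(x)" macro would break here */
        else
            stmt_b(x);
    }
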
 115.905 @@ -2073,10 +1775,10 @@ int sh_remove_write_access(struct vcpu *
 115.906       * the writeable mapping by looking at the same MFN where the last
 115.907       * brute-force search succeeded. */
 115.908  
 115.909 -    if ( v->arch.shadow.last_writeable_pte_smfn != 0 )
 115.910 +    if ( v->arch.paging.shadow.last_writeable_pte_smfn != 0 )
 115.911      {
 115.912          unsigned long old_count = (pg->u.inuse.type_info & PGT_count_mask);
 115.913 -        mfn_t last_smfn = _mfn(v->arch.shadow.last_writeable_pte_smfn);
 115.914 +        mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
 115.915          int shtype = mfn_to_shadow_page(last_smfn)->type;
 115.916  
 115.917          if ( callbacks[shtype] ) 
 115.918 @@ -2431,7 +2133,7 @@ sh_remove_all_shadows_and_parents(struct
 115.919  static void sh_update_paging_modes(struct vcpu *v)
 115.920  {
 115.921      struct domain *d = v->domain;
 115.922 -    struct shadow_paging_mode *old_mode = v->arch.shadow.mode;
 115.923 +    struct paging_mode *old_mode = v->arch.paging.mode;
 115.924      mfn_t old_guest_table;
 115.925  
 115.926      ASSERT(shadow_locked_by_me(d));
 115.927 @@ -2446,8 +2148,8 @@ static void sh_update_paging_modes(struc
 115.928  
 115.929      // First, tear down any old shadow tables held by this vcpu.
 115.930      //
 115.931 -    if ( v->arch.shadow.mode )
 115.932 -        v->arch.shadow.mode->detach_old_tables(v);
 115.933 +    if ( v->arch.paging.mode )
 115.934 +        v->arch.paging.mode->shadow.detach_old_tables(v);
 115.935  
 115.936      if ( !is_hvm_domain(d) )
 115.937      {
 115.938 @@ -2456,17 +2158,17 @@ static void sh_update_paging_modes(struc
 115.939          ///
 115.940  #if CONFIG_PAGING_LEVELS == 4
 115.941          if ( pv_32bit_guest(v) )
 115.942 -            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.943 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.944          else
 115.945 -            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
 115.946 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
 115.947  #elif CONFIG_PAGING_LEVELS == 3
 115.948 -        v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.949 +        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.950  #elif CONFIG_PAGING_LEVELS == 2
 115.951 -        v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
 115.952 +        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
 115.953  #else
 115.954  #error unexpected paging mode
 115.955  #endif
 115.956 -        v->arch.shadow.translate_enabled = !!shadow_mode_translate(d);
 115.957 +        v->arch.paging.translate_enabled = !!shadow_mode_translate(d);
 115.958      }
 115.959      else
 115.960      {
 115.961 @@ -2476,8 +2178,8 @@ static void sh_update_paging_modes(struc
 115.962          ASSERT(shadow_mode_translate(d));
 115.963          ASSERT(shadow_mode_external(d));
 115.964  
 115.965 -        v->arch.shadow.translate_enabled = !!hvm_paging_enabled(v);
 115.966 -        if ( !v->arch.shadow.translate_enabled )
 115.967 +        v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
 115.968 +        if ( !v->arch.paging.translate_enabled )
 115.969          {
 115.970              /* Set v->arch.guest_table to use the p2m map, and choose
 115.971               * the appropriate shadow mode */
 115.972 @@ -2485,11 +2187,11 @@ static void sh_update_paging_modes(struc
 115.973  #if CONFIG_PAGING_LEVELS == 2
 115.974              v->arch.guest_table =
 115.975                  pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
 115.976 -            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
 115.977 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
 115.978  #elif CONFIG_PAGING_LEVELS == 3 
 115.979              v->arch.guest_table =
 115.980                  pagetable_from_pfn(pagetable_get_pfn(d->arch.phys_table));
 115.981 -            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.982 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.983  #else /* CONFIG_PAGING_LEVELS == 4 */
 115.984              { 
 115.985                  l4_pgentry_t *l4e; 
 115.986 @@ -2501,7 +2203,7 @@ static void sh_update_paging_modes(struc
 115.987                      pagetable_from_pfn(l4e_get_pfn(l4e[0]));
 115.988                  sh_unmap_domain_page(l4e);
 115.989              }
 115.990 -            v->arch.shadow.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.991 +            v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
 115.992  #endif
 115.993              /* Fix up refcounts on guest_table */
 115.994              get_page(mfn_to_page(pagetable_get_mfn(v->arch.guest_table)), d);
 115.995 @@ -2514,7 +2216,7 @@ static void sh_update_paging_modes(struc
 115.996              if ( hvm_long_mode_enabled(v) )
 115.997              {
 115.998                  // long mode guest...
 115.999 -                v->arch.shadow.mode =
115.1000 +                v->arch.paging.mode =
115.1001                      &SHADOW_INTERNAL_NAME(sh_paging_mode, 4, 4);
115.1002              }
115.1003              else
115.1004 @@ -2523,7 +2225,7 @@ static void sh_update_paging_modes(struc
115.1005                  {
115.1006  #if CONFIG_PAGING_LEVELS >= 3
115.1007                      // 32-bit PAE mode guest...
115.1008 -                    v->arch.shadow.mode =
115.1009 +                    v->arch.paging.mode =
115.1010                          &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 3);
115.1011  #else
115.1012                      SHADOW_ERROR("PAE not supported in 32-bit Xen\n");
115.1013 @@ -2535,10 +2237,10 @@ static void sh_update_paging_modes(struc
115.1014                  {
115.1015                      // 32-bit 2 level guest...
115.1016  #if CONFIG_PAGING_LEVELS >= 3
115.1017 -                    v->arch.shadow.mode =
115.1018 +                    v->arch.paging.mode =
115.1019                          &SHADOW_INTERNAL_NAME(sh_paging_mode, 3, 2);
115.1020  #else
115.1021 -                    v->arch.shadow.mode =
115.1022 +                    v->arch.paging.mode =
115.1023                          &SHADOW_INTERNAL_NAME(sh_paging_mode, 2, 2);
115.1024  #endif
115.1025                  }
115.1026 @@ -2546,25 +2248,25 @@ static void sh_update_paging_modes(struc
115.1027  
115.1028          if ( pagetable_is_null(v->arch.monitor_table) )
115.1029          {
115.1030 -            mfn_t mmfn = v->arch.shadow.mode->make_monitor_table(v);
115.1031 +            mfn_t mmfn = v->arch.paging.mode->shadow.make_monitor_table(v);
115.1032              v->arch.monitor_table = pagetable_from_mfn(mmfn);
115.1033              make_cr3(v, mfn_x(mmfn));
115.1034              hvm_update_host_cr3(v);
115.1035          }
115.1036  
115.1037 -        if ( v->arch.shadow.mode != old_mode )
115.1038 +        if ( v->arch.paging.mode != old_mode )
115.1039          {
115.1040              SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
115.1041                            "(was g=%u s=%u)\n",
115.1042                            d->domain_id, v->vcpu_id,
115.1043                            is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
115.1044 -                          v->arch.shadow.mode->guest_levels,
115.1045 -                          v->arch.shadow.mode->shadow_levels,
115.1046 +                          v->arch.paging.mode->guest_levels,
115.1047 +                          v->arch.paging.mode->shadow.shadow_levels,
115.1048                            old_mode ? old_mode->guest_levels : 0,
115.1049 -                          old_mode ? old_mode->shadow_levels : 0);
115.1050 +                          old_mode ? old_mode->shadow.shadow_levels : 0);
115.1051              if ( old_mode &&
115.1052 -                 (v->arch.shadow.mode->shadow_levels !=
115.1053 -                  old_mode->shadow_levels) )
115.1054 +                 (v->arch.paging.mode->shadow.shadow_levels !=
115.1055 +                  old_mode->shadow.shadow_levels) )
115.1056              {
115.1057                  /* Need to make a new monitor table for the new mode */
115.1058                  mfn_t new_mfn, old_mfn;
115.1059 @@ -2584,9 +2286,9 @@ static void sh_update_paging_modes(struc
115.1060  
115.1061                  old_mfn = pagetable_get_mfn(v->arch.monitor_table);
115.1062                  v->arch.monitor_table = pagetable_null();
115.1063 -                new_mfn = v->arch.shadow.mode->make_monitor_table(v);
115.1064 +                new_mfn = v->arch.paging.mode->shadow.make_monitor_table(v);
115.1065                  v->arch.monitor_table = pagetable_from_mfn(new_mfn);
115.1066 -                SHADOW_PRINTK("new monitor table %"SH_PRI_mfn "\n",
115.1067 +                SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
115.1068                                 mfn_x(new_mfn));
115.1069  
115.1070                  /* Don't be running on the old monitor table when we 
115.1071 @@ -2596,7 +2298,7 @@ static void sh_update_paging_modes(struc
115.1072                  if ( v == current )
115.1073                      write_ptbase(v);
115.1074                  hvm_update_host_cr3(v);
115.1075 -                old_mode->destroy_monitor_table(v, old_mfn);
115.1076 +                old_mode->shadow.destroy_monitor_table(v, old_mfn);
115.1077              }
115.1078          }
115.1079  
115.1080 @@ -2606,7 +2308,7 @@ static void sh_update_paging_modes(struc
115.1081          //        This *does* happen, at least for CR4.PGE...
115.1082      }
115.1083  
115.1084 -    v->arch.shadow.mode->update_cr3(v, 0);
115.1085 +    v->arch.paging.mode->update_cr3(v, 0);
115.1086  }
115.1087  
115.1088  void shadow_update_paging_modes(struct vcpu *v)
115.1089 @@ -2626,9 +2328,7 @@ static void sh_new_mode(struct domain *d
115.1090  
115.1091      ASSERT(shadow_locked_by_me(d));
115.1092      ASSERT(d != current->domain);
115.1093 -    d->arch.shadow.mode = new_mode;
115.1094 -    if ( new_mode & SHM2_translate ) 
115.1095 -        shadow_audit_p2m(d);
115.1096 +    d->arch.paging.mode = new_mode;
115.1097      for_each_vcpu(d, v)
115.1098          sh_update_paging_modes(v);
115.1099  }
115.1100 @@ -2642,75 +2342,75 @@ int shadow_enable(struct domain *d, u32 
115.1101      unsigned int old_pages;
115.1102      int rv = 0;
115.1103  
115.1104 -    mode |= SHM2_enable;
115.1105 +    mode |= PG_SH_enable;
115.1106  
115.1107      domain_pause(d);
115.1108 -    shadow_lock(d);
115.1109  
115.1110      /* Sanity check the arguments */
115.1111      if ( (d == current->domain) ||
115.1112           shadow_mode_enabled(d) ||
115.1113 -         ((mode & SHM2_translate) && !(mode & SHM2_refcounts)) ||
115.1114 -         ((mode & SHM2_external) && !(mode & SHM2_translate)) )
115.1115 +         ((mode & PG_translate) && !(mode & PG_refcounts)) ||
115.1116 +         ((mode & PG_external) && !(mode & PG_translate)) )
115.1117      {
115.1118          rv = -EINVAL;
115.1119 -        goto out;
115.1120 +        goto out_unlocked;
115.1121      }
115.1122  
115.1123 -    // XXX -- eventually would like to require that all memory be allocated
115.1124 -    // *after* shadow_enabled() is called...  So here, we would test to make
115.1125 -    // sure that d->page_list is empty.
115.1126 -#if 0
115.1127 -    spin_lock(&d->page_alloc_lock);
115.1128 -    if ( !list_empty(&d->page_list) )
115.1129 -    {
115.1130 -        spin_unlock(&d->page_alloc_lock);
115.1131 -        rv = -EINVAL;
115.1132 -        goto out;
115.1133 -    }
115.1134 -    spin_unlock(&d->page_alloc_lock);
115.1135 -#endif
115.1136 -
115.1137      /* Init the shadow memory allocation if the user hasn't done so */
115.1138 -    old_pages = d->arch.shadow.total_pages;
115.1139 +    old_pages = d->arch.paging.shadow.total_pages;
115.1140      if ( old_pages == 0 )
115.1141 -        if ( sh_set_allocation(d, 256, NULL) != 0 ) /* Use at least 1MB */
115.1142 +    {
115.1143 +        unsigned int r;
115.1144 +        shadow_lock(d);                
115.1145 +        r = sh_set_allocation(d, 256, NULL); /* Use at least 1MB */
115.1146 +        shadow_unlock(d);
115.1147 +        if ( r != 0 )
115.1148          {
115.1149              sh_set_allocation(d, 0, NULL);
115.1150              rv = -ENOMEM;
115.1151 -            goto out;
115.1152 -        }
115.1153 +            goto out_unlocked;
115.1154 +        }        
115.1155 +    }
115.1156 +
115.1157 +    /* Init the P2M table.  Must be done before we take the shadow lock 
115.1158 +     * to avoid possible deadlock. */
115.1159 +    if ( mode & PG_translate )
115.1160 +    {
115.1161 +        rv = p2m_alloc_table(d, shadow_alloc_p2m_page, shadow_free_p2m_page);
115.1162 +        if (rv != 0)
115.1163 +            goto out_unlocked;
115.1164 +    }
115.1165 +
115.1166 +    shadow_lock(d);
115.1167 +
115.1168 +    /* Sanity check again with the lock held */
115.1169 +    if ( shadow_mode_enabled(d) )
115.1170 +    {
115.1171 +        rv = -EINVAL;
115.1172 +        goto out_locked;
115.1173 +    }
115.1174  
115.1175      /* Init the hash table */
115.1176      if ( shadow_hash_alloc(d) != 0 )
115.1177      {
115.1178 -        sh_set_allocation(d, old_pages, NULL);            
115.1179          rv = -ENOMEM;
115.1180 -        goto out;
115.1181 +        goto out_locked;
115.1182      }
115.1183  
115.1184 -    /* Init the P2M table */
115.1185 -    if ( mode & SHM2_translate )
115.1186 -        if ( !shadow_alloc_p2m_table(d) )
115.1187 -        {
115.1188 -            shadow_hash_teardown(d);
115.1189 -            sh_set_allocation(d, old_pages, NULL);
115.1190 -            shadow_p2m_teardown(d);
115.1191 -            rv = -ENOMEM;
115.1192 -            goto out;
115.1193 -        }
115.1194 -
115.1195  #if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) 
115.1196      /* We assume we're dealing with an older 64bit linux guest until we 
115.1197       * see the guest use more than one l4 per vcpu. */
115.1198 -    d->arch.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
115.1199 +    d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
115.1200  #endif
115.1201  
115.1202      /* Update the bits */
115.1203      sh_new_mode(d, mode);
115.1204 -    shadow_audit_p2m(d);
115.1205 - out:
115.1206 +
115.1207 + out_locked:
115.1208      shadow_unlock(d);
115.1209 + out_unlocked:
115.1210 +    if ( rv != 0 && !pagetable_is_null(d->arch.phys_table) )
115.1211 +        p2m_teardown(d);
115.1212      domain_unpause(d);
115.1213      return rv;
115.1214  }
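
The reworked shadow_enable() above moves p2m_alloc_table() outside the shadow lock and adopts a two-stage goto-cleanup path: failures before the lock is taken jump to out_unlocked, failures afterwards to out_locked, so the lock is released and any partially built p2m is torn down exactly once. A minimal sketch of that error-path shape; every name below is hypothetical and only the control flow mirrors the hunk above:

    /* Illustrative goto-cleanup skeleton (all identifiers invented). */
    struct domain;
    int  setup_unlocked(struct domain *d);
    int  setup_locked(struct domain *d);
    void undo_unlocked(struct domain *d);
    void feature_lock(struct domain *d);
    void feature_unlock(struct domain *d);

    static int enable_feature(struct domain *d)
    {
        int rv = 0;

        if ( setup_unlocked(d) != 0 )   /* no lock held yet */
        {
            rv = -1;
            goto out_unlocked;
        }

        feature_lock(d);

        if ( setup_locked(d) != 0 )     /* lock held: release it below */
        {
            rv = -1;
            goto out_locked;
        }

     out_locked:
        feature_unlock(d);
     out_unlocked:
        if ( rv != 0 )
            undo_unlocked(d);           /* undo the pre-lock work on failure */
        return rv;
    }
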
115.1215 @@ -2721,6 +2421,8 @@ void shadow_teardown(struct domain *d)
115.1216  {
115.1217      struct vcpu *v;
115.1218      mfn_t mfn;
115.1219 +    struct list_head *entry, *n;
115.1220 +    struct page_info *pg;
115.1221  
115.1222      ASSERT(test_bit(_DOMF_dying, &d->domain_flags));
115.1223      ASSERT(d != current->domain);
115.1224 @@ -2733,48 +2435,55 @@ void shadow_teardown(struct domain *d)
115.1225          /* Release the shadow and monitor tables held by each vcpu */
115.1226          for_each_vcpu(d, v)
115.1227          {
115.1228 -            if ( v->arch.shadow.mode )
115.1229 +            if ( v->arch.paging.mode )
115.1230              {
115.1231 -                v->arch.shadow.mode->detach_old_tables(v);
115.1232 +                v->arch.paging.mode->shadow.detach_old_tables(v);
115.1233                  if ( shadow_mode_external(d) )
115.1234                  {
115.1235                      mfn = pagetable_get_mfn(v->arch.monitor_table);
115.1236                      if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
115.1237 -                        v->arch.shadow.mode->destroy_monitor_table(v, mfn);
115.1238 +                        v->arch.paging.mode->shadow.destroy_monitor_table(v, mfn);
115.1239                      v->arch.monitor_table = pagetable_null();
115.1240                  }
115.1241              }
115.1242          }
115.1243      }
115.1244  
115.1245 -    if ( d->arch.shadow.total_pages != 0 )
115.1246 +    list_for_each_safe(entry, n, &d->arch.paging.shadow.p2m_freelist)
115.1247 +    {
115.1248 +        list_del(entry);
115.1249 +        pg = list_entry(entry, struct page_info, list);
115.1250 +        shadow_free_p2m_page(d, pg);
115.1251 +    }
115.1252 +
115.1253 +    if ( d->arch.paging.shadow.total_pages != 0 )
115.1254      {
115.1255          SHADOW_PRINTK("teardown of domain %u starts."
115.1256                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
115.1257                         d->domain_id,
115.1258 -                       d->arch.shadow.total_pages, 
115.1259 -                       d->arch.shadow.free_pages, 
115.1260 -                       d->arch.shadow.p2m_pages);
115.1261 +                       d->arch.paging.shadow.total_pages, 
115.1262 +                       d->arch.paging.shadow.free_pages, 
115.1263 +                       d->arch.paging.shadow.p2m_pages);
115.1264          /* Destroy all the shadows and release memory to domheap */
115.1265          sh_set_allocation(d, 0, NULL);
115.1266          /* Release the hash table back to xenheap */
115.1267 -        if (d->arch.shadow.hash_table) 
115.1268 +        if (d->arch.paging.shadow.hash_table) 
115.1269              shadow_hash_teardown(d);
115.1270          /* Release the log-dirty bitmap of dirtied pages */
115.1271          sh_free_log_dirty_bitmap(d);
115.1272          /* Should not have any more memory held */
115.1273          SHADOW_PRINTK("teardown done."
115.1274                         "  Shadow pages total = %u, free = %u, p2m=%u\n",
115.1275 -                       d->arch.shadow.total_pages, 
115.1276 -                       d->arch.shadow.free_pages, 
115.1277 -                       d->arch.shadow.p2m_pages);
115.1278 -        ASSERT(d->arch.shadow.total_pages == 0);
115.1279 +                       d->arch.paging.shadow.total_pages, 
115.1280 +                       d->arch.paging.shadow.free_pages, 
115.1281 +                       d->arch.paging.shadow.p2m_pages);
115.1282 +        ASSERT(d->arch.paging.shadow.total_pages == 0);
115.1283      }
115.1284  
115.1285      /* We leave the "permanent" shadow modes enabled, but clear the
115.1286       * log-dirty mode bit.  We don't want any more mark_dirty()
115.1287       * calls now that we've torn down the bitmap */
115.1288 -    d->arch.shadow.mode &= ~SHM2_log_dirty;
115.1289 +    d->arch.paging.mode &= ~PG_log_dirty;
115.1290  
115.1291      shadow_unlock(d);
115.1292  }
115.1293 @@ -2782,30 +2491,28 @@ void shadow_teardown(struct domain *d)
115.1294  void shadow_final_teardown(struct domain *d)
115.1295  /* Called by arch_domain_destroy(), when it's safe to pull down the p2m map. */
115.1296  {
115.1297 -
115.1298      SHADOW_PRINTK("dom %u final teardown starts."
115.1299                     "  Shadow pages total = %u, free = %u, p2m=%u\n",
115.1300                     d->domain_id,
115.1301 -                   d->arch.shadow.total_pages, 
115.1302 -                   d->arch.shadow.free_pages, 
115.1303 -                   d->arch.shadow.p2m_pages);
115.1304 +                   d->arch.paging.shadow.total_pages, 
115.1305 +                   d->arch.paging.shadow.free_pages, 
115.1306 +                   d->arch.paging.shadow.p2m_pages);
115.1307  
115.1308      /* Double-check that the domain didn't have any shadow memory.  
115.1309       * It is possible for a domain that never got domain_kill()ed
115.1310       * to get here with its shadow allocation intact. */
115.1311 -    if ( d->arch.shadow.total_pages != 0 )
115.1312 +    if ( d->arch.paging.shadow.total_pages != 0 )
115.1313          shadow_teardown(d);
115.1314  
115.1315      /* It is now safe to pull down the p2m map. */
115.1316 -    if ( d->arch.shadow.p2m_pages != 0 )
115.1317 -        shadow_p2m_teardown(d);
115.1318 +    p2m_teardown(d);
115.1319  
115.1320      SHADOW_PRINTK("dom %u final teardown done."
115.1321                     "  Shadow pages total = %u, free = %u, p2m=%u\n",
115.1322                     d->domain_id,
115.1323 -                   d->arch.shadow.total_pages, 
115.1324 -                   d->arch.shadow.free_pages, 
115.1325 -                   d->arch.shadow.p2m_pages);
115.1326 +                   d->arch.paging.shadow.total_pages, 
115.1327 +                   d->arch.paging.shadow.free_pages, 
115.1328 +                   d->arch.paging.shadow.p2m_pages);
115.1329  }
115.1330  
115.1331  static int shadow_one_bit_enable(struct domain *d, u32 mode)
115.1332 @@ -2814,12 +2521,14 @@ static int shadow_one_bit_enable(struct 
115.1333      ASSERT(shadow_locked_by_me(d));
115.1334  
115.1335      /* Sanity check the call */
115.1336 -    if ( d == current->domain || (d->arch.shadow.mode & mode) )
115.1337 +    if ( d == current->domain || (d->arch.paging.mode & mode) )
115.1338      {
115.1339          return -EINVAL;
115.1340      }
115.1341  
115.1342 -    if ( d->arch.shadow.mode == 0 )
115.1343 +    mode |= PG_SH_enable;
115.1344 +
115.1345 +    if ( d->arch.paging.mode == 0 )
115.1346      {
115.1347          /* Init the shadow memory allocation and the hash table */
115.1348          if ( sh_set_allocation(d, 1, NULL) != 0 
115.1349 @@ -2831,7 +2540,7 @@ static int shadow_one_bit_enable(struct 
115.1350      }
115.1351  
115.1352      /* Update the bits */
115.1353 -