ia64/xen-unstable

changeset 13867:fbc233a1dc53

merge with xen-unstable.hg
author awilliam@xenbuild2.aw
date Wed Feb 07 10:46:18 2007 -0700 (2007-02-07)
parents d3f08d39e695 584ab4fd1ad5
children f9277e2548b5
     1.1 --- a/.hgignore	Wed Feb 07 10:14:41 2007 -0700
     1.2 +++ b/.hgignore	Wed Feb 07 10:46:18 2007 -0700
     1.3 @@ -107,6 +107,7 @@
     1.4  ^tools/firmware/rombios/BIOS-bochs-[^/]*$
     1.5  ^tools/firmware/rombios/_rombios[^/]*_\.c$
     1.6  ^tools/firmware/rombios/rombios[^/]*\.s$
     1.7 +^tools/firmware/rombios/32bit/32bitbios_flat\.h$
     1.8  ^tools/firmware/vmxassist/gen$
     1.9  ^tools/firmware/vmxassist/offsets\.h$
    1.10  ^tools/firmware/vmxassist/vmxassist$
     2.1 --- a/extras/mini-os/gnttab.c	Wed Feb 07 10:14:41 2007 -0700
     2.2 +++ b/extras/mini-os/gnttab.c	Wed Feb 07 10:46:18 2007 -0700
     2.3 @@ -21,7 +21,12 @@
     2.4  
     2.5  #define NR_RESERVED_ENTRIES 8
     2.6  
     2.7 +/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
     2.8 +#ifdef __ia64__
     2.9 +#define NR_GRANT_FRAMES 1
    2.10 +#else
    2.11  #define NR_GRANT_FRAMES 4
    2.12 +#endif
    2.13  #define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
    2.14  
    2.15  static grant_entry_t *gnttab_table;
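
For a sense of scale, the entry counts behind the NR_GRANT_FRAMES split above work out roughly as follows; this is a back-of-the-envelope sketch assuming 4 KiB pages and the 8-byte v1 grant_entry_t (both assumptions, not values taken from this tree — ia64 typically uses larger pages, so a single frame still holds plenty of entries).

    /* Back-of-the-envelope check only; PAGE_SIZE=4096 and an 8-byte
     * grant_entry_t are assumptions, not values taken from this tree. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096, entry_size = 8;
        printf("x86  (4 frames): %lu entries\n", 4 * page_size / entry_size); /* 2048 */
        printf("ia64 (1 frame):  %lu entries\n", 1 * page_size / entry_size); /*  512 */
        return 0;
    }
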
     3.1 --- a/extras/mini-os/netfront.c	Wed Feb 07 10:14:41 2007 -0700
     3.2 +++ b/extras/mini-os/netfront.c	Wed Feb 07 10:46:18 2007 -0700
     3.3 @@ -349,7 +349,9 @@ done:
     3.4      init_rx_buffers();
     3.5  
     3.6      unsigned char rawmac[6];
     3.7 -    sscanf(mac,"%x:%x:%x:%x:%x:%x",
     3.8 +        /* Special conversion specifier 'hh' needed for __ia64__. Without
     3.9 +           this, mini-os panics with 'Unaligned reference'. */
    3.10 +    sscanf(mac,"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
    3.11              &rawmac[0],
    3.12              &rawmac[1],
    3.13              &rawmac[2],
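
The 'hh' length modifier makes each %x conversion store a single unsigned char instead of a full unsigned int, which is what lets the bytes land in the 1-byte rawmac slots without misaligned stores. A minimal standalone sketch of the same conversion (the MAC string here is a made-up example):

    /* Standalone sketch of the %hhx parsing used above; the address is
     * hypothetical.  Without 'hh', sscanf would write an unsigned int
     * through each &rawmac[i]. */
    #include <stdio.h>

    int main(void)
    {
        const char *mac = "00:16:3e:12:34:56";          /* example value */
        unsigned char rawmac[6];

        if (sscanf(mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                   &rawmac[0], &rawmac[1], &rawmac[2],
                   &rawmac[3], &rawmac[4], &rawmac[5]) == 6)
            printf("last byte: %02x\n", (unsigned)rawmac[5]);
        return 0;
    }
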
     4.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Wed Feb 07 10:14:41 2007 -0700
     4.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S	Wed Feb 07 10:46:18 2007 -0700
     4.3 @@ -747,7 +747,7 @@ ENTRY(hypervisor_callback)
     4.4  	jb   11f
     4.5  	cmpl $sysexit_ecrit,%eax
     4.6  	ja   11f
     4.7 -	addl $0x34,%esp			# Remove cs...ebx from stack frame.
     4.8 +	addl $OLDESP,%esp		# Remove eflags...ebx from stack frame.
     4.9  11:	push %esp
    4.10  	call evtchn_do_upcall
    4.11  	add  $4,%esp
    4.12 @@ -777,18 +777,13 @@ ecrit:  /**** END OF CRITICAL REGION ***
    4.13  # provides the number of bytes which have already been popped from the
    4.14  # interrupted stack frame.
    4.15  critical_region_fixup:
    4.16 -	addl $critical_fixup_table-scrit,%eax
    4.17 -	movzbl (%eax),%eax		# %eax contains num bytes popped
    4.18 -	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
    4.19 +	movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
    4.20 +	cmpb $0xff,%cl                  # 0xff => vcpu_info critical region
    4.21  	jne  15f
    4.22 -	GET_THREAD_INFO(%ebp)
    4.23 -        xorl %eax,%eax
    4.24 -15:	mov  %esp,%esi
    4.25 -	add  %eax,%esi			# %esi points at end of src region
    4.26 -	mov  %esp,%edi
    4.27 -	add  $0x34,%edi			# %edi points at end of dst region
    4.28 -	mov  %eax,%ecx
    4.29 -	shr  $2,%ecx			# convert words to bytes
    4.30 +	xorl %ecx,%ecx
    4.31 +15:	leal (%esp,%ecx),%esi		# %esi points at end of src region
    4.32 +	leal OLDESP(%esp),%edi		# %edi points at end of dst region
     4.33 +	shrl $2,%ecx			# convert byte count to dwords
    4.34  	je   17f			# skip loop if nothing to copy
    4.35  16:	subl $4,%esi			# pre-decrementing copy loop
    4.36  	subl $4,%edi
    4.37 @@ -798,6 +793,7 @@ 16:	subl $4,%esi			# pre-decrementing co
    4.38  17:	movl %edi,%esp			# final %edi is top of merged stack
    4.39  	jmp  11b
    4.40  
    4.41 +.section .rodata,"a"
    4.42  critical_fixup_table:
    4.43  	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
    4.44  	.byte 0xff,0xff			# jnz  14f
    4.45 @@ -814,6 +810,7 @@ critical_fixup_table:
    4.46  	.byte 0x28			# iret
    4.47  	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
    4.48  	.byte 0x00,0x00			# jmp  11b
    4.49 +.previous
    4.50  
    4.51  # Hypervisor uses this for application faults while it executes.
    4.52  # We get here for two reasons:
    4.53 @@ -1194,6 +1191,7 @@ ENTRY(fixup_4gb_segment)
    4.54  	jmp error_code
    4.55  
    4.56  .section .rodata,"a"
    4.57 +.align 4
    4.58  #include "syscall_table.S"
    4.59  
    4.60  syscall_table_size=(.-sys_call_table)
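
The rewritten critical_region_fixup is easier to follow in C. The sketch below is illustrative only (names invented, not code from the tree): the fixup-table value gives how many bytes of the interrupted frame had already been popped, and that many bytes from the bottom of the newly pushed frame are slid up so they sit directly below the interrupted frame's remainder, yielding one complete eflags...ebx frame.

    /* Illustrative C rendering of labels 15/16/17 above; a sketch under
     * stated assumptions, not code from the tree.  'esp' is the stack
     * pointer after the callback frame was pushed, 'bytes_popped' the
     * fixup-table value, 'oldesp' the OLDESP frame size in bytes. */
    #include <stdint.h>

    static uint32_t *merge_frames(uint32_t *esp, unsigned bytes_popped,
                                  unsigned oldesp)
    {
        uint32_t *src = esp + bytes_popped / 4;  /* end of source region  */
        uint32_t *dst = esp + oldesp / 4;        /* end of dest region    */
        unsigned n = bytes_popped / 4;           /* dwords to copy        */

        while (n--) {                            /* pre-decrementing copy */
            --src; --dst;
            *dst = *src;
        }
        return dst;                              /* top of merged stack   */
    }
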
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Feb 07 10:14:41 2007 -0700
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Feb 07 10:46:18 2007 -0700
     5.3 @@ -424,7 +424,7 @@ static int bind_ipi_to_irq(unsigned int 
     5.4  static void unbind_from_irq(unsigned int irq)
     5.5  {
     5.6  	struct evtchn_close close;
     5.7 -	int evtchn = evtchn_from_irq(irq);
     5.8 +	int cpu, evtchn = evtchn_from_irq(irq);
     5.9  
    5.10  	spin_lock(&irq_mapping_update_lock);
    5.11  
    5.12 @@ -452,6 +452,10 @@ static void unbind_from_irq(unsigned int
    5.13  
    5.14  		evtchn_to_irq[evtchn] = -1;
    5.15  		irq_info[irq] = IRQ_UNBOUND;
    5.16 +
    5.17 +		/* Zap stats across IRQ changes of use. */
    5.18 +		for_each_possible_cpu(cpu)
    5.19 +			kstat_cpu(cpu).irqs[irq] = 0;
    5.20  	}
    5.21  
    5.22  	spin_unlock(&irq_mapping_update_lock);
     6.1 --- a/patches/linux-2.6.18/series	Wed Feb 07 10:14:41 2007 -0700
     6.2 +++ b/patches/linux-2.6.18/series	Wed Feb 07 10:46:18 2007 -0700
     6.3 @@ -18,3 +18,4 @@ git-dbaab49f92ff6ae6255762a948375e4036cb
     6.4  x86-elfnote-as-preprocessor-macro.patch
     6.5  fixaddr-top.patch
     6.6  git-c06cb8b1c4d25e5b4d7a2d7c2462619de1e0dbc4.patch
     6.7 +softlockup-no-idle-hz.patch
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/patches/linux-2.6.18/softlockup-no-idle-hz.patch	Wed Feb 07 10:46:18 2007 -0700
     7.3 @@ -0,0 +1,56 @@
     7.4 +diff -pruN ../orig-linux-2.6.18/include/linux/sched.h ./include/linux/sched.h
     7.5 +--- ../orig-linux-2.6.18/include/linux/sched.h	2006-09-20 04:42:06.000000000 +0100
     7.6 ++++ ./include/linux/sched.h	2007-02-07 01:10:24.000000000 +0000
     7.7 +@@ -211,10 +211,15 @@ extern void update_process_times(int use
     7.8 + extern void scheduler_tick(void);
     7.9 + 
    7.10 + #ifdef CONFIG_DETECT_SOFTLOCKUP
    7.11 ++extern unsigned long softlockup_get_next_event(void);
    7.12 + extern void softlockup_tick(void);
    7.13 + extern void spawn_softlockup_task(void);
    7.14 + extern void touch_softlockup_watchdog(void);
    7.15 + #else
    7.16 ++static inline unsigned long softlockup_get_next_event(void)
    7.17 ++{
    7.18 ++	return MAX_JIFFY_OFFSET;
    7.19 ++}
    7.20 + static inline void softlockup_tick(void)
    7.21 + {
    7.22 + }
    7.23 +diff -pruN ../orig-linux-2.6.18/kernel/softlockup.c ./kernel/softlockup.c
    7.24 +--- ../orig-linux-2.6.18/kernel/softlockup.c	2006-09-20 04:42:06.000000000 +0100
    7.25 ++++ ./kernel/softlockup.c	2007-02-07 01:53:22.000000000 +0000
    7.26 +@@ -40,6 +40,19 @@ void touch_softlockup_watchdog(void)
    7.27 + }
    7.28 + EXPORT_SYMBOL(touch_softlockup_watchdog);
    7.29 + 
    7.30 ++unsigned long softlockup_get_next_event(void)
    7.31 ++{
    7.32 ++	int this_cpu = smp_processor_id();
    7.33 ++	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
    7.34 ++
    7.35 ++	if (per_cpu(print_timestamp, this_cpu) == touch_timestamp ||
    7.36 ++		did_panic ||
    7.37 ++			!per_cpu(watchdog_task, this_cpu))
    7.38 ++		return MAX_JIFFY_OFFSET;
    7.39 ++
    7.40 ++	return min_t(long, 0, touch_timestamp + HZ - jiffies);
    7.41 ++}
    7.42 ++
    7.43 + /*
    7.44 +  * This callback runs from the timer interrupt, and checks
    7.45 +  * whether the watchdog thread has hung or not:
    7.46 +diff -pruN ../orig-linux-2.6.18/kernel/timer.c ./kernel/timer.c
    7.47 +--- ../orig-linux-2.6.18/kernel/timer.c	2006-09-20 04:42:06.000000000 +0100
    7.48 ++++ ./kernel/timer.c	2007-02-07 01:29:34.000000000 +0000
    7.49 +@@ -485,7 +485,9 @@ unsigned long next_timer_interrupt(void)
    7.50 + 		if (hr_expires < 3)
    7.51 + 			return hr_expires + jiffies;
    7.52 + 	}
    7.53 +-	hr_expires += jiffies;
    7.54 ++	hr_expires = min_t(unsigned long,
    7.55 ++			   softlockup_get_next_event(),
    7.56 ++			   hr_expires) + jiffies;
    7.57 + 
    7.58 + 	base = __get_cpu_var(tvec_bases);
    7.59 + 	spin_lock(&base->lock);
     8.1 --- a/tools/firmware/rombios/32bit/tcgbios/tcgbios.c	Wed Feb 07 10:14:41 2007 -0700
     8.2 +++ b/tools/firmware/rombios/32bit/tcgbios/tcgbios.c	Wed Feb 07 10:46:18 2007 -0700
     8.3 @@ -146,7 +146,7 @@ static tcpa_acpi_t tcpa_acpi;
     8.4  static int tpm_driver_to_use = TPM_INVALID_DRIVER;
     8.5  
     8.6  static
     8.7 -uint32_t MA_IsTPMPresent()
     8.8 +uint32_t MA_IsTPMPresent(void)
     8.9  {
    8.10  	uint32_t rc = 0;
    8.11  	unsigned int i;
    8.12 @@ -263,11 +263,11 @@ void tcpa_acpi_init(void)
    8.13  {
    8.14  	struct acpi_20_rsdt *rsdt;
    8.15  	uint32_t length;
    8.16 -	struct acpi_20_tcpa *tcpa;
    8.17 +	struct acpi_20_tcpa *tcpa = (void *)0;
    8.18  	uint16_t found = 0;
    8.19  	uint16_t rsdp_off;
    8.20  	uint16_t off;
    8.21 -	struct acpi_20_rsdp *rsdp;
    8.22 +	struct acpi_20_rsdp *rsdp = (void *)0;
    8.23  
    8.24  	if (MA_IsTPMPresent() == 0) {
    8.25  		return;
    8.26 @@ -732,8 +732,8 @@ void tcpa_ipl(Bit32u seg)
    8.27  void tcpa_measure_post(Bit32u from, Bit32u to)
    8.28  {
    8.29  	struct pcpes pcpes; /* PCClientPCREventStruc */
    8.30 +	int len = to - from;
    8.31  	memset(&pcpes, 0x0, sizeof(pcpes));
    8.32 -	int len = to - from;
    8.33  
    8.34  	if (len > 0) {
    8.35  		sha1((unsigned char *)from,
    8.36 @@ -986,7 +986,7 @@ uint32_t PassThroughToTPM32(struct pttti
    8.37  {
    8.38  	uint32_t rc = 0;
    8.39  	uint8_t *cmd32;
    8.40 -	uint32_t resbuflen;
    8.41 +	uint32_t resbuflen = 0;
    8.42  
    8.43  	if (TCG_IsShutdownPreBootInterface() != 0) {
    8.44  		rc = (TCG_PC_TPMERROR |
    8.45 @@ -1277,9 +1277,7 @@ typedef struct _sha1_ctx {
    8.46  } sha1_ctx;
    8.47  
    8.48  
    8.49 -static inline uint32_t rol(val, rol)
    8.50 -  uint32_t val;
    8.51 -  uint16_t rol;
    8.52 +static inline uint32_t rol(uint32_t val, uint16_t rol)
    8.53  {
    8.54  	return (val << rol) | (val >> (32 - rol));
    8.55  }
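
The rol() change above is purely a conversion from a K&R-style definition to an ANSI prototype; the body is unchanged. A quick standalone check of the rotate (it assumes a rotate count of 1..31, since a shift by 32 is undefined in C):

    /* Standalone check of the 32-bit rotate-left above; callers are
     * assumed to pass a rotate count in 1..31. */
    #include <stdio.h>
    #include <stdint.h>

    static inline uint32_t rol(uint32_t val, uint16_t rol)
    {
        return (val << rol) | (val >> (32 - rol));
    }

    int main(void)
    {
        printf("%08x\n", rol(0x80000001u, 4));   /* prints 00000018 */
        return 0;
    }
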
     9.1 --- a/tools/firmware/rombios/32bit/tcgbios/tpm_drivers.c	Wed Feb 07 10:14:41 2007 -0700
     9.2 +++ b/tools/firmware/rombios/32bit/tcgbios/tpm_drivers.c	Wed Feb 07 10:46:18 2007 -0700
     9.3 @@ -27,12 +27,27 @@
     9.4  #include "tpm_drivers.h"
     9.5  #include "tcgbios.h"
     9.6  
     9.7 +#define STS_VALID                    (1 << 7) /* 0x80 */
     9.8 +#define STS_COMMAND_READY            (1 << 6) /* 0x40 */
     9.9 +#define STS_TPM_GO                   (1 << 5) /* 0x20 */
    9.10 +#define STS_DATA_AVAILABLE           (1 << 4) /* 0x10 */
    9.11 +#define STS_EXPECT                   (1 << 3) /* 0x08 */
    9.12 +#define STS_RESPONSE_RETRY           (1 << 1) /* 0x02 */
    9.13 +
    9.14 +#define ACCESS_TPM_REG_VALID_STS     (1 << 7) /* 0x80 */
    9.15 +#define ACCESS_ACTIVE_LOCALITY       (1 << 5) /* 0x20 */
    9.16 +#define ACCESS_BEEN_SEIZED           (1 << 4) /* 0x10 */
    9.17 +#define ACCESS_SEIZE                 (1 << 3) /* 0x08 */
    9.18 +#define ACCESS_PENDING_REQUEST       (1 << 2) /* 0x04 */
    9.19 +#define ACCESS_REQUEST_USE           (1 << 1) /* 0x02 */
    9.20 +#define ACCESS_TPM_ESTABLISHMENT     (1 << 0) /* 0x01 */
    9.21 +
    9.22  static uint32_t tis_wait_sts(uint8_t *addr, uint32_t time,
    9.23                               uint8_t mask, uint8_t expect)
    9.24  {
    9.25  	uint32_t rc = 0;
    9.26  	while (time > 0) {
    9.27 -		uint8_t sts = addr[TPM_STS];
    9.28 +		uint8_t sts = mmio_readb(&addr[TPM_STS]);
    9.29  		if ((sts & mask) == expect) {
    9.30  			rc = 1;
    9.31  			break;
    9.32 @@ -45,16 +60,17 @@ static uint32_t tis_wait_sts(uint8_t *ad
    9.33  
    9.34  static uint32_t tis_activate(uint32_t baseaddr)
    9.35  {
    9.36 -	uint32_t rc = 0;
    9.37 +	uint32_t rc = 1;
    9.38  	uint8_t *tis_addr = (uint8_t*)baseaddr;
    9.39  	uint8_t acc;
    9.40  	/* request access to locality */
    9.41 -	tis_addr[TPM_ACCESS] = 0x2;
    9.42 +	tis_addr[TPM_ACCESS] = ACCESS_REQUEST_USE;
    9.43  
    9.44 -	acc = tis_addr[TPM_ACCESS];
    9.45 -	if ((acc & 0x20) != 0) {
    9.46 -		tis_addr[TPM_STS] = 0x40;
    9.47 -		rc = tis_wait_sts(tis_addr, 100, 0x40, 0x40);
    9.48 +	acc = mmio_readb(&tis_addr[TPM_ACCESS]);
    9.49 +	if ((acc & ACCESS_ACTIVE_LOCALITY) != 0) {
    9.50 +		tis_addr[TPM_STS] = STS_COMMAND_READY;
    9.51 +		rc = tis_wait_sts(tis_addr, 100,
    9.52 +		                  STS_COMMAND_READY, STS_COMMAND_READY);
    9.53  	}
    9.54  	return rc;
    9.55  }
    9.56 @@ -64,8 +80,8 @@ uint32_t tis_ready(uint32_t baseaddr)
    9.57  	uint32_t rc = 0;
    9.58  	uint8_t *tis_addr = (uint8_t*)baseaddr;
    9.59  
    9.60 -	tis_addr[TPM_STS] = 0x40;
    9.61 -	rc = tis_wait_sts(tis_addr, 100, 0x40, 0x40);
    9.62 +	tis_addr[TPM_STS] = STS_COMMAND_READY;
    9.63 +	rc = tis_wait_sts(tis_addr, 100, STS_COMMAND_READY, STS_COMMAND_READY);
    9.64  
    9.65  	return rc;
    9.66  }
    9.67 @@ -81,8 +97,7 @@ uint32_t tis_senddata(uint32_t baseaddr,
    9.68  		uint16_t burst = 0;
    9.69  		uint32_t ctr = 0;
    9.70  		while (burst == 0 && ctr < 2000) {
    9.71 -			burst = (((uint16_t)tis_addr[TPM_STS+1])     ) +
    9.72 -			        (((uint16_t)tis_addr[TPM_STS+2]) << 8);
    9.73 +			burst = mmio_readw((uint16_t *)&tis_addr[TPM_STS+1]);
    9.74  			if (burst == 0) {
    9.75  				mssleep(1);
    9.76  				ctr++;
    9.77 @@ -120,11 +135,11 @@ uint32_t tis_readresp(uint32_t baseaddr,
    9.78  	uint32_t sts;
    9.79  
    9.80  	while (offset < len) {
    9.81 -		buffer[offset] = tis_addr[TPM_DATA_FIFO];
    9.82 +		buffer[offset] = mmio_readb(&tis_addr[TPM_DATA_FIFO]);
    9.83  		offset++;
    9.84 -		sts = tis_addr[TPM_STS];
    9.85 +		sts = mmio_readb(&tis_addr[TPM_STS]);
    9.86  		/* data left ? */
    9.87 -		if ((sts & 0x10) == 0) {
    9.88 +		if ((sts & STS_DATA_AVAILABLE) == 0) {
    9.89  			break;
    9.90  		}
    9.91  	}
    9.92 @@ -136,7 +151,7 @@ uint32_t tis_waitdatavalid(uint32_t base
    9.93  {
    9.94  	uint8_t *tis_addr = (uint8_t*)baseaddr;
    9.95  	uint32_t rc = 0;
    9.96 -	if (tis_wait_sts(tis_addr, 1000, 0x80, 0x80) == 0) {
    9.97 +	if (tis_wait_sts(tis_addr, 1000, STS_VALID, STS_VALID) == 0) {
    9.98  		rc = TCG_NO_RESPONSE;
    9.99  	}
   9.100  	return rc;
   9.101 @@ -146,8 +161,9 @@ uint32_t tis_waitrespready(uint32_t base
   9.102  {
   9.103  	uint32_t rc = 0;
   9.104  	uint8_t *tis_addr = (uint8_t*)baseaddr;
   9.105 -	tis_addr[TPM_STS] = 0x20;
   9.106 -	if (tis_wait_sts(tis_addr, timeout, 0x10, 0x10) == 0) {
   9.107 +	tis_addr[TPM_STS] = STS_TPM_GO;
   9.108 +	if (tis_wait_sts(tis_addr, timeout,
   9.109 +	                 STS_DATA_AVAILABLE, STS_DATA_AVAILABLE) == 0) {
   9.110  		rc = TCG_NO_RESPONSE;
   9.111  	}
   9.112  	return rc;
   9.113 @@ -158,7 +174,7 @@ uint32_t tis_probe(uint32_t baseaddr)
   9.114  {
   9.115  	uint32_t rc = 0;
   9.116  	uint8_t *tis_addr = (uint8_t*)baseaddr;
   9.117 -	uint32_t didvid = *(uint32_t*)&tis_addr[TPM_DID_VID];
   9.118 +	uint32_t didvid = mmio_readl((uint32_t *)&tis_addr[TPM_DID_VID]);
   9.119  	if ((didvid != 0) && (didvid != 0xffffffff)) {
   9.120  		rc = 1;
   9.121  	}
    10.1 --- a/tools/firmware/rombios/32bit/util.h	Wed Feb 07 10:14:41 2007 -0700
    10.2 +++ b/tools/firmware/rombios/32bit/util.h	Wed Feb 07 10:46:18 2007 -0700
    10.3 @@ -24,5 +24,20 @@ void byte_to_hex(char *digits, uint8_t b
    10.4  void uuid_to_string(char *dest, uint8_t *uuid);
    10.5  int printf(const char *fmt, ...);
    10.6  
    10.7 +static inline uint8_t mmio_readb(uint8_t *addr)
    10.8 +{
    10.9 +	return *(volatile uint8_t *)addr;
   10.10 +}
   10.11 +
   10.12 +static inline uint16_t mmio_readw(uint16_t *addr)
   10.13 +{
   10.14 +	return *(volatile uint16_t *)addr;
   10.15 +}
   10.16 +
   10.17 +static inline uint32_t mmio_readl(uint32_t *addr)
   10.18 +{
   10.19 +	return *(volatile uint32_t *)addr;
   10.20 +}
   10.21 +
   10.22  
   10.23  #endif
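
These helpers force properly sized volatile loads so the compiler cannot cache or merge reads of TPM registers in polling loops such as tis_wait_sts above. Only read helpers are added here; the stores in tpm_drivers.c stay as plain assignments. For symmetry, a write-side counterpart would look like the sketch below (hypothetical, not part of this changeset):

    /* Hypothetical mmio_writeb, shown for symmetry only; the changeset
     * itself leaves TPM register writes as ordinary stores. */
    static inline void mmio_writeb(uint8_t *addr, uint8_t val)
    {
        *(volatile uint8_t *)addr = val;
    }
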
    11.1 --- a/tools/firmware/rombios/rombios.c	Wed Feb 07 10:14:41 2007 -0700
    11.2 +++ b/tools/firmware/rombios/rombios.c	Wed Feb 07 10:46:18 2007 -0700
    11.3 @@ -5722,9 +5722,6 @@ int13_cdemu(DS, ES, DI, SI, BP, SP, BX, 
    11.4      goto int13_fail;
    11.5      }
    11.6  
    11.7 -#if BX_TCGBIOS
    11.8 -  tcpa_ipl((Bit32u)bootseg);               /* specs: 8.2.3 steps 4 and 5 */
    11.9 -#endif
   11.10    
   11.11    switch (GET_AH()) {
   11.12  
   11.13 @@ -7741,6 +7738,10 @@ ASM_END
   11.14        }
   11.15      }
   11.16  
   11.17 +#if BX_TCGBIOS
   11.18 +    tcpa_add_bootdevice((Bit32u)0L, (Bit32u)bootdrv);
   11.19 +#endif
   11.20 +
   11.21      /* Canonicalize bootseg:bootip */
   11.22      bootip = (bootseg & 0x0fff) << 4;
   11.23      bootseg &= 0xf000;
   11.24 @@ -7760,6 +7761,9 @@ ASM_END
   11.25      bootdrv = (Bit8u)(status>>8);
   11.26      bootseg = read_word(ebda_seg,&EbdaData->cdemu.load_segment);
   11.27      /* Canonicalize bootseg:bootip */
   11.28 +#if BX_TCGBIOS
   11.29 +    tcpa_add_bootdevice((Bit32u)1L, (Bit32u)0L);
   11.30 +#endif
   11.31      bootip = (bootseg & 0x0fff) << 4;
   11.32      bootseg &= 0xf000;
   11.33      break;
   11.34 @@ -7773,6 +7777,9 @@ ASM_END
   11.35    default: return;
   11.36    }
   11.37  
   11.38 +#if BX_TCGBIOS
   11.39 +  tcpa_ipl((Bit32u)bootseg);               /* specs: 8.2.3 steps 4 and 5 */
   11.40 +#endif
   11.41    /* Debugging info */
   11.42    printf("Booting from %x:%x\n", bootseg, bootip);
   11.43    
    12.1 --- a/tools/ioemu/hw/cirrus_vga.c	Wed Feb 07 10:14:41 2007 -0700
    12.2 +++ b/tools/ioemu/hw/cirrus_vga.c	Wed Feb 07 10:46:18 2007 -0700
    12.3 @@ -2571,7 +2571,8 @@ static void *set_vram_mapping(unsigned l
    12.4      return vram_pointer;
    12.5  }
    12.6  
    12.7 -static int unset_vram_mapping(unsigned long begin, unsigned long end)
    12.8 +static int unset_vram_mapping(unsigned long begin, unsigned long end, 
    12.9 +                              void *mapping)
   12.10  {
   12.11      xen_pfn_t *extent_start = NULL;
   12.12      unsigned long nr_extents;
   12.13 @@ -2591,11 +2592,13 @@ static int unset_vram_mapping(unsigned l
   12.14          return -1;
   12.15      }
   12.16  
   12.17 +    /* Drop our own references to the vram pages */
   12.18 +    munmap(mapping, nr_extents * TARGET_PAGE_SIZE);
   12.19 +
   12.20 +    /* Now drop the guest's mappings */
   12.21      memset(extent_start, 0, sizeof(xen_pfn_t) * nr_extents);
   12.22 -
   12.23      for (i = 0; i < nr_extents; i++)
   12.24          extent_start[i] = (begin + (i * TARGET_PAGE_SIZE)) >> TARGET_PAGE_BITS;
   12.25 -
   12.26      unset_mm_mapping(xc_handle, domid, nr_extents, 0, extent_start);
   12.27  
   12.28      free(extent_start);
   12.29 @@ -2642,16 +2645,14 @@ static void cirrus_update_memory_access(
   12.30          } else {
   12.31          generic_io:
   12.32              if (s->cirrus_lfb_addr && s->cirrus_lfb_end && s->map_addr) {
   12.33 -		int error;
   12.34 -                void *old_vram = NULL;
   12.35 -
   12.36 -		error = unset_vram_mapping(s->cirrus_lfb_addr,
   12.37 -					   s->cirrus_lfb_end);
   12.38 -		if (!error)
   12.39 -		    old_vram = vga_update_vram((VGAState *)s, NULL,
   12.40 -                                               VGA_RAM_SIZE);
   12.41 -                if (old_vram)
   12.42 -                    munmap(old_vram, s->map_addr - s->map_end);
   12.43 +                void *old_vram;
   12.44 +
   12.45 +                old_vram = vga_update_vram((VGAState *)s, NULL, VGA_RAM_SIZE);
   12.46 +
   12.47 +                unset_vram_mapping(s->cirrus_lfb_addr,
   12.48 +                                   s->cirrus_lfb_end, 
   12.49 +                                   old_vram);
   12.50 +
   12.51                  s->map_addr = s->map_end = 0;
   12.52              }
   12.53              s->cirrus_linear_write[0] = cirrus_linear_writeb;
   12.54 @@ -3016,10 +3017,8 @@ void cirrus_stop_acc(CirrusVGAState *s)
   12.55          int error;
   12.56          s->map_addr = 0;
   12.57          error = unset_vram_mapping(s->cirrus_lfb_addr,
   12.58 -                s->cirrus_lfb_end);
   12.59 +                s->cirrus_lfb_end, s->vram_ptr);
   12.60          fprintf(stderr, "cirrus_stop_acc:unset_vram_mapping.\n");
   12.61 -
   12.62 -        munmap(s->vram_ptr, VGA_RAM_SIZE);
   12.63      }
   12.64  }
   12.65  
    13.1 --- a/tools/ioemu/hw/tpm_tis.c	Wed Feb 07 10:14:41 2007 -0700
    13.2 +++ b/tools/ioemu/hw/tpm_tis.c	Wed Feb 07 10:46:18 2007 -0700
    13.3 @@ -517,7 +517,7 @@ static uint32_t tis_mem_readl(void *opaq
    13.4  
    13.5  #ifdef DEBUG_TPM
    13.6      fprintf(logfile," read(%08x) = %08x\n",
    13.7 -            addr,
    13.8 +            (int)addr,
    13.9              val);
   13.10  #endif
   13.11  
   13.12 @@ -538,7 +538,7 @@ static void tis_mem_writel(void *opaque,
   13.13  
   13.14  #ifdef DEBUG_TPM
   13.15      fprintf(logfile,"write(%08x) = %08x\n",
   13.16 -            addr,
   13.17 +            (int)addr,
   13.18              val);
   13.19  #endif
   13.20  
   13.21 @@ -757,10 +757,11 @@ static CPUWriteMemoryFunc *tis_writefn[3
   13.22  static void tpm_save(QEMUFile* f,void* opaque)
   13.23  {
   13.24      tpmState* s=(tpmState*)opaque;
   13.25 +    uint8_t locty = s->active_loc;
   13.26      int c;
   13.27  
   13.28      /* need to wait for outstanding requests to complete */
   13.29 -    if (IS_COMM_WITH_VTPM(s)) {
   13.30 +    if (s->loc[locty].state == STATE_EXECUTION) {
   13.31          int repeats = 30; /* 30 seconds; really should be infty */
   13.32          while (repeats > 0 &&
   13.33                 !(s->loc[s->active_loc].sts & STS_DATA_AVAILABLE)) {
   13.34 @@ -777,6 +778,10 @@ static void tpm_save(QEMUFile* f,void* o
   13.35          }
   13.36      }
   13.37  
   13.38 +    if (IS_COMM_WITH_VTPM(s)) {
   13.39 +        close_vtpm_channel(s, 1);
   13.40 +    }
   13.41 +
   13.42      qemu_put_be32s(f,&s->offset);
   13.43      qemu_put_buffer(f, s->buffer.buf, TPM_MAX_PKT);
   13.44      qemu_put_8s(f, &s->active_loc);
   13.45 @@ -993,7 +998,7 @@ static int TPM_Receive(tpmState *s, tpmB
   13.46          uint32_t size = tpm_get_size_from_buffer(buffer->buf);
   13.47          if (size + sizeof(buffer->instance) != off) {
   13.48              fprintf(logfile,"TPM: Packet size is bad! %d != %d\n",
   13.49 -                    size + sizeof(buffer->instance),
   13.50 +                    (int)(size + sizeof(buffer->instance)),
   13.51                      off);
   13.52          } else {
   13.53              uint32_t ret;
    14.1 --- a/tools/libxc/xc_domain.c	Wed Feb 07 10:14:41 2007 -0700
    14.2 +++ b/tools/libxc/xc_domain.c	Wed Feb 07 10:46:18 2007 -0700
    14.3 @@ -252,12 +252,14 @@ int xc_domain_hvm_getcontext(int xc_hand
    14.4      domctl.u.hvmcontext.size = size;
    14.5      set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
    14.6  
    14.7 -    if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
    14.8 -        return ret;
    14.9 +    if ( ctxt_buf ) 
   14.10 +        if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
   14.11 +            return ret;
   14.12  
   14.13      ret = do_domctl(xc_handle, &domctl);
   14.14  
   14.15 -    unlock_pages(ctxt_buf, size);
   14.16 +    if ( ctxt_buf ) 
   14.17 +        unlock_pages(ctxt_buf, size);
   14.18  
   14.19      return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
   14.20  }
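
Together with the domctl change further down, the NULL-buffer handling above gives xc_domain_hvm_getcontext a size-query mode: call once with a null buffer to learn the required size, then again with a real buffer, as xc_hvm_save now does. A minimal caller sketch (assumes the prototype from <xenctrl.h>; names other than the libxc call are invented):

    /* Minimal sketch of the two-call size-query pattern; error handling
     * is pared down and the helper name is invented. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <xenctrl.h>   /* xc_domain_hvm_getcontext() */

    static uint8_t *fetch_hvm_context(int xc_handle, uint32_t dom, int *size_out)
    {
        int size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
        if (size == -1)
            return NULL;                         /* size query failed */

        uint8_t *buf = malloc(size);
        if (buf == NULL)
            return NULL;

        if (xc_domain_hvm_getcontext(xc_handle, dom, buf, size) == -1) {
            free(buf);
            return NULL;                         /* fetch failed */
        }
        *size_out = size;
        return buf;
    }
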
    15.1 --- a/tools/libxc/xc_hvm_save.c	Wed Feb 07 10:14:41 2007 -0700
    15.2 +++ b/tools/libxc/xc_hvm_save.c	Wed Feb 07 10:46:18 2007 -0700
    15.3 @@ -33,12 +33,6 @@
    15.4  #include "xg_save_restore.h"
    15.5  
    15.6  /*
    15.7 - * Size of a buffer big enough to take the HVM state of a domain.
    15.8 - * Ought to calculate this a bit more carefully, or maybe ask Xen.
    15.9 - */
   15.10 -#define HVM_CTXT_SIZE 8192
   15.11 -
   15.12 -/*
   15.13  ** Default values for important tuning parameters. Can override by passing
   15.14  ** non-zero replacement values to xc_hvm_save().
   15.15  **
   15.16 @@ -286,6 +280,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   15.17      unsigned long *pfn_batch = NULL;
   15.18  
   15.19      /* A copy of hvm domain context buffer*/
   15.20 +    uint32_t hvm_buf_size;
   15.21      uint8_t *hvm_buf = NULL;
   15.22  
   15.23      /* Live mapping of shared info structure */
   15.24 @@ -431,9 +426,15 @@ int xc_hvm_save(int xc_handle, int io_fd
   15.25  
   15.26      page_array = (unsigned long *) malloc( sizeof(unsigned long) * max_pfn);
   15.27  
   15.28 -    hvm_buf = malloc(HVM_CTXT_SIZE);
   15.29 +    hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
   15.30 +    if ( hvm_buf_size == -1 )
   15.31 +    {
   15.32 +        ERROR("Couldn't get HVM context size from Xen");
   15.33 +        goto out;
   15.34 +    }
   15.35 +    hvm_buf = malloc(hvm_buf_size);
   15.36  
   15.37 -    if (!to_send ||!to_skip ||!page_array ||!hvm_buf ) {
   15.38 +    if (!to_send ||!to_skip ||!page_array ||!hvm_buf) {
   15.39          ERROR("Couldn't allocate memory");
   15.40          goto out;
   15.41      }
   15.42 @@ -661,7 +662,7 @@ int xc_hvm_save(int xc_handle, int io_fd
   15.43      }
   15.44  
   15.45      if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
   15.46 -                                              HVM_CTXT_SIZE)) == -1) {
   15.47 +                                              hvm_buf_size)) == -1) {
   15.48          ERROR("HVM:Could not get hvm buffer");
   15.49          goto out;
   15.50      }
    16.1 --- a/tools/python/xen/xend/XendLogging.py	Wed Feb 07 10:14:41 2007 -0700
    16.2 +++ b/tools/python/xen/xend/XendLogging.py	Wed Feb 07 10:46:18 2007 -0700
    16.3 @@ -52,8 +52,8 @@ if 'TRACE' not in logging.__dict__:
    16.4          for frame in frames:
    16.5              filename = os.path.normcase(frame[1])
    16.6              if filename != thisfile and filename != logging._srcfile:
    16.7 -                major, minor, _, _, _ = sys.version_info
    16.8 -                if major == 2 and minor >= 4:
    16.9 +                major, minor, micro, _, _ = sys.version_info
   16.10 +                if (major, minor, micro) >= (2, 4, 2):
   16.11                      return filename, frame[2], frame[3]
   16.12                  else:
   16.13                      return filename, frame[2]
    17.1 --- a/tools/python/xen/xm/main.py	Wed Feb 07 10:14:41 2007 -0700
    17.2 +++ b/tools/python/xen/xm/main.py	Wed Feb 07 10:46:18 2007 -0700
    17.3 @@ -1144,6 +1144,9 @@ def xm_sched_sedf(args):
    17.4      doms = filter(lambda x : domid_match(domid, x),
    17.5                          [parse_doms_info(dom)
    17.6                           for dom in getDomains(None, 'running')])
    17.7 +    if domid is not None and doms == []: 
    17.8 +        err("Domain '%s' does not exist." % domid)
    17.9 +        usage('sched-sedf')
   17.10  
   17.11      # print header if we aren't setting any parameters
   17.12      if len(opts.keys()) == 0:
   17.13 @@ -1207,6 +1210,9 @@ def xm_sched_credit(args):
   17.14                    for dom in getDomains(None, 'running')])
   17.15  
   17.16      if weight is None and cap is None:
   17.17 +        if domid is not None and doms == []: 
   17.18 +            err("Domain '%s' does not exist." % domid)
   17.19 +            usage('sched-credit')
   17.20          # print header if we aren't setting any parameters
   17.21          print '%-33s %-2s %-6s %-4s' % ('Name','ID','Weight','Cap')
   17.22          
    18.1 --- a/tools/python/xen/xm/opts.py	Wed Feb 07 10:14:41 2007 -0700
    18.2 +++ b/tools/python/xen/xm/opts.py	Wed Feb 07 10:46:18 2007 -0700
    18.3 @@ -250,7 +250,8 @@ class OptVar(Opt):
    18.4  class OptVals:
    18.5      """Class to hold option values.
    18.6      """
    18.7 -    pass
    18.8 +    def __init__(self):
    18.9 +        self.quiet = False
   18.10  
   18.11  class Opts:
   18.12      """Container for options.
   18.13 @@ -276,7 +277,6 @@ class Opts:
   18.14          self.argv = []
   18.15          # Option values.
   18.16          self.vals = OptVals()
   18.17 -        self.vals.quiet = 0
   18.18          # Variables for default scripts.
   18.19          self.vars = {}
   18.20          # Option to use for bare words.
    19.1 --- a/tools/xentrace/xentrace_format	Wed Feb 07 10:14:41 2007 -0700
    19.2 +++ b/tools/xentrace/xentrace_format	Wed Feb 07 10:46:18 2007 -0700
    19.3 @@ -107,6 +107,9 @@ while not interrupted:
    19.4  
    19.5          (tsc, event, d1, d2, d3, d4, d5) = struct.unpack(TRCREC, line)
    19.6  
    19.7 +        # Event field is 'uint32_t', not 'long'.
    19.8 +        event &= 0xffffffff
    19.9 +
   19.10  	#tsc = (tscH<<32) | tscL
   19.11  
   19.12  	#print i, tsc
    20.1 --- a/xen/arch/x86/domctl.c	Wed Feb 07 10:14:41 2007 -0700
    20.2 +++ b/xen/arch/x86/domctl.c	Wed Feb 07 10:46:18 2007 -0700
    20.3 @@ -326,10 +326,6 @@ long arch_do_domctl(
    20.4          struct hvm_domain_context c;
    20.5          struct domain             *d;
    20.6  
    20.7 -        c.cur = 0;
    20.8 -        c.size = domctl->u.hvmcontext.size;
    20.9 -        c.data = NULL;
   20.10 -
   20.11          ret = -ESRCH;
   20.12          if ( (d = get_domain_by_id(domctl->domain)) == NULL )
   20.13              break;
   20.14 @@ -338,19 +334,38 @@ long arch_do_domctl(
   20.15          if ( !is_hvm_domain(d) ) 
   20.16              goto gethvmcontext_out;
   20.17  
   20.18 +        c.cur = 0;
   20.19 +        c.size = hvm_save_size(d);
   20.20 +        c.data = NULL;
   20.21 +
   20.22 +        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
   20.23 +        {
   20.24 +            /* Client is querying for the correct buffer size */
   20.25 +            domctl->u.hvmcontext.size = c.size;
   20.26 +            ret = 0;
   20.27 +            goto gethvmcontext_out;            
   20.28 +        }
   20.29 +
   20.30 +        /* Check that the client has a big enough buffer */
   20.31 +        ret = -ENOSPC;
   20.32 +        if ( domctl->u.hvmcontext.size < c.size ) 
   20.33 +            goto gethvmcontext_out;
   20.34 +
   20.35 +        /* Allocate our own marshalling buffer */
   20.36          ret = -ENOMEM;
   20.37          if ( (c.data = xmalloc_bytes(c.size)) == NULL )
   20.38              goto gethvmcontext_out;
   20.39  
   20.40          ret = hvm_save(d, &c);
   20.41  
   20.42 +        domctl->u.hvmcontext.size = c.cur;
   20.43          if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
   20.44              ret = -EFAULT;
   20.45  
   20.46 +    gethvmcontext_out:
   20.47          if ( copy_to_guest(u_domctl, domctl, 1) )
   20.48              ret = -EFAULT;
   20.49  
   20.50 -    gethvmcontext_out:
   20.51          if ( c.data != NULL )
   20.52              xfree(c.data);
   20.53  
    21.1 --- a/xen/arch/x86/hvm/Makefile	Wed Feb 07 10:14:41 2007 -0700
    21.2 +++ b/xen/arch/x86/hvm/Makefile	Wed Feb 07 10:46:18 2007 -0700
    21.3 @@ -15,3 +15,4 @@ obj-y += vpt.o
    21.4  obj-y += vioapic.o
    21.5  obj-y += vlapic.o
    21.6  obj-y += vpic.o
    21.7 +obj-y += save.o
    22.1 --- a/xen/arch/x86/hvm/hpet.c	Wed Feb 07 10:14:41 2007 -0700
    22.2 +++ b/xen/arch/x86/hvm/hpet.c	Wed Feb 07 10:46:18 2007 -0700
    22.3 @@ -383,6 +383,9 @@ static int hpet_save(struct domain *d, h
    22.4  {
    22.5      HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
    22.6  
    22.7 +    /* Write the proper value into the main counter */
    22.8 +    hp->hpet.mc64 = hp->mc_offset + hvm_get_guest_time(hp->vcpu);
    22.9 +
   22.10      /* Save the HPET registers */
   22.11      return hvm_save_entry(HPET, 0, h, &hp->hpet);
   22.12  }
   22.13 @@ -406,7 +409,7 @@ static int hpet_load(struct domain *d, h
   22.14      return 0;
   22.15  }
   22.16  
   22.17 -HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load);
   22.18 +HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
   22.19  
   22.20  void hpet_init(struct vcpu *v)
   22.21  {
    23.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Feb 07 10:14:41 2007 -0700
    23.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Feb 07 10:46:18 2007 -0700
    23.3 @@ -227,7 +227,8 @@ static int hvm_load_cpu_ctxt(struct doma
    23.4      return 0;
    23.5  }
    23.6  
    23.7 -HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
    23.8 +HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
    23.9 +                          1, HVMSR_PER_VCPU);
   23.10  
   23.11  int hvm_vcpu_initialise(struct vcpu *v)
   23.12  {
   23.13 @@ -273,6 +274,24 @@ void hvm_vcpu_destroy(struct vcpu *v)
   23.14      /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
   23.15  }
   23.16  
   23.17 +
   23.18 +void hvm_vcpu_reset(struct vcpu *v)
   23.19 +{
   23.20 +    vcpu_pause(v);
   23.21 +
   23.22 +    vlapic_reset(vcpu_vlapic(v));
   23.23 +
   23.24 +    hvm_funcs.vcpu_initialise(v);
   23.25 +
   23.26 +    set_bit(_VCPUF_down, &v->vcpu_flags);
   23.27 +    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
   23.28 +    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
   23.29 +    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
   23.30 +    clear_bit(_VCPUF_blocked, &v->vcpu_flags);
   23.31 +
   23.32 +    vcpu_unpause(v);
   23.33 +}
   23.34 +
   23.35  static void hvm_vcpu_down(void)
   23.36  {
   23.37      struct vcpu *v = current;
   23.38 @@ -624,20 +643,13 @@ void hvm_hypercall_page_initialise(struc
   23.39   */
   23.40  int hvm_bringup_ap(int vcpuid, int trampoline_vector)
   23.41  {
   23.42 -    struct vcpu *bsp = current, *v;
   23.43 -    struct domain *d = bsp->domain;
   23.44 +    struct vcpu *v;
   23.45 +    struct domain *d = current->domain;
   23.46      struct vcpu_guest_context *ctxt;
   23.47      int rc = 0;
   23.48  
   23.49      BUG_ON(!is_hvm_domain(d));
   23.50  
   23.51 -    if ( bsp->vcpu_id != 0 )
   23.52 -    {
   23.53 -        gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
   23.54 -        domain_crash(bsp->domain);
   23.55 -        return -EINVAL;
   23.56 -    }
   23.57 -
   23.58      if ( (v = d->vcpu[vcpuid]) == NULL )
   23.59          return -ENOENT;
   23.60  
   23.61 @@ -668,8 +680,8 @@ int hvm_bringup_ap(int vcpuid, int tramp
   23.62          goto out;
   23.63      }
   23.64  
   23.65 -    if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
   23.66 -        vcpu_wake(d->vcpu[vcpuid]);
   23.67 +    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
   23.68 +        vcpu_wake(v);
   23.69      gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
   23.70  
   23.71   out:
    24.1 --- a/xen/arch/x86/hvm/i8254.c	Wed Feb 07 10:14:41 2007 -0700
    24.2 +++ b/xen/arch/x86/hvm/i8254.c	Wed Feb 07 10:46:18 2007 -0700
    24.3 @@ -83,8 +83,8 @@ static int pit_get_count(PITState *s, in
    24.4      struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
    24.5      struct periodic_time *pt = &s->pt[channel];
    24.6  
    24.7 -    d = muldiv64(hvm_get_guest_time(pt->vcpu) 
    24.8 -                 - c->count_load_time, PIT_FREQ, ticks_per_sec(pt->vcpu));
    24.9 +    d = muldiv64(hvm_get_guest_time(pt->vcpu) - s->count_load_time[channel],
   24.10 +                 PIT_FREQ, ticks_per_sec(pt->vcpu));
   24.11      switch(c->mode) {
   24.12      case 0:
   24.13      case 1:
   24.14 @@ -110,7 +110,7 @@ int pit_get_out(PITState *pit, int chann
   24.15      uint64_t d;
   24.16      int out;
   24.17  
   24.18 -    d = muldiv64(current_time - s->count_load_time, 
   24.19 +    d = muldiv64(current_time - pit->count_load_time[channel], 
   24.20                   PIT_FREQ, ticks_per_sec(pit->pt[channel].vcpu));
   24.21      switch(s->mode) {
   24.22      default:
   24.23 @@ -153,7 +153,7 @@ void pit_set_gate(PITState *pit, int cha
   24.24      case 5:
   24.25          if (s->gate < val) {
   24.26              /* restart counting on rising edge */
   24.27 -            s->count_load_time = hvm_get_guest_time(pt->vcpu);
   24.28 +            pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
   24.29  //            pit_irq_timer_update(s, s->count_load_time);
   24.30          }
   24.31          break;
   24.32 @@ -161,7 +161,7 @@ void pit_set_gate(PITState *pit, int cha
   24.33      case 3:
   24.34          if (s->gate < val) {
   24.35              /* restart counting on rising edge */
   24.36 -            s->count_load_time = hvm_get_guest_time(pt->vcpu);
   24.37 +            pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
   24.38  //            pit_irq_timer_update(s, s->count_load_time);
   24.39          }
   24.40          /* XXX: disable/enable counting */
   24.41 @@ -177,8 +177,8 @@ int pit_get_gate(PITState *pit, int chan
   24.42  
   24.43  void pit_time_fired(struct vcpu *v, void *priv)
   24.44  {
   24.45 -    struct hvm_hw_pit_channel *s = priv;
   24.46 -    s->count_load_time = hvm_get_guest_time(v);
   24.47 +    uint64_t *count_load_time = priv;
   24.48 +    *count_load_time = hvm_get_guest_time(v);
   24.49  }
   24.50  
   24.51  static inline void pit_load_count(PITState *pit, int channel, int val)
   24.52 @@ -190,7 +190,7 @@ static inline void pit_load_count(PITSta
   24.53  
   24.54      if (val == 0)
   24.55          val = 0x10000;
   24.56 -    s->count_load_time = hvm_get_guest_time(pt->vcpu);
   24.57 +    pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
   24.58      s->count = val;
   24.59      period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
   24.60  
   24.61 @@ -203,7 +203,7 @@ static inline void pit_load_count(PITSta
   24.62              val,
   24.63              period,
   24.64              s->mode,
   24.65 -            (long long)s->count_load_time);
   24.66 +            (long long)pit->count_load_time[channel]);
   24.67  #endif
   24.68  
   24.69      /* Choose a vcpu to set the timer on: current if appropriate else vcpu 0 */
   24.70 @@ -216,11 +216,13 @@ static inline void pit_load_count(PITSta
   24.71      switch (s->mode) {
   24.72          case 2:
   24.73              /* create periodic time */
   24.74 -            create_periodic_time(v, pt, period, 0, 0, pit_time_fired, s);
   24.75 +            create_periodic_time(v, pt, period, 0, 0, pit_time_fired, 
   24.76 +                                 &pit->count_load_time[channel]);
   24.77              break;
   24.78          case 1:
   24.79              /* create one shot time */
   24.80 -            create_periodic_time(v, pt, period, 0, 1, pit_time_fired, s);
   24.81 +            create_periodic_time(v, pt, period, 0, 1, pit_time_fired,
   24.82 +                                 &pit->count_load_time[channel]);
   24.83  #ifdef DEBUG_PIT
   24.84              printk("HVM_PIT: create one shot time.\n");
   24.85  #endif
   24.86 @@ -387,7 +389,7 @@ static void pit_info(PITState *pit)
   24.87          printk("pit 0x%x.\n", s->mode);
   24.88          printk("pit 0x%x.\n", s->bcd);
   24.89          printk("pit 0x%x.\n", s->gate);
   24.90 -        printk("pit %"PRId64"\n", s->count_load_time);
   24.91 +        printk("pit %"PRId64"\n", pit->count_load_time[i]);
   24.92  
   24.93          pt = &pit->pt[i];
   24.94          if (pt) {
   24.95 @@ -443,7 +445,7 @@ static int pit_load(struct domain *d, hv
   24.96      return 0;
   24.97  }
   24.98  
   24.99 -HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load);
  24.100 +HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
  24.101  
  24.102  static void pit_reset(void *opaque)
  24.103  {
    25.1 --- a/xen/arch/x86/hvm/intercept.c	Wed Feb 07 10:14:41 2007 -0700
    25.2 +++ b/xen/arch/x86/hvm/intercept.c	Wed Feb 07 10:46:18 2007 -0700
    25.3 @@ -29,8 +29,6 @@
    25.4  #include <asm/current.h>
    25.5  #include <io_ports.h>
    25.6  #include <xen/event.h>
    25.7 -#include <xen/compile.h>
    25.8 -#include <public/version.h>
    25.9  
   25.10  
   25.11  extern struct hvm_mmio_handler hpet_mmio_handler;
   25.12 @@ -157,180 +155,6 @@ static inline void hvm_mmio_access(struc
   25.13      }
   25.14  }
   25.15  
   25.16 -/* List of handlers for various HVM save and restore types */
   25.17 -static struct { 
   25.18 -    hvm_save_handler save;
   25.19 -    hvm_load_handler load; 
   25.20 -    const char *name;
   25.21 -} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
   25.22 -
   25.23 -/* Init-time function to add entries to that list */
   25.24 -void hvm_register_savevm(uint16_t typecode, 
   25.25 -                         const char *name,
   25.26 -                         hvm_save_handler save_state,
   25.27 -                         hvm_load_handler load_state)
   25.28 -{
   25.29 -    ASSERT(typecode <= HVM_SAVE_CODE_MAX);
   25.30 -    ASSERT(hvm_sr_handlers[typecode].save == NULL);
   25.31 -    ASSERT(hvm_sr_handlers[typecode].load == NULL);
   25.32 -    hvm_sr_handlers[typecode].save = save_state;
   25.33 -    hvm_sr_handlers[typecode].load = load_state;
   25.34 -    hvm_sr_handlers[typecode].name = name;
   25.35 -}
   25.36 -
   25.37 -
   25.38 -int hvm_save(struct domain *d, hvm_domain_context_t *h)
   25.39 -{
   25.40 -    uint32_t eax, ebx, ecx, edx;
   25.41 -    char *c;
   25.42 -    struct hvm_save_header hdr;
   25.43 -    struct hvm_save_end end;
   25.44 -    hvm_save_handler handler;
   25.45 -    uint16_t i;
   25.46 -
   25.47 -    hdr.magic = HVM_FILE_MAGIC;
   25.48 -    hdr.version = HVM_FILE_VERSION;
   25.49 -
   25.50 -    /* Save some CPUID bits */
   25.51 -    cpuid(1, &eax, &ebx, &ecx, &edx);
   25.52 -    hdr.cpuid = eax;
   25.53 -
   25.54 -    /* Save xen changeset */
   25.55 -    c = strrchr(XEN_CHANGESET, ':');
   25.56 -    if ( c )
   25.57 -        hdr.changeset = simple_strtoll(c, NULL, 16);
   25.58 -    else 
   25.59 -        hdr.changeset = -1ULL; /* Unknown */
   25.60 -
   25.61 -    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
   25.62 -    {
   25.63 -        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
   25.64 -        return -EFAULT;
   25.65 -    } 
   25.66 -
   25.67 -    /* Save all available kinds of state */
   25.68 -    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
   25.69 -    {
   25.70 -        handler = hvm_sr_handlers[i].save;
   25.71 -        if ( handler != NULL ) 
   25.72 -        {
   25.73 -            gdprintk(XENLOG_INFO, "HVM save: %s\n",  hvm_sr_handlers[i].name);
   25.74 -            if ( handler(d, h) != 0 ) 
   25.75 -            {
   25.76 -                gdprintk(XENLOG_ERR, 
   25.77 -                         "HVM save: failed to save type %"PRIu16"\n", i);
   25.78 -                return -EFAULT;
   25.79 -            } 
   25.80 -        }
   25.81 -    }
   25.82 -
   25.83 -    /* Save an end-of-file marker */
   25.84 -    if ( hvm_save_entry(END, 0, h, &end) != 0 )
   25.85 -    {
   25.86 -        /* Run out of data */
   25.87 -        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
   25.88 -        return -EFAULT;
   25.89 -    }
   25.90 -
   25.91 -    /* Save macros should not have let us overrun */
   25.92 -    ASSERT(h->cur <= h->size);
   25.93 -    return 0;
   25.94 -}
   25.95 -
   25.96 -int hvm_load(struct domain *d, hvm_domain_context_t *h)
   25.97 -{
   25.98 -    uint32_t eax, ebx, ecx, edx;
   25.99 -    char *c;
  25.100 -    uint64_t cset;
  25.101 -    struct hvm_save_header hdr;
  25.102 -    struct hvm_save_descriptor *desc;
  25.103 -    hvm_load_handler handler;
  25.104 -    struct vcpu *v;
  25.105 -    
  25.106 -    /* Read the save header, which must be first */
  25.107 -    if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 
  25.108 -        return -1;
  25.109 -
  25.110 -    if (hdr.magic != HVM_FILE_MAGIC) {
  25.111 -        gdprintk(XENLOG_ERR, 
  25.112 -                 "HVM restore: bad magic number %#"PRIx32"\n", hdr.magic);
  25.113 -        return -1;
  25.114 -    }
  25.115 -
  25.116 -    if (hdr.version != HVM_FILE_VERSION) {
  25.117 -        gdprintk(XENLOG_ERR, 
  25.118 -                 "HVM restore: unsupported version %u\n", hdr.version);
  25.119 -        return -1;
  25.120 -    }
  25.121 -
  25.122 -    cpuid(1, &eax, &ebx, &ecx, &edx);
  25.123 -    /*TODO: need to define how big a difference is acceptable */
  25.124 -    if (hdr.cpuid != eax)
  25.125 -        gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") "
  25.126 -               "does not match host (%#"PRIx32").\n", hdr.cpuid, eax);
  25.127 -
  25.128 -
  25.129 -    c = strrchr(XEN_CHANGESET, ':');
  25.130 -    if ( hdr.changeset == -1ULL )
  25.131 -        gdprintk(XENLOG_WARNING, 
  25.132 -                 "HVM restore: Xen changeset was not saved.\n");
  25.133 -    else if ( c == NULL )
  25.134 -        gdprintk(XENLOG_WARNING, 
  25.135 -                 "HVM restore: Xen changeset is not available.\n");
  25.136 -    else
  25.137 -    {
  25.138 -        cset = simple_strtoll(c, NULL, 16);
  25.139 -        if ( hdr.changeset != cset )
  25.140 -        gdprintk(XENLOG_WARNING, "HVM restore: saved Xen changeset (%#"PRIx64
  25.141 -                 ") does not match host (%#"PRIx64").\n", hdr.changeset, cset);
  25.142 -    }
  25.143 -
  25.144 -    /* Down all the vcpus: we only re-enable the ones that had state saved. */
  25.145 -    for_each_vcpu(d, v) 
  25.146 -        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
  25.147 -            vcpu_sleep_nosync(v);
  25.148 -
  25.149 -    while(1) {
  25.150 -
  25.151 -        if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
  25.152 -        {
  25.153 -            /* Run out of data */
  25.154 -            gdprintk(XENLOG_ERR, 
  25.155 -                     "HVM restore: save did not end with a null entry\n");
  25.156 -            return -1;
  25.157 -        }
  25.158 -        
  25.159 -        /* Read the typecode of the next entry  and check for the end-marker */
  25.160 -        desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
  25.161 -        if ( desc->typecode == 0 )
  25.162 -            return 0; 
  25.163 -        
  25.164 -        /* Find the handler for this entry */
  25.165 -        if ( desc->typecode > HVM_SAVE_CODE_MAX 
  25.166 -             || (handler = hvm_sr_handlers[desc->typecode].load) == NULL ) 
  25.167 -        {
  25.168 -            gdprintk(XENLOG_ERR, 
  25.169 -                     "HVM restore: unknown entry typecode %u\n", 
  25.170 -                     desc->typecode);
  25.171 -            return -1;
  25.172 -        }
  25.173 -
  25.174 -        /* Load the entry */
  25.175 -        gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",  
  25.176 -                 hvm_sr_handlers[desc->typecode].name, desc->instance);
  25.177 -        if ( handler(d, h) != 0 ) 
  25.178 -        {
  25.179 -            gdprintk(XENLOG_ERR, 
  25.180 -                     "HVM restore: failed to load entry %u/%u\n", 
  25.181 -                     desc->typecode, desc->instance);
  25.182 -            return -1;
  25.183 -        }
  25.184 -    }
  25.185 -
  25.186 -    /* Not reached */
  25.187 -}
  25.188 -
  25.189 -
  25.190  int hvm_buffered_io_intercept(ioreq_t *p)
  25.191  {
  25.192      struct vcpu *v = current;
    26.1 --- a/xen/arch/x86/hvm/irq.c	Wed Feb 07 10:14:41 2007 -0700
    26.2 +++ b/xen/arch/x86/hvm/irq.c	Wed Feb 07 10:46:18 2007 -0700
    26.3 @@ -24,16 +24,17 @@
    26.4  #include <xen/event.h>
    26.5  #include <xen/sched.h>
    26.6  #include <asm/hvm/domain.h>
    26.7 +#include <asm/hvm/support.h>
    26.8  
    26.9  static void __hvm_pci_intx_assert(
   26.10      struct domain *d, unsigned int device, unsigned int intx)
   26.11  {
   26.12 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.13 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.14      unsigned int gsi, link, isa_irq;
   26.15  
   26.16      ASSERT((device <= 31) && (intx <= 3));
   26.17  
   26.18 -    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx) )
   26.19 +    if ( __test_and_set_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
   26.20          return;
   26.21  
   26.22      gsi = hvm_pci_intx_gsi(device, intx);
   26.23 @@ -41,7 +42,7 @@ static void __hvm_pci_intx_assert(
   26.24          vioapic_irq_positive_edge(d, gsi);
   26.25  
   26.26      link    = hvm_pci_intx_link(device, intx);
   26.27 -    isa_irq = hvm_irq->pci_link_route[link];
   26.28 +    isa_irq = hvm_irq->pci_link.route[link];
   26.29      if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
   26.30           (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
   26.31      {
   26.32 @@ -61,19 +62,19 @@ void hvm_pci_intx_assert(
   26.33  static void __hvm_pci_intx_deassert(
   26.34      struct domain *d, unsigned int device, unsigned int intx)
   26.35  {
   26.36 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.37 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.38      unsigned int gsi, link, isa_irq;
   26.39  
   26.40      ASSERT((device <= 31) && (intx <= 3));
   26.41  
   26.42 -    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx) )
   26.43 +    if ( !__test_and_clear_bit(device*4 + intx, &hvm_irq->pci_intx.i) )
   26.44          return;
   26.45  
   26.46      gsi = hvm_pci_intx_gsi(device, intx);
   26.47      --hvm_irq->gsi_assert_count[gsi];
   26.48  
   26.49      link    = hvm_pci_intx_link(device, intx);
   26.50 -    isa_irq = hvm_irq->pci_link_route[link];
   26.51 +    isa_irq = hvm_irq->pci_link.route[link];
   26.52      if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
   26.53           (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
   26.54          vpic_irq_negative_edge(d, isa_irq);
   26.55 @@ -90,14 +91,14 @@ void hvm_pci_intx_deassert(
   26.56  void hvm_isa_irq_assert(
   26.57      struct domain *d, unsigned int isa_irq)
   26.58  {
   26.59 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.60 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.61      unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
   26.62  
   26.63      ASSERT(isa_irq <= 15);
   26.64  
   26.65      spin_lock(&d->arch.hvm_domain.irq_lock);
   26.66  
   26.67 -    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq) &&
   26.68 +    if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
   26.69           (hvm_irq->gsi_assert_count[gsi]++ == 0) )
   26.70      {
   26.71          vioapic_irq_positive_edge(d, gsi);
   26.72 @@ -110,14 +111,14 @@ void hvm_isa_irq_assert(
   26.73  void hvm_isa_irq_deassert(
   26.74      struct domain *d, unsigned int isa_irq)
   26.75  {
   26.76 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.77 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.78      unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
   26.79  
   26.80      ASSERT(isa_irq <= 15);
   26.81  
   26.82      spin_lock(&d->arch.hvm_domain.irq_lock);
   26.83  
   26.84 -    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq) &&
   26.85 +    if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
   26.86           (--hvm_irq->gsi_assert_count[gsi] == 0) )
   26.87          vpic_irq_negative_edge(d, isa_irq);
   26.88  
   26.89 @@ -128,7 +129,7 @@ void hvm_set_callback_irq_level(void)
   26.90  {
   26.91      struct vcpu *v = current;
   26.92      struct domain *d = v->domain;
   26.93 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.94 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   26.95      unsigned int gsi, pdev, pintx, asserted;
   26.96  
   26.97      /* Fast lock-free tests. */
   26.98 @@ -178,17 +179,17 @@ void hvm_set_callback_irq_level(void)
   26.99  
  26.100  void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
  26.101  {
  26.102 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.103 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.104      u8 old_isa_irq;
  26.105  
  26.106      ASSERT((link <= 3) && (isa_irq <= 15));
  26.107  
  26.108      spin_lock(&d->arch.hvm_domain.irq_lock);
  26.109  
  26.110 -    old_isa_irq = hvm_irq->pci_link_route[link];
  26.111 +    old_isa_irq = hvm_irq->pci_link.route[link];
  26.112      if ( old_isa_irq == isa_irq )
  26.113          goto out;
  26.114 -    hvm_irq->pci_link_route[link] = isa_irq;
  26.115 +    hvm_irq->pci_link.route[link] = isa_irq;
  26.116  
  26.117      if ( hvm_irq->pci_link_assert_count[link] == 0 )
  26.118          goto out;
  26.119 @@ -211,7 +212,7 @@ void hvm_set_pci_link_route(struct domai
  26.120  
  26.121  void hvm_set_callback_via(struct domain *d, uint64_t via)
  26.122  {
  26.123 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.124 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.125      unsigned int gsi=0, pdev=0, pintx=0;
  26.126      uint8_t via_type;
  26.127  
  26.128 @@ -335,3 +336,153 @@ int is_isa_irq_masked(struct vcpu *v, in
  26.129               (1 << (isa_irq & 7))) &&
  26.130              domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
  26.131  }
  26.132 +
  26.133 +#if 0 /* Keep for debugging */
  26.134 +static void irq_dump(struct domain *d)
  26.135 +{
  26.136 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.137 +    int i; 
  26.138 +    printk("PCI 0x%16.16"PRIx64"%16.16"PRIx64
  26.139 +           " ISA 0x%8.8"PRIx32" ROUTE %u %u %u %u\n",
  26.140 +           hvm_irq->pci_intx.pad[0],  hvm_irq->pci_intx.pad[1],
  26.141 +           (uint32_t) hvm_irq->isa_irq.pad[0], 
  26.142 +           hvm_irq->pci_link.route[0], hvm_irq->pci_link.route[1],
  26.143 +           hvm_irq->pci_link.route[2], hvm_irq->pci_link.route[3]);
  26.144 +    for ( i = 0 ; i < VIOAPIC_NUM_PINS; i += 8 )
  26.145 +        printk("GSI  %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8
  26.146 +               " %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
  26.147 +               hvm_irq->gsi_assert_count[i+0],
  26.148 +               hvm_irq->gsi_assert_count[i+1],
  26.149 +               hvm_irq->gsi_assert_count[i+2],
  26.150 +               hvm_irq->gsi_assert_count[i+3],
  26.151 +               hvm_irq->gsi_assert_count[i+4],
  26.152 +               hvm_irq->gsi_assert_count[i+5],
  26.153 +               hvm_irq->gsi_assert_count[i+6],
  26.154 +               hvm_irq->gsi_assert_count[i+7]);
  26.155 +    printk("Link %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8" %2.2"PRIu8"\n",
  26.156 +           hvm_irq->pci_link_assert_count[0],
  26.157 +           hvm_irq->pci_link_assert_count[1],
  26.158 +           hvm_irq->pci_link_assert_count[2],
  26.159 +           hvm_irq->pci_link_assert_count[3]);
  26.160 +    printk("Callback via %i:0x%"PRIx32",%s asserted\n", 
  26.161 +           hvm_irq->callback_via_type, hvm_irq->callback_via.gsi, 
  26.162 +           hvm_irq->callback_via_asserted ? "" : " not");
  26.163 +}
  26.164 +#endif
  26.165 +
  26.166 +static int irq_save_pci(struct domain *d, hvm_domain_context_t *h)
  26.167 +{
  26.168 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.169 +
  26.170 +    /* Save PCI IRQ lines */
  26.171 +    return ( hvm_save_entry(PCI_IRQ, 0, h, &hvm_irq->pci_intx) );
  26.172 +}
  26.173 +
  26.174 +static int irq_save_isa(struct domain *d, hvm_domain_context_t *h)
  26.175 +{
  26.176 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.177 +
  26.178 +    /* Save ISA IRQ lines */
  26.179 +    return ( hvm_save_entry(ISA_IRQ, 0, h, &hvm_irq->isa_irq) );
  26.180 +}
  26.181 +
  26.182 +static int irq_save_link(struct domain *d, hvm_domain_context_t *h)
  26.183 +{
  26.184 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.185 +
  26.186 +    /* Save PCI-ISA link state */
  26.187 +    return ( hvm_save_entry(PCI_LINK, 0, h, &hvm_irq->pci_link) );
  26.188 +}
  26.189 +
  26.190 +static int irq_load_pci(struct domain *d, hvm_domain_context_t *h)
  26.191 +{
  26.192 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.193 +    int link, dev, intx, gsi;
  26.194 +
  26.195 +    /* Load the PCI IRQ lines */
  26.196 +    if ( hvm_load_entry(PCI_IRQ, h, &hvm_irq->pci_intx) != 0 )
  26.197 +        return -EINVAL;
  26.198 +
  26.199 +    /* Clear the PCI link assert counts */
  26.200 +    for ( link = 0; link < 4; link++ )
  26.201 +        hvm_irq->pci_link_assert_count[link] = 0;
  26.202 +    
  26.203 +    /* Clear the GSI assert counts */
  26.204 +    for ( gsi = 0; gsi < VIOAPIC_NUM_PINS; gsi++ )
  26.205 +        hvm_irq->gsi_assert_count[gsi] = 0;
  26.206 +
  26.207 +    /* Recalculate the counts from the IRQ line state */
  26.208 +    for ( dev = 0; dev < 32; dev++ )
  26.209 +        for ( intx = 0; intx < 4; intx++ )
  26.210 +            if ( test_bit(dev*4 + intx, &hvm_irq->pci_intx.i) )
  26.211 +            {
  26.212 +                /* Direct GSI assert */
  26.213 +                gsi = hvm_pci_intx_gsi(dev, intx);
  26.214 +                hvm_irq->gsi_assert_count[gsi]++;
  26.215 +                /* PCI-ISA bridge assert */
  26.216 +                link = hvm_pci_intx_link(dev, intx);
  26.217 +                hvm_irq->pci_link_assert_count[link]++;
  26.218 +            }
  26.219 +
  26.220 +    return 0;
  26.221 +}
  26.222 +
  26.223 +static int irq_load_isa(struct domain *d, hvm_domain_context_t *h)
  26.224 +{
  26.225 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.226 +    int irq;
  26.227 +
  26.228 +    /* Load the ISA IRQ lines */
  26.229 +    if ( hvm_load_entry(ISA_IRQ, h, &hvm_irq->isa_irq) != 0 )
  26.230 +        return -EINVAL;
  26.231 +
  26.232 +    /* Adjust the GSI assert counts for the ISA IRQ line state.
  26.233 +     * This relies on the PCI IRQ state being loaded first. */
  26.234 +    for ( irq = 0; irq < 16; irq++ )
  26.235 +        if ( test_bit(irq, &hvm_irq->isa_irq.i) )
  26.236 +            hvm_irq->gsi_assert_count[hvm_isa_irq_to_gsi(irq)]++;
  26.237 +
  26.238 +    return 0;
  26.239 +}
  26.240 +
  26.241 +
  26.242 +static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
  26.243 +{
  26.244 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
  26.245 +    int link, gsi;
  26.246 +
  26.247 +    /* Load the PCI-ISA IRQ link routing table */
  26.248 +    if ( hvm_load_entry(PCI_LINK, h, &hvm_irq->pci_link) != 0 )
  26.249 +        return -EINVAL;
  26.250 +
  26.251 +    /* Sanity check */
  26.252 +    for ( link = 0; link < 4; link++ )
  26.253 +        if ( hvm_irq->pci_link.route[link] > 15 )
  26.254 +        {
  26.255 +            gdprintk(XENLOG_ERR, 
  26.256 +                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
  26.257 +                     link, hvm_irq->pci_link.route[link]);
  26.258 +            return -EINVAL;
  26.259 +        }
  26.260 +
  26.261 +    /* Adjust the GSI assert counts for the link outputs.
  26.262 +     * This relies on the PCI and ISA IRQ state being loaded first */
  26.263 +    for ( link = 0; link < 4; link++ )
  26.264 +    {
  26.265 +        if ( hvm_irq->pci_link_assert_count[link] != 0 )
  26.266 +        {
  26.267 +            gsi = hvm_irq->pci_link.route[link];
  26.268 +            if ( gsi != 0 )
  26.269 +                hvm_irq->gsi_assert_count[gsi]++;
  26.270 +        }
  26.271 +    }
  26.272 +
  26.273 +    return 0;
  26.274 +}
  26.275 +
  26.276 +HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
  26.277 +                          1, HVMSR_PER_DOM);
  26.278 +HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, 
  26.279 +                          1, HVMSR_PER_DOM);
  26.280 +HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
  26.281 +                          1, HVMSR_PER_DOM);
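
The three load handlers above deliberately recompute the assert counters instead of saving them, which is why the ordering comments matter. A purely illustrative trace: irq_load_pci() first zeroes both count arrays; if the restored pci_intx bitmap has device 2 INTA# (intx 0) asserted, it then increments gsi_assert_count[24] (hvm_pci_intx_gsi(2,0) = ((2*4 + 2/8 + 0) & 31) + 16 = 24) and pci_link_assert_count[2] (hvm_pci_intx_link(2,0) = (2+0) & 3 = 2). irq_load_isa() adds any asserted ISA wires on top of the GSI counts, and irq_load_link(), seeing pci_link_assert_count[2] != 0, finally increments gsi_assert_count for whatever GSI pci_link.route[2] points at. Hence PCI state must be loaded before ISA state, and both before the link routing.
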
    27.1 --- a/xen/arch/x86/hvm/rtc.c	Wed Feb 07 10:14:41 2007 -0700
    27.2 +++ b/xen/arch/x86/hvm/rtc.c	Wed Feb 07 10:46:18 2007 -0700
    27.3 @@ -417,7 +417,7 @@ static int rtc_load(struct domain *d, hv
    27.4      return 0;
    27.5  }
    27.6  
    27.7 -HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load);
    27.8 +HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
    27.9  
   27.10  
   27.11  void rtc_init(struct vcpu *v, int base)
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/xen/arch/x86/hvm/save.c	Wed Feb 07 10:46:18 2007 -0700
    28.3 @@ -0,0 +1,229 @@
    28.4 +/*
    28.5 + * hvm/save.c: Save and restore HVM guest's emulated hardware state.
    28.6 + *
    28.7 + * Copyright (c) 2004, Intel Corporation.
    28.8 + * Copyright (c) 2007, XenSource Inc.
    28.9 + *
   28.10 + * This program is free software; you can redistribute it and/or modify it
   28.11 + * under the terms and conditions of the GNU General Public License,
   28.12 + * version 2, as published by the Free Software Foundation.
   28.13 + *
   28.14 + * This program is distributed in the hope it will be useful, but WITHOUT
   28.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   28.17 + * more details.
   28.18 + *
   28.19 + * You should have received a copy of the GNU General Public License along with
   28.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   28.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   28.22 + */
   28.23 +
   28.24 +#include <xen/config.h>
   28.25 +#include <xen/compile.h>
   28.26 +#include <xen/lib.h>
   28.27 +#include <public/version.h>
   28.28 +#include <xen/sched.h>
   28.29 +
   28.30 +#include <asm/hvm/hvm.h>
   28.31 +#include <asm/hvm/support.h>
   28.32 +#include <asm/hvm/domain.h>
   28.33 +#include <asm/current.h>
   28.34 +
   28.35 +
   28.36 +/* List of handlers for various HVM save and restore types */
   28.37 +static struct { 
   28.38 +    hvm_save_handler save;
   28.39 +    hvm_load_handler load; 
   28.40 +    const char *name;
   28.41 +    size_t size;
   28.42 +    int kind;
   28.43 +} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
   28.44 +
   28.45 +/* Init-time function to add entries to that list */
   28.46 +void hvm_register_savevm(uint16_t typecode, 
   28.47 +                         const char *name,
   28.48 +                         hvm_save_handler save_state,
   28.49 +                         hvm_load_handler load_state,
   28.50 +                         size_t size, int kind)
   28.51 +{
   28.52 +    ASSERT(typecode <= HVM_SAVE_CODE_MAX);
   28.53 +    ASSERT(hvm_sr_handlers[typecode].save == NULL);
   28.54 +    ASSERT(hvm_sr_handlers[typecode].load == NULL);
   28.55 +    hvm_sr_handlers[typecode].save = save_state;
   28.56 +    hvm_sr_handlers[typecode].load = load_state;
   28.57 +    hvm_sr_handlers[typecode].name = name;
   28.58 +    hvm_sr_handlers[typecode].size = size;
   28.59 +    hvm_sr_handlers[typecode].kind = kind;
   28.60 +}
   28.61 +
   28.62 +size_t hvm_save_size(struct domain *d) 
   28.63 +{
   28.64 +    struct vcpu *v;
   28.65 +    size_t sz;
   28.66 +    int i;
   28.67 +    
   28.68 +    /* Basic overhead for header and footer */
   28.69 +    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
   28.70 +
   28.71 +    /* Plus space for each thing we will be saving */
   28.72 +    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
   28.73 +        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
   28.74 +            for_each_vcpu(d, v)
   28.75 +                sz += hvm_sr_handlers[i].size;
   28.76 +        else 
   28.77 +            sz += hvm_sr_handlers[i].size;
   28.78 +
   28.79 +    return sz;
   28.80 +}
   28.81 +
   28.82 +
   28.83 +int hvm_save(struct domain *d, hvm_domain_context_t *h)
   28.84 +{
   28.85 +    uint32_t eax, ebx, ecx, edx;
   28.86 +    char *c;
   28.87 +    struct hvm_save_header hdr;
   28.88 +    struct hvm_save_end end;
   28.89 +    hvm_save_handler handler;
   28.90 +    uint16_t i;
   28.91 +
   28.92 +    hdr.magic = HVM_FILE_MAGIC;
   28.93 +    hdr.version = HVM_FILE_VERSION;
   28.94 +
   28.95 +    /* Save some CPUID bits */
   28.96 +    cpuid(1, &eax, &ebx, &ecx, &edx);
   28.97 +    hdr.cpuid = eax;
   28.98 +
   28.99 +    /* Save xen changeset */
  28.100 +    c = strrchr(XEN_CHANGESET, ':');
  28.101 +    if ( c )
  28.102 +        hdr.changeset = simple_strtoll(c, NULL, 16);
  28.103 +    else 
  28.104 +        hdr.changeset = -1ULL; /* Unknown */
  28.105 +
  28.106 +    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
  28.107 +    {
  28.108 +        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
  28.109 +        return -EFAULT;
  28.110 +    } 
  28.111 +
  28.112 +    /* Save all available kinds of state */
  28.113 +    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
  28.114 +    {
  28.115 +        handler = hvm_sr_handlers[i].save;
  28.116 +        if ( handler != NULL ) 
  28.117 +        {
  28.118 +            gdprintk(XENLOG_INFO, "HVM save: %s\n",  hvm_sr_handlers[i].name);
  28.119 +            if ( handler(d, h) != 0 ) 
  28.120 +            {
  28.121 +                gdprintk(XENLOG_ERR, 
  28.122 +                         "HVM save: failed to save type %"PRIu16"\n", i);
  28.123 +                return -EFAULT;
  28.124 +            } 
  28.125 +        }
  28.126 +    }
  28.127 +
  28.128 +    /* Save an end-of-file marker */
  28.129 +    if ( hvm_save_entry(END, 0, h, &end) != 0 )
  28.130 +    {
  28.131 +        /* Run out of data */
  28.132 +        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
  28.133 +        return -EFAULT;
  28.134 +    }
  28.135 +
  28.136 +    /* Save macros should not have let us overrun */
  28.137 +    ASSERT(h->cur <= h->size);
  28.138 +    return 0;
  28.139 +}
  28.140 +
  28.141 +int hvm_load(struct domain *d, hvm_domain_context_t *h)
  28.142 +{
  28.143 +    uint32_t eax, ebx, ecx, edx;
  28.144 +    char *c;
  28.145 +    uint64_t cset;
  28.146 +    struct hvm_save_header hdr;
  28.147 +    struct hvm_save_descriptor *desc;
  28.148 +    hvm_load_handler handler;
  28.149 +    struct vcpu *v;
  28.150 +    
  28.151 +    /* Read the save header, which must be first */
  28.152 +    if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 
  28.153 +        return -1;
  28.154 +
  28.155 +    if (hdr.magic != HVM_FILE_MAGIC) {
  28.156 +        gdprintk(XENLOG_ERR, 
  28.157 +                 "HVM restore: bad magic number %#"PRIx32"\n", hdr.magic);
  28.158 +        return -1;
  28.159 +    }
  28.160 +
  28.161 +    if (hdr.version != HVM_FILE_VERSION) {
  28.162 +        gdprintk(XENLOG_ERR, 
  28.163 +                 "HVM restore: unsupported version %u\n", hdr.version);
  28.164 +        return -1;
  28.165 +    }
  28.166 +
  28.167 +    cpuid(1, &eax, &ebx, &ecx, &edx);
  28.168 +    /*TODO: need to define how big a difference is acceptable */
  28.169 +    if (hdr.cpuid != eax)
  28.170 +        gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") "
  28.171 +               "does not match host (%#"PRIx32").\n", hdr.cpuid, eax);
  28.172 +
  28.173 +
  28.174 +    c = strrchr(XEN_CHANGESET, ':');
  28.175 +    if ( hdr.changeset == -1ULL )
  28.176 +        gdprintk(XENLOG_WARNING, 
  28.177 +                 "HVM restore: Xen changeset was not saved.\n");
  28.178 +    else if ( c == NULL )
  28.179 +        gdprintk(XENLOG_WARNING, 
  28.180 +                 "HVM restore: Xen changeset is not available.\n");
  28.181 +    else
  28.182 +    {
  28.183 +        cset = simple_strtoll(c, NULL, 16);
  28.184 +        if ( hdr.changeset != cset )
  28.185 +        gdprintk(XENLOG_WARNING, "HVM restore: saved Xen changeset (%#"PRIx64
  28.186 +                 ") does not match host (%#"PRIx64").\n", hdr.changeset, cset);
  28.187 +    }
  28.188 +
  28.189 +    /* Down all the vcpus: we only re-enable the ones that had state saved. */
  28.190 +    for_each_vcpu(d, v) 
  28.191 +        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
  28.192 +            vcpu_sleep_nosync(v);
  28.193 +
  28.194 +    while(1) {
  28.195 +
  28.196 +        if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
  28.197 +        {
  28.198 +            /* Run out of data */
  28.199 +            gdprintk(XENLOG_ERR, 
  28.200 +                     "HVM restore: save did not end with a null entry\n");
  28.201 +            return -1;
  28.202 +        }
  28.203 +        
  28.204 +        /* Read the typecode of the next entry  and check for the end-marker */
  28.205 +        desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
  28.206 +        if ( desc->typecode == 0 )
  28.207 +            return 0; 
  28.208 +        
  28.209 +        /* Find the handler for this entry */
  28.210 +        if ( desc->typecode > HVM_SAVE_CODE_MAX 
  28.211 +             || (handler = hvm_sr_handlers[desc->typecode].load) == NULL ) 
  28.212 +        {
  28.213 +            gdprintk(XENLOG_ERR, 
  28.214 +                     "HVM restore: unknown entry typecode %u\n", 
  28.215 +                     desc->typecode);
  28.216 +            return -1;
  28.217 +        }
  28.218 +
  28.219 +        /* Load the entry */
  28.220 +        gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",  
  28.221 +                 hvm_sr_handlers[desc->typecode].name, desc->instance);
  28.222 +        if ( handler(d, h) != 0 ) 
  28.223 +        {
  28.224 +            gdprintk(XENLOG_ERR, 
  28.225 +                     "HVM restore: failed to load entry %u/%u\n", 
  28.226 +                     desc->typecode, desc->instance);
  28.227 +            return -1;
  28.228 +        }
  28.229 +    }
  28.230 +
  28.231 +    /* Not reached */
  28.232 +}
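
hvm_save() above emits a flat stream: a HEADER record, one record per registered handler, then a zero-typecode END record, each preceded by a struct hvm_save_descriptor. Below is a minimal tools-side sketch of walking such a buffer, assuming the descriptor layout (typecode, instance, length) declared in public/hvm/save.h; walk_hvm_records() and its arguments are hypothetical, not part of this changeset.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hvm_save_descriptor {          /* mirrors public/hvm/save.h (assumed) */
    uint16_t typecode;
    uint16_t instance;
    uint32_t length;                   /* payload bytes, excluding this header */
};

static int walk_hvm_records(const uint8_t *buf, size_t len)
{
    size_t cur = 0;
    struct hvm_save_descriptor d;

    while ( cur + sizeof(d) <= len )
    {
        memcpy(&d, buf + cur, sizeof(d));
        if ( d.typecode == 0 )                     /* END marker: clean finish */
            return 0;
        printf("record: typecode %u, instance %u, %u bytes\n",
               (unsigned)d.typecode, (unsigned)d.instance, (unsigned)d.length);
        if ( d.length > len - cur - sizeof(d) )
            return -1;                             /* truncated payload */
        cur += sizeof(d) + d.length;
    }
    return -1;                                     /* no END marker found */
}
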
    29.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 07 10:14:41 2007 -0700
    29.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 07 10:46:18 2007 -0700
    29.3 @@ -591,6 +591,7 @@ void svm_load_cpu_state(struct vcpu *v, 
    29.4  {
    29.5      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    29.6  
    29.7 +    vmcb->kerngsbase = data->shadow_gs;
    29.8      /* MSR_LSTAR, MSR_STAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_EFER */
    29.9      vmcb->lstar  = data->msr_items[0];
   29.10      vmcb->star   = data->msr_items[1];
    30.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 07 10:14:41 2007 -0700
    30.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Feb 07 10:46:18 2007 -0700
    30.3 @@ -209,7 +209,8 @@ int svm_create_vmcb(struct vcpu *v)
    30.4      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
    30.5      int rc;
    30.6  
    30.7 -    if ( (arch_svm->vmcb = alloc_vmcb()) == NULL )
    30.8 +    if ( (arch_svm->vmcb == NULL) &&
    30.9 +         (arch_svm->vmcb = alloc_vmcb()) == NULL )
   30.10      {
   30.11          printk("Failed to create a new VMCB\n");
   30.12          return -ENOMEM;
    31.1 --- a/xen/arch/x86/hvm/vioapic.c	Wed Feb 07 10:14:41 2007 -0700
    31.2 +++ b/xen/arch/x86/hvm/vioapic.c	Wed Feb 07 10:46:18 2007 -0700
    31.3 @@ -125,7 +125,7 @@ static void vioapic_write_redirent(
    31.4      struct hvm_hw_vioapic *vioapic, unsigned int idx, int top_word, uint32_t val)
    31.5  {
    31.6      struct domain *d = vioapic_domain(vioapic);
    31.7 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
    31.8 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    31.9      union vioapic_redir_entry *pent, ent;
   31.10  
   31.11      spin_lock(&d->arch.hvm_domain.irq_lock);
   31.12 @@ -446,7 +446,7 @@ static int get_eoi_gsi(struct hvm_hw_vio
   31.13  void vioapic_update_EOI(struct domain *d, int vector)
   31.14  {
   31.15      struct hvm_hw_vioapic *vioapic = domain_vioapic(d);
   31.16 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   31.17 +    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
   31.18      union vioapic_redir_entry *ent;
   31.19      int gsi;
   31.20  
   31.21 @@ -486,41 +486,10 @@ static void ioapic_info(struct hvm_hw_vi
   31.22      }
   31.23  
   31.24  }
   31.25 -static void hvmirq_info(struct hvm_hw_irq *hvm_irq)
   31.26 -{
   31.27 -    int i;
   31.28 -    printk("*****hvmirq state:*****\n");
   31.29 -    for (i = 0; i < BITS_TO_LONGS(32*4); i++)
   31.30 -        printk("hvmirq pci_intx[%d]:0x%lx.\n", i, hvm_irq->pci_intx[i]);
   31.31 -
   31.32 -    for (i = 0; i < BITS_TO_LONGS(16); i++)
   31.33 -        printk("hvmirq isa_irq[%d]:0x%lx.\n", i, hvm_irq->isa_irq[i]);
   31.34 -
   31.35 -    for (i = 0; i < BITS_TO_LONGS(1); i++)
   31.36 -        printk("hvmirq callback_irq_wire[%d]:0x%lx.\n", i, hvm_irq->callback_irq_wire[i]);
   31.37 -
   31.38 -    printk("hvmirq callback_via_type:0x%x.\n", hvm_irq->callback_via_type);
   31.39 -    printk("hvmirq callback_via:0x%x.\n", hvm_irq->callback_via.gsi);
   31.40 -    
   31.41 -
   31.42 -    for (i = 0; i < 4; i++)
   31.43 -        printk("hvmirq pci_link_route[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_route[i]);
   31.44 -
   31.45 -    for (i = 0; i < 4; i++)
   31.46 -        printk("hvmirq pci_link_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_assert_count[i]);
   31.47 -
   31.48 -    for (i = 0; i < VIOAPIC_NUM_PINS; i++)
   31.49 -        printk("hvmirq gsi_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->gsi_assert_count[i]);
   31.50 -
   31.51 -    printk("hvmirq round_robin_prev_vcpu:0x%"PRIx8".\n", hvm_irq->round_robin_prev_vcpu);
   31.52 -}
   31.53  #else
   31.54  static void ioapic_info(struct hvm_hw_vioapic *s)
   31.55  {
   31.56  }
   31.57 -static void hvmirq_info(struct hvm_hw_irq *hvm_irq)
   31.58 -{
   31.59 -}
   31.60  #endif
   31.61  
   31.62  
   31.63 @@ -533,16 +502,6 @@ static int ioapic_save(struct domain *d,
   31.64      return ( hvm_save_entry(IOAPIC, 0, h, s) );
   31.65  }
   31.66  
   31.67 -static int ioapic_save_irqs(struct domain *d, hvm_domain_context_t *h)
   31.68 -{
   31.69 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   31.70 -    hvmirq_info(hvm_irq);
   31.71 -
   31.72 -    /* save IRQ state*/
   31.73 -    return ( hvm_save_entry(IRQ, 0, h, hvm_irq) );    
   31.74 -}
   31.75 -
   31.76 -
   31.77  static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
   31.78  {
   31.79      struct hvm_hw_vioapic *s = domain_vioapic(d);
   31.80 @@ -555,20 +514,7 @@ static int ioapic_load(struct domain *d,
   31.81      return 0;
   31.82  }
   31.83  
   31.84 -static int ioapic_load_irqs(struct domain *d, hvm_domain_context_t *h)
   31.85 -{
   31.86 -    struct hvm_hw_irq *hvm_irq = &d->arch.hvm_domain.irq;
   31.87 -
   31.88 -    /* restore irq state */
   31.89 -    if ( hvm_load_entry(IRQ, h, hvm_irq) != 0 )
   31.90 -        return -EINVAL;
   31.91 -
   31.92 -    hvmirq_info(hvm_irq);
   31.93 -    return 0;
   31.94 -}
   31.95 -
   31.96 -HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
   31.97 -HVM_REGISTER_SAVE_RESTORE(IRQ, ioapic_save_irqs, ioapic_load_irqs);
   31.98 +HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
   31.99  
  31.100  void vioapic_init(struct domain *d)
  31.101  {
    32.1 --- a/xen/arch/x86/hvm/vlapic.c	Wed Feb 07 10:14:41 2007 -0700
    32.2 +++ b/xen/arch/x86/hvm/vlapic.c	Wed Feb 07 10:46:18 2007 -0700
    32.3 @@ -83,8 +83,6 @@ static unsigned int vlapic_lvt_mask[VLAP
    32.4  #define vlapic_base_address(vlapic)                             \
    32.5      (vlapic->hw.apic_base_msr & MSR_IA32_APICBASE_BASE)
    32.6  
    32.7 -static int vlapic_reset(struct vlapic *vlapic);
    32.8 -
    32.9  /*
   32.10   * Generic APIC bitmap vector update & search routines.
   32.11   */
   32.12 @@ -293,8 +291,11 @@ static int vlapic_accept_irq(struct vcpu
   32.13          break;
   32.14  
   32.15      case APIC_DM_SMI:
   32.16 +        gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
   32.17 +        break;
   32.18 +
   32.19      case APIC_DM_NMI:
   32.20 -        gdprintk(XENLOG_WARNING, "Ignoring guest SMI/NMI\n");
   32.21 +        gdprintk(XENLOG_WARNING, "Ignoring guest NMI\n");
   32.22          break;
   32.23  
   32.24      case APIC_DM_INIT:
   32.25 @@ -303,10 +304,7 @@ static int vlapic_accept_irq(struct vcpu
   32.26              break;
   32.27          /* FIXME How to check the situation after vcpu reset? */
   32.28          if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   32.29 -        {
   32.30 -            gdprintk(XENLOG_ERR, "Reset hvm vcpu not supported yet\n");
   32.31 -            goto exit_and_crash;
   32.32 -        }
   32.33 +            hvm_vcpu_reset(v);
   32.34          v->arch.hvm_vcpu.init_sipi_sipi_state =
   32.35              HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
   32.36          result = 1;
   32.37 @@ -764,7 +762,7 @@ int cpu_get_apic_interrupt(struct vcpu *
   32.38  }
   32.39  
    32.40  /* Reset the VLAPIC back to its power-on/reset state. */
   32.41 -static int vlapic_reset(struct vlapic *vlapic)
   32.42 +void vlapic_reset(struct vlapic *vlapic)
   32.43  {
   32.44      struct vcpu *v = vlapic_vcpu(vlapic);
   32.45      int i;
   32.46 @@ -793,8 +791,6 @@ static int vlapic_reset(struct vlapic *v
   32.47  
   32.48      vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
   32.49      vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
   32.50 -
   32.51 -    return 1;
   32.52  }
   32.53  
   32.54  #ifdef HVM_DEBUG_SUSPEND
   32.55 @@ -908,8 +904,10 @@ static int lapic_load_regs(struct domain
   32.56      return 0;
   32.57  }
   32.58  
   32.59 -HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden);
   32.60 -HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
   32.61 +HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
   32.62 +                          1, HVMSR_PER_VCPU);
   32.63 +HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
   32.64 +                          1, HVMSR_PER_VCPU);
   32.65  
   32.66  int vlapic_init(struct vcpu *v)
   32.67  {
   32.68 @@ -922,7 +920,6 @@ int vlapic_init(struct vcpu *v)
   32.69      {
   32.70          dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
   32.71                  v->vcpu_id);
   32.72 -        xfree(vlapic);
   32.73          return -ENOMEM;
   32.74      }
   32.75  
    33.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 07 10:14:41 2007 -0700
    33.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 07 10:46:18 2007 -0700
    33.3 @@ -295,6 +295,11 @@ static void construct_vmcs(struct vcpu *
    33.4  
    33.5      vmx_vmcs_enter(v);
    33.6  
    33.7 +    v->arch.hvm_vmx.cpu_cr2 = 0;
    33.8 +    v->arch.hvm_vmx.cpu_cr3 = 0;
    33.9 +    memset(&v->arch.hvm_vmx.msr_state, 0, sizeof(v->arch.hvm_vmx.msr_state));
   33.10 +    v->arch.hvm_vmx.vmxassist_enabled = 0;
   33.11 +
   33.12      /* VMCS controls. */
   33.13      __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
   33.14      __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
   33.15 @@ -448,10 +453,13 @@ static void construct_vmcs(struct vcpu *
   33.16  
   33.17  int vmx_create_vmcs(struct vcpu *v)
   33.18  {
   33.19 -    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
   33.20 -        return -ENOMEM;
   33.21 - 
   33.22 -    __vmx_clear_vmcs(v);
   33.23 +    if ( v->arch.hvm_vmx.vmcs == NULL )
   33.24 +    {
   33.25 +        if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
   33.26 +            return -ENOMEM;
   33.27 +
   33.28 +        __vmx_clear_vmcs(v);
   33.29 +    }
   33.30  
   33.31      construct_vmcs(v);
   33.32  
    34.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 07 10:14:41 2007 -0700
    34.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 07 10:46:18 2007 -0700
    34.3 @@ -588,7 +588,7 @@ void vmx_save_cpu_state(struct vcpu *v, 
    34.4      int i = 0;
    34.5  
    34.6      data->shadow_gs = guest_state->shadow_gs;
    34.7 -    data->vmxassist_enabled = v->arch.hvm_vmx.vmxassist_enabled;
    34.8 +
    34.9      /* save msrs */
   34.10      data->flags = guest_flags;
   34.11      for (i = 0; i < VMX_MSR_COUNT; i++)
   34.12 @@ -611,10 +611,7 @@ void vmx_load_cpu_state(struct vcpu *v, 
   34.13  
   34.14      guest_state->shadow_gs = data->shadow_gs;
   34.15  
   34.16 -    /*XXX:no need to restore msrs, current!=vcpu as not called by arch_vmx_do_launch */
   34.17 -/*    vmx_restore_guest_msrs(v);*/
   34.18 -
   34.19 -    v->arch.hvm_vmx.vmxassist_enabled = data->vmxassist_enabled;
   34.20 +    v->arch.hvm_vmx.vmxassist_enabled = !(data->cr0 & X86_CR0_PE);
   34.21  
   34.22      hvm_set_guest_time(v, data->tsc);
   34.23  
    35.1 --- a/xen/arch/x86/hvm/vpic.c	Wed Feb 07 10:14:41 2007 -0700
    35.2 +++ b/xen/arch/x86/hvm/vpic.c	Wed Feb 07 10:46:18 2007 -0700
    35.3 @@ -440,7 +440,7 @@ static int vpic_load(struct domain *d, h
    35.4      return 0;
    35.5  }
    35.6  
    35.7 -HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load);
    35.8 +HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
    35.9  
   35.10  void vpic_init(struct domain *d)
   35.11  {
    36.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Feb 07 10:14:41 2007 -0700
    36.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Wed Feb 07 10:46:18 2007 -0700
    36.3 @@ -3875,11 +3875,9 @@ static inline void * emulate_map_dest(st
    36.4          goto page_fault;
    36.5      }
    36.6  
    36.7 -    /* Attempted a write to a bad gfn? This should never happen:
    36.8 -     * after all, we're here because this write is to a page table. */
    36.9 -    BUG_ON(!mfn_valid(mfn));
   36.10 -
   36.11 -    ASSERT(sh_mfn_is_a_page_table(mfn));
   36.12 +    if ( !mfn_valid(mfn) )
   36.13 +        return NULL;
   36.14 +
   36.15      *mfnp = mfn;
   36.16      return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
   36.17  
    37.1 --- a/xen/include/asm-x86/hvm/domain.h	Wed Feb 07 10:14:41 2007 -0700
    37.2 +++ b/xen/include/asm-x86/hvm/domain.h	Wed Feb 07 10:46:18 2007 -0700
    37.3 @@ -39,7 +39,7 @@ struct hvm_domain {
    37.4  
    37.5      /* Lock protects access to irq, vpic and vioapic. */
    37.6      spinlock_t             irq_lock;
    37.7 -    struct hvm_hw_irq      irq;
    37.8 +    struct hvm_irq         irq;
    37.9      struct hvm_hw_vpic     vpic[2]; /* 0=master; 1=slave */
   37.10      struct hvm_hw_vioapic  vioapic;
   37.11  
    38.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Feb 07 10:14:41 2007 -0700
    38.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Feb 07 10:46:18 2007 -0700
    38.3 @@ -153,6 +153,7 @@ void hvm_domain_destroy(struct domain *d
    38.4  
    38.5  int hvm_vcpu_initialise(struct vcpu *v);
    38.6  void hvm_vcpu_destroy(struct vcpu *v);
    38.7 +void hvm_vcpu_reset(struct vcpu *vcpu);
    38.8  
    38.9  void hvm_send_assist_req(struct vcpu *v);
   38.10  
    39.1 --- a/xen/include/asm-x86/hvm/irq.h	Wed Feb 07 10:14:41 2007 -0700
    39.2 +++ b/xen/include/asm-x86/hvm/irq.h	Wed Feb 07 10:46:18 2007 -0700
    39.3 @@ -28,6 +28,69 @@
    39.4  #include <asm/hvm/vioapic.h>
    39.5  #include <public/hvm/save.h>
    39.6  
    39.7 +
    39.8 +struct hvm_irq {
    39.9 +    /*
   39.10 +     * Virtual interrupt wires for a single PCI bus.
   39.11 +     * Indexed by: device*4 + INTx#.
   39.12 +     */
   39.13 +    struct hvm_hw_pci_irqs pci_intx;
   39.14 +
   39.15 +    /*
   39.16 +     * Virtual interrupt wires for ISA devices.
   39.17 +     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
   39.18 +     */
   39.19 +    struct hvm_hw_isa_irqs isa_irq;
   39.20 +
   39.21 +    /*
   39.22 +     * PCI-ISA interrupt router.
   39.23 +     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
   39.24 +     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
   39.25 +     * The router provides a programmable mapping from each link to a GSI.
   39.26 +     */
   39.27 +    struct hvm_hw_pci_link pci_link;
   39.28 +
   39.29 +    /* Virtual interrupt and via-link for paravirtual platform driver. */
   39.30 +    uint32_t callback_via_asserted;
   39.31 +    union {
   39.32 +        enum {
   39.33 +            HVMIRQ_callback_none,
   39.34 +            HVMIRQ_callback_gsi,
   39.35 +            HVMIRQ_callback_pci_intx
   39.36 +        } callback_via_type;
   39.37 +        uint32_t pad; /* So the next field will be aligned */
   39.38 +    };
   39.39 +    union {
   39.40 +        uint32_t gsi;
   39.41 +        struct { uint8_t dev, intx; } pci;
   39.42 +    } callback_via;
   39.43 +
   39.44 +    /* Number of INTx wires asserting each PCI-ISA link. */
   39.45 +    u8 pci_link_assert_count[4];
   39.46 +
   39.47 +    /*
   39.48 +     * Number of wires asserting each GSI.
   39.49 +     * 
   39.50 +     * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
   39.51 +     * except ISA IRQ 0, which is connected to GSI 2.
   39.52 +     * PCI links map into this space via the PCI-ISA bridge.
   39.53 +     * 
    39.54 +     * GSIs 16+ are used only by PCI devices. The mapping from PCI device to
   39.55 +     * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
   39.56 +     */
   39.57 +    u8 gsi_assert_count[VIOAPIC_NUM_PINS];
   39.58 +
   39.59 +    /*
   39.60 +     * GSIs map onto PIC/IO-APIC in the usual way:
   39.61 +     *  0-7:  Master 8259 PIC, IO-APIC pins 0-7
   39.62 +     *  8-15: Slave  8259 PIC, IO-APIC pins 8-15
   39.63 +     *  16+ : IO-APIC pins 16+
   39.64 +     */
   39.65 +
   39.66 +    /* Last VCPU that was delivered a LowestPrio interrupt. */
   39.67 +    u8 round_robin_prev_vcpu;
   39.68 +};
   39.69 +
   39.70  #define hvm_pci_intx_gsi(dev, intx)  \
   39.71      (((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
   39.72  #define hvm_pci_intx_link(dev, intx) \
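
The routing macros above implement the mappings documented in the struct hvm_irq comments. A small standalone sketch of the same arithmetic, purely for illustration (the device/INTx values are examples only):

#include <stdio.h>

static unsigned int pci_intx_gsi(unsigned int dev, unsigned int intx)
{
    /* ((device*4 + device/8 + INTx#) & 31) + 16 */
    return (((dev << 2) + (dev >> 3) + intx) & 31) + 16;
}

static unsigned int pci_intx_link(unsigned int dev, unsigned int intx)
{
    /* 'Barber's pole' mapping: (device + INTx#) & 3 */
    return (dev + intx) & 3;
}

int main(void)
{
    /* Device 2, INTA# (intx 0) lands on GSI 24 via link 2;
     * device 9, INTB# (intx 1) lands on GSI 22 via link 2. */
    printf("dev 2 INTA -> GSI %u, link %u\n",
           pci_intx_gsi(2, 0), pci_intx_link(2, 0));
    printf("dev 9 INTB -> GSI %u, link %u\n",
           pci_intx_gsi(9, 1), pci_intx_link(9, 1));
    return 0;
}
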
    40.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Feb 07 10:14:41 2007 -0700
    40.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Feb 07 10:46:18 2007 -0700
    40.3 @@ -221,23 +221,37 @@ typedef int (*hvm_save_handler) (struct 
    40.4  typedef int (*hvm_load_handler) (struct domain *d,
    40.5                                   hvm_domain_context_t *h);
    40.6  
    40.7 -/* Init-time function to declare a pair of handlers for a type */
    40.8 +/* Init-time function to declare a pair of handlers for a type,
    40.9 + * and the maximum buffer space needed to save this type of state */
   40.10  void hvm_register_savevm(uint16_t typecode,
   40.11                           const char *name, 
   40.12                           hvm_save_handler save_state,
   40.13 -                         hvm_load_handler load_state);
   40.14 +                         hvm_load_handler load_state,
   40.15 +                         size_t size, int kind);
   40.16 +
   40.17 +/* The space needed for saving can be per-domain or per-vcpu: */
   40.18 +#define HVMSR_PER_DOM  0
   40.19 +#define HVMSR_PER_VCPU 1
   40.20  
   40.21 -/* Syntactic sugar around that function */
   40.22 -#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load)                     \
   40.23 -static int __hvm_register_##_x##_save_and_restore(void)                 \
   40.24 -{                                                                       \
   40.25 -    hvm_register_savevm(HVM_SAVE_CODE(_x), #_x, &_save, &_load);        \
   40.26 -    return 0;                                                           \
   40.27 -}                                                                       \
   40.28 +/* Syntactic sugar around that function: specify the max number of
    40.29 + * saves, and this calculates the size of the buffer needed */
   40.30 +#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
   40.31 +static int __hvm_register_##_x##_save_and_restore(void)                   \
   40.32 +{                                                                         \
   40.33 +    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
   40.34 +                        #_x,                                              \
   40.35 +                        &_save,                                           \
   40.36 +                        &_load,                                           \
   40.37 +                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
   40.38 +                                  + sizeof (struct hvm_save_descriptor)), \
   40.39 +                        _k);                                              \
   40.40 +    return 0;                                                             \
   40.41 +}                                                                         \
   40.42  __initcall(__hvm_register_##_x##_save_and_restore);
   40.43  
   40.44  
   40.45  /* Entry points for saving and restoring HVM domain state */
   40.46 +size_t hvm_save_size(struct domain *d);
   40.47  int hvm_save(struct domain *d, hvm_domain_context_t *h);
   40.48  int hvm_load(struct domain *d, hvm_domain_context_t *h);
   40.49  
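
To make the sizing concrete: the PIC registration earlier in this changeset, HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM), expands (inside its __initcall wrapper) to roughly

    hvm_register_savevm(HVM_SAVE_CODE(PIC), "PIC", &vpic_save, &vpic_load,
                        2 * (HVM_SAVE_LENGTH(PIC)
                             + sizeof (struct hvm_save_descriptor)),
                        HVMSR_PER_DOM);

i.e. room for two descriptor-plus-payload records (master and slave 8259), which hvm_save_size() counts once per domain; HVMSR_PER_VCPU entries such as LAPIC_REGS are instead counted once per VCPU.
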
    41.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Wed Feb 07 10:14:41 2007 -0700
    41.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Wed Feb 07 10:46:18 2007 -0700
    41.3 @@ -78,6 +78,8 @@ int cpu_get_apic_interrupt(struct vcpu *
    41.4  int  vlapic_init(struct vcpu *v);
    41.5  void vlapic_destroy(struct vcpu *v);
    41.6  
    41.7 +void vlapic_reset(struct vlapic *vlapic);
    41.8 +
    41.9  void vlapic_msr_set(struct vlapic *vlapic, uint64_t value);
   41.10  
   41.11  int vlapic_accept_pic_intr(struct vcpu *v);
    42.1 --- a/xen/include/asm-x86/hvm/vpt.h	Wed Feb 07 10:14:41 2007 -0700
    42.2 +++ b/xen/include/asm-x86/hvm/vpt.h	Wed Feb 07 10:46:18 2007 -0700
    42.3 @@ -66,7 +66,7 @@ struct periodic_time {
    42.4      u64 last_plt_gtime;         /* platform time when last IRQ is injected */
    42.5      struct timer timer;         /* ac_timer */
    42.6      time_cb *cb;
    42.7 -    void *priv;                 /* ponit back to platform time source */
    42.8 +    void *priv;                 /* point back to platform time source */
    42.9  };
   42.10  
   42.11  
   42.12 @@ -76,6 +76,8 @@ struct periodic_time {
   42.13  typedef struct PITState {
   42.14      /* Hardware state */
   42.15      struct hvm_hw_pit hw;
    42.16 +    /* Last time the counters read zero, for calculating counter reads */
   42.17 +    int64_t count_load_time[3];
   42.18      /* irq handling */
   42.19      struct periodic_time pt[3];
   42.20  } PITState;
    43.1 --- a/xen/include/public/domctl.h	Wed Feb 07 10:14:41 2007 -0700
    43.2 +++ b/xen/include/public/domctl.h	Wed Feb 07 10:46:18 2007 -0700
    43.3 @@ -390,7 +390,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_setti
    43.4  #define XEN_DOMCTL_sethvmcontext   34
    43.5  typedef struct xen_domctl_hvmcontext {
    43.6      uint32_t size; /* IN/OUT: size of buffer / bytes filled */
    43.7 -    XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT */
    43.8 +    XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT: data, or call gethvmcontext 
    43.9 +                                       * with NULL buffer to get size req'd */
   43.10  } xen_domctl_hvmcontext_t;
   43.11  DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
   43.12  
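
A hedged sketch of the two-call pattern the new comment describes, written directly against the domctl structures. do_domctl() stands in for whatever issues the hypercall (libxc carries such a helper), the buffer locking a real caller needs is omitted, and returning the required size through the IN/OUT size field is inferred from the comment rather than verified here.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xen/xen.h>
#include <xen/domctl.h>

/* Assumed hypercall wrapper; not defined in this sketch. */
extern int do_domctl(int xc_handle, struct xen_domctl *domctl);

static uint8_t *fetch_hvm_context(int xc_handle, domid_t dom, uint32_t *size)
{
    struct xen_domctl domctl;
    uint8_t *buf;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_gethvmcontext;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = dom;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, NULL);  /* size query */

    if ( do_domctl(xc_handle, &domctl) < 0 )
        return NULL;

    *size = domctl.u.hvmcontext.size;          /* bytes the hypervisor needs */
    if ( (buf = malloc(*size)) == NULL )
        return NULL;

    domctl.u.hvmcontext.size = *size;
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, buf);   /* real fetch */
    if ( do_domctl(xc_handle, &domctl) < 0 )
    {
        free(buf);
        return NULL;
    }
    return buf;                                /* *size bytes of HVM state */
}
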
    44.1 --- a/xen/include/public/hvm/save.h	Wed Feb 07 10:14:41 2007 -0700
    44.2 +++ b/xen/include/public/hvm/save.h	Wed Feb 07 10:46:18 2007 -0700
    44.3 @@ -140,13 +140,10 @@ struct hvm_hw_cpu {
    44.4      uint64_t sysenter_esp;
    44.5      uint64_t sysenter_eip;
    44.6  
    44.7 -    /* msr for em64t */
    44.8 +    /* MSRs */
    44.9      uint64_t shadow_gs;
   44.10      uint64_t flags;
   44.11 -
   44.12 -    /* same size as VMX_MSR_COUNT */
   44.13      uint64_t msr_items[6];
   44.14 -    uint64_t vmxassist_enabled;
   44.15  
   44.16      /* guest's idea of what rdtsc() would return */
   44.17      uint64_t tsc;
   44.18 @@ -155,32 +152,6 @@ struct hvm_hw_cpu {
   44.19  DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
   44.20  
   44.21  
   44.22 -/* 
   44.23 - *  PIT
   44.24 - */
   44.25 -
   44.26 -struct hvm_hw_pit {
   44.27 -    struct hvm_hw_pit_channel {
   44.28 -        int64_t count_load_time;
   44.29 -        uint32_t count; /* can be 65536 */
   44.30 -        uint16_t latched_count;
   44.31 -        uint8_t count_latched;
   44.32 -        uint8_t status_latched;
   44.33 -        uint8_t status;
   44.34 -        uint8_t read_state;
   44.35 -        uint8_t write_state;
   44.36 -        uint8_t write_latch;
   44.37 -        uint8_t rw_mode;
   44.38 -        uint8_t mode;
   44.39 -        uint8_t bcd; /* not supported */
   44.40 -        uint8_t gate; /* timer start */
   44.41 -    } channels[3];  /* 3 x 24 bytes */
   44.42 -    uint32_t speaker_data_on;
   44.43 -};
   44.44 -
   44.45 -DECLARE_HVM_SAVE_TYPE(PIT, 3, struct hvm_hw_pit);
   44.46 -
   44.47 -
   44.48  /*
   44.49   * PIC
   44.50   */
   44.51 @@ -233,7 +204,7 @@ struct hvm_hw_vpic {
   44.52      uint8_t int_output;
   44.53  };
   44.54  
   44.55 -DECLARE_HVM_SAVE_TYPE(PIC, 4, struct hvm_hw_vpic);
   44.56 +DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
   44.57  
   44.58  
   44.59  /*
   44.60 @@ -275,76 +246,8 @@ struct hvm_hw_vioapic {
   44.61      } redirtbl[VIOAPIC_NUM_PINS];
   44.62  };
   44.63  
   44.64 -DECLARE_HVM_SAVE_TYPE(IOAPIC, 5, struct hvm_hw_vioapic);
   44.65 -
   44.66 -
   44.67 -/*
   44.68 - * IRQ
   44.69 - */
   44.70 -
   44.71 -struct hvm_hw_irq {
   44.72 -    /*
   44.73 -     * Virtual interrupt wires for a single PCI bus.
   44.74 -     * Indexed by: device*4 + INTx#.
   44.75 -     */
   44.76 -    DECLARE_BITMAP(pci_intx, 32*4);
   44.77 -
   44.78 -    /*
   44.79 -     * Virtual interrupt wires for ISA devices.
   44.80 -     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
   44.81 -     */
   44.82 -    DECLARE_BITMAP(isa_irq, 16);
   44.83 -
   44.84 -    /* Virtual interrupt and via-link for paravirtual platform driver. */
   44.85 -    uint32_t callback_via_asserted;
   44.86 -    union {
   44.87 -        enum {
   44.88 -            HVMIRQ_callback_none,
   44.89 -            HVMIRQ_callback_gsi,
   44.90 -            HVMIRQ_callback_pci_intx
   44.91 -        } callback_via_type;
   44.92 -        uint32_t pad; /* So the next field will be aligned */
   44.93 -    };
   44.94 -    union {
   44.95 -        uint32_t gsi;
   44.96 -        struct { uint8_t dev, intx; } pci;
   44.97 -    } callback_via;
   44.98 +DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
   44.99  
  44.100 -    /*
  44.101 -     * PCI-ISA interrupt router.
  44.102 -     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
  44.103 -     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
  44.104 -     * The router provides a programmable mapping from each link to a GSI.
  44.105 -     */
  44.106 -    u8 pci_link_route[4];
  44.107 -
  44.108 -    /* Number of INTx wires asserting each PCI-ISA link. */
  44.109 -    u8 pci_link_assert_count[4];
  44.110 -
  44.111 -    /*
  44.112 -     * Number of wires asserting each GSI.
  44.113 -     * 
  44.114 -     * GSIs 0-15 are the ISA IRQs. ISA devices map directly into this space
  44.115 -     * except ISA IRQ 0, which is connected to GSI 2.
  44.116 -     * PCI links map into this space via the PCI-ISA bridge.
  44.117 -     * 
  44.118 -     * GSIs 16+ are used only be PCI devices. The mapping from PCI device to
  44.119 -     * GSI is as follows: ((device*4 + device/8 + INTx#) & 31) + 16
  44.120 -     */
  44.121 -    u8 gsi_assert_count[VIOAPIC_NUM_PINS];
  44.122 -
  44.123 -    /*
  44.124 -     * GSIs map onto PIC/IO-APIC in the usual way:
  44.125 -     *  0-7:  Master 8259 PIC, IO-APIC pins 0-7
  44.126 -     *  8-15: Slave  8259 PIC, IO-APIC pins 8-15
  44.127 -     *  16+ : IO-APIC pins 16+
  44.128 -     */
  44.129 -
  44.130 -    /* Last VCPU that was delivered a LowestPrio interrupt. */
  44.131 -    u8 round_robin_prev_vcpu;
  44.132 -};
  44.133 -
  44.134 -DECLARE_HVM_SAVE_TYPE(IRQ, 6, struct hvm_hw_irq);
  44.135  
  44.136  /*
  44.137   * LAPIC
  44.138 @@ -356,14 +259,82 @@ struct hvm_hw_lapic {
  44.139      uint32_t             timer_divisor;
  44.140  };
  44.141  
  44.142 -DECLARE_HVM_SAVE_TYPE(LAPIC, 7, struct hvm_hw_lapic);
  44.143 +DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
  44.144  
  44.145  struct hvm_hw_lapic_regs {
  44.146      /* A 4k page of register state */
  44.147      uint8_t  data[0x400];
  44.148  };
  44.149  
  44.150 -DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 8, struct hvm_hw_lapic_regs);
  44.151 +DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
  44.152 +
  44.153 +
  44.154 +/*
  44.155 + * IRQs
  44.156 + */
  44.157 +
  44.158 +struct hvm_hw_pci_irqs {
  44.159 +    /*
  44.160 +     * Virtual interrupt wires for a single PCI bus.
  44.161 +     * Indexed by: device*4 + INTx#.
  44.162 +     */
  44.163 +    union {
  44.164 +        DECLARE_BITMAP(i, 32*4);
  44.165 +        uint64_t pad[2];
  44.166 +    };
  44.167 +};
  44.168 +
  44.169 +DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
  44.170 +
  44.171 +struct hvm_hw_isa_irqs {
  44.172 +    /*
  44.173 +     * Virtual interrupt wires for ISA devices.
  44.174 +     * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
  44.175 +     */
  44.176 +    union {
  44.177 +        DECLARE_BITMAP(i, 16);
  44.178 +        uint64_t pad[1];
  44.179 +    };
  44.180 +};
  44.181 +
  44.182 +DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
  44.183 +
  44.184 +struct hvm_hw_pci_link {
  44.185 +    /*
  44.186 +     * PCI-ISA interrupt router.
  44.187 +     * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
  44.188 +     * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
  44.189 +     * The router provides a programmable mapping from each link to a GSI.
  44.190 +     */
  44.191 +    u8 route[4];
  44.192 +};
  44.193 +
  44.194 +DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
  44.195 +
  44.196 +
  44.197 +/* 
  44.198 + *  PIT
  44.199 + */
  44.200 +
  44.201 +struct hvm_hw_pit {
  44.202 +    struct hvm_hw_pit_channel {
  44.203 +        uint32_t count; /* can be 65536 */
  44.204 +        uint16_t latched_count;
  44.205 +        uint8_t count_latched;
  44.206 +        uint8_t status_latched;
  44.207 +        uint8_t status;
  44.208 +        uint8_t read_state;
  44.209 +        uint8_t write_state;
  44.210 +        uint8_t write_latch;
  44.211 +        uint8_t rw_mode;
  44.212 +        uint8_t mode;
  44.213 +        uint8_t bcd; /* not supported */
  44.214 +        uint8_t gate; /* timer start */
  44.215 +    } channels[3];  /* 3 x 16 bytes */
  44.216 +    uint32_t speaker_data_on;
  44.217 +};
  44.218 +
  44.219 +DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
  44.220  
  44.221  
  44.222  /* 
  44.223 @@ -378,7 +349,7 @@ struct hvm_hw_rtc {
  44.224      uint8_t cmos_index;
  44.225  };
  44.226  
  44.227 -DECLARE_HVM_SAVE_TYPE(RTC, 9, struct hvm_hw_rtc);
  44.228 +DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
  44.229  
  44.230  
  44.231  /*
  44.232 @@ -408,13 +379,13 @@ struct hvm_hw_hpet {
  44.233      uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
  44.234  };
  44.235  
  44.236 -DECLARE_HVM_SAVE_TYPE(HPET, 10, struct hvm_hw_hpet);
  44.237 +DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
  44.238  
  44.239  
  44.240  /* 
  44.241   * Largest type-code in use
  44.242   */
  44.243 -#define HVM_SAVE_CODE_MAX 10
  44.244 +#define HVM_SAVE_CODE_MAX 12
  44.245  
  44.246  
  44.247  /*