ia64/xen-unstable

changeset 12444:f78e499dd669

merge with xen-unstable.hg
author awilliam@xenbuild.aw
date Tue Nov 14 14:59:37 2006 -0700 (2006-11-14)
parents c10d4c6df482 f026d4091322
children 05d227d81935 fb107b9eee86
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c	Tue Nov 14 12:46:33 2006 -0700
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c	Tue Nov 14 14:59:37 2006 -0700
     1.3 @@ -249,7 +249,7 @@ void __iomem * __ioremap(unsigned long p
     1.4  		return NULL;
     1.5  	area->phys_addr = phys_addr;
     1.6  	addr = (void __iomem *) area->addr;
     1.7 -	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
     1.8 +	flags |= _KERNPG_TABLE;
     1.9  	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
    1.10  				     phys_addr>>PAGE_SHIFT,
    1.11  				     size, __pgprot(flags), domid)) {
     2.1 --- a/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Tue Nov 14 12:46:33 2006 -0700
     2.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Tue Nov 14 14:59:37 2006 -0700
     2.3 @@ -56,6 +56,11 @@
     2.4  struct dma_mapping_ops* dma_ops;
     2.5  EXPORT_SYMBOL(dma_ops);
     2.6  
     2.7 +#ifdef CONFIG_XEN_COMPAT_030002
     2.8 +unsigned int __kernel_page_user;
     2.9 +EXPORT_SYMBOL(__kernel_page_user);
    2.10 +#endif
    2.11 +
    2.12  extern unsigned long *contiguous_bitmap;
    2.13  
    2.14  static unsigned long dma_reserve __initdata;
    2.15 @@ -527,6 +532,33 @@ void __init xen_init_pt(void)
    2.16  	addr = page[pud_index(__START_KERNEL_map)];
    2.17  	addr_to_page(addr, page);
    2.18  
    2.19 +#ifdef CONFIG_XEN_COMPAT_030002
    2.20 +	/* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
    2.21 +	   in kernel PTEs. We check that here. */
    2.22 +	if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
    2.23 +		unsigned long *pg;
    2.24 +		pte_t pte;
    2.25 +
    2.26 +		/* Mess with the initial mapping of page 0. It's not needed. */
    2.27 +		BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
    2.28 +		addr = page[pmd_index(__START_KERNEL_map)];
    2.29 +		addr_to_page(addr, pg);
    2.30 +		pte.pte = pg[pte_index(__START_KERNEL_map)];
    2.31 +		BUG_ON(!(pte.pte & _PAGE_PRESENT));
    2.32 +
    2.33 +		/* If _PAGE_USER isn't set, we obviously do not need it. */
    2.34 +		if (pte.pte & _PAGE_USER) {
    2.35 +			/* _PAGE_USER is needed, but is it set implicitly? */
    2.36 +			pte.pte &= ~_PAGE_USER;
    2.37 +			if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
    2.38 +							  pte, 0) != 0) ||
    2.39 +			    !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
    2.40 +				/* We need to explicitly specify _PAGE_USER. */
    2.41 +				__kernel_page_user = _PAGE_USER;
    2.42 +		}
    2.43 +	}
    2.44 +#endif
    2.45 +
    2.46  	/* Construct mapping of initial pte page in our own directories. */
    2.47  	init_level4_pgt[pgd_index(__START_KERNEL_map)] = 
    2.48  		mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
     3.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/maddr.h	Tue Nov 14 12:46:33 2006 -0700
     3.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/maddr.h	Tue Nov 14 14:59:37 2006 -0700
     3.3 @@ -127,6 +127,7 @@ static inline maddr_t phys_to_machine(pa
     3.4  	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
     3.5  	return machine;
     3.6  }
     3.7 +
     3.8  static inline paddr_t machine_to_phys(maddr_t machine)
     3.9  {
    3.10  	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
    3.11 @@ -134,6 +135,19 @@ static inline paddr_t machine_to_phys(ma
    3.12  	return phys;
    3.13  }
    3.14  
    3.15 +static inline paddr_t pte_machine_to_phys(maddr_t machine)
    3.16 +{
    3.17 +	/*
    3.18 +	 * In PAE mode, the NX bit needs to be dealt with in the value
    3.19 +	 * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
    3.20 +	 * but for i386 the conversion to ulong for the argument will
    3.21 +	 * clip it off.
    3.22 +	 */
    3.23 +	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
    3.24 +	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
    3.25 +	return phys;
    3.26 +}
    3.27 +
    3.28  /* VIRT <-> MACHINE conversion */
    3.29  #define virt_to_machine(v)	(phys_to_machine(__pa(v)))
    3.30  #define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
     4.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h	Tue Nov 14 12:46:33 2006 -0700
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/page.h	Tue Nov 14 14:59:37 2006 -0700
     4.3 @@ -6,6 +6,16 @@
     4.4  #define PAGE_SIZE	(1UL << PAGE_SHIFT)
     4.5  #define PAGE_MASK	(~(PAGE_SIZE-1))
     4.6  
     4.7 +#ifdef CONFIG_X86_PAE
     4.8 +#define __PHYSICAL_MASK_SHIFT	36
     4.9 +#define __PHYSICAL_MASK		((1ULL << __PHYSICAL_MASK_SHIFT) - 1)
    4.10 +#define PHYSICAL_PAGE_MASK	(~((1ULL << PAGE_SHIFT) - 1) & __PHYSICAL_MASK)
    4.11 +#else
    4.12 +#define __PHYSICAL_MASK_SHIFT	32
    4.13 +#define __PHYSICAL_MASK		(~0UL)
    4.14 +#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)
    4.15 +#endif
    4.16 +
    4.17  #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
    4.18  #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
    4.19  
    4.20 @@ -85,7 +95,7 @@ static inline unsigned long long pte_val
    4.21  
    4.22  	if (x.pte_low) {
    4.23  		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
    4.24 -		ret = machine_to_phys(ret) | 1;
    4.25 +		ret = pte_machine_to_phys(ret) | 1;
    4.26  	} else {
    4.27  		ret = 0;
    4.28  	}
    4.29 @@ -94,13 +104,13 @@ static inline unsigned long long pte_val
    4.30  static inline unsigned long long pmd_val(pmd_t x)
    4.31  {
    4.32  	unsigned long long ret = x.pmd;
    4.33 -	if (ret) ret = machine_to_phys(ret) | 1;
    4.34 +	if (ret) ret = pte_machine_to_phys(ret) | 1;
    4.35  	return ret;
    4.36  }
    4.37  static inline unsigned long long pgd_val(pgd_t x)
    4.38  {
    4.39  	unsigned long long ret = x.pgd;
    4.40 -	if (ret) ret = machine_to_phys(ret) | 1;
    4.41 +	if (ret) ret = pte_machine_to_phys(ret) | 1;
    4.42  	return ret;
    4.43  }
    4.44  static inline unsigned long long pte_val_ma(pte_t x)
    4.45 @@ -115,7 +125,8 @@ typedef struct { unsigned long pgprot; }
    4.46  #define pgprot_val(x)	((x).pgprot)
    4.47  #include <asm/maddr.h>
    4.48  #define boot_pte_t pte_t /* or would you rather have a typedef */
    4.49 -#define pte_val(x)	(((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
    4.50 +#define pte_val(x)	(((x).pte_low & 1) ? \
    4.51 +			 pte_machine_to_phys((x).pte_low) : \
    4.52  			 (x).pte_low)
    4.53  #define pte_val_ma(x)	((x).pte_low)
    4.54  #define __pte(x) ({ unsigned long _x = (x); \
    4.55 @@ -125,7 +136,7 @@ typedef struct { unsigned long pgprot; }
    4.56  static inline unsigned long pgd_val(pgd_t x)
    4.57  {
    4.58  	unsigned long ret = x.pgd;
    4.59 -	if (ret) ret = machine_to_phys(ret) | 1;
    4.60 +	if (ret) ret = pte_machine_to_phys(ret) | 1;
    4.61  	return ret;
    4.62  }
    4.63  #define HPAGE_SHIFT	22
     5.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/maddr.h	Tue Nov 14 12:46:33 2006 -0700
     5.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/maddr.h	Tue Nov 14 14:59:37 2006 -0700
     5.3 @@ -127,6 +127,14 @@ static inline paddr_t machine_to_phys(ma
     5.4  	return phys;
     5.5  }
     5.6  
     5.7 +static inline paddr_t pte_machine_to_phys(maddr_t machine)
     5.8 +{
     5.9 +	paddr_t phys;
    5.10 +	phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
    5.11 +	phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
    5.12 +	return phys;
    5.13 +}
    5.14 +
    5.15  /* VIRT <-> MACHINE conversion */
    5.16  #define virt_to_machine(v)	(phys_to_machine(__pa(v)))
    5.17  #define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
     6.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/page.h	Tue Nov 14 12:46:33 2006 -0700
     6.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/page.h	Tue Nov 14 14:59:37 2006 -0700
     6.3 @@ -33,6 +33,13 @@
     6.4  #define PAGE_SIZE	(1UL << PAGE_SHIFT)
     6.5  #endif
     6.6  #define PAGE_MASK	(~(PAGE_SIZE-1))
     6.7 +
     6.8 +/* See Documentation/x86_64/mm.txt for a description of the memory map. */
     6.9 +#define __PHYSICAL_MASK_SHIFT	46
    6.10 +#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
    6.11 +#define __VIRTUAL_MASK_SHIFT	48
    6.12 +#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
    6.13 +
    6.14  #define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & __PHYSICAL_MASK)
    6.15  
    6.16  #define THREAD_ORDER 1 
    6.17 @@ -90,28 +97,28 @@ typedef struct { unsigned long pgd; } pg
    6.18  
    6.19  typedef struct { unsigned long pgprot; } pgprot_t;
    6.20  
    6.21 -#define pte_val(x)	(((x).pte & 1) ? machine_to_phys((x).pte) : \
    6.22 +#define pte_val(x)	(((x).pte & 1) ? pte_machine_to_phys((x).pte) : \
    6.23  			 (x).pte)
    6.24  #define pte_val_ma(x)	((x).pte)
    6.25  
    6.26  static inline unsigned long pmd_val(pmd_t x)
    6.27  {
    6.28  	unsigned long ret = x.pmd;
    6.29 -	if (ret) ret = machine_to_phys(ret);
    6.30 +	if (ret) ret = pte_machine_to_phys(ret);
    6.31  	return ret;
    6.32  }
    6.33  
    6.34  static inline unsigned long pud_val(pud_t x)
    6.35  {
    6.36  	unsigned long ret = x.pud;
    6.37 -	if (ret) ret = machine_to_phys(ret);
    6.38 +	if (ret) ret = pte_machine_to_phys(ret);
    6.39  	return ret;
    6.40  }
    6.41  
    6.42  static inline unsigned long pgd_val(pgd_t x)
    6.43  {
    6.44  	unsigned long ret = x.pgd;
    6.45 -	if (ret) ret = machine_to_phys(ret);
    6.46 +	if (ret) ret = pte_machine_to_phys(ret);
    6.47  	return ret;
    6.48  }
    6.49  
    6.50 @@ -163,12 +170,6 @@ static inline pgd_t __pgd(unsigned long 
    6.51  /* to align the pointer to the (next) page boundary */
    6.52  #define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
    6.53  
    6.54 -/* See Documentation/x86_64/mm.txt for a description of the memory map. */
    6.55 -#define __PHYSICAL_MASK_SHIFT	46
    6.56 -#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
    6.57 -#define __VIRTUAL_MASK_SHIFT	48
    6.58 -#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
    6.59 -
    6.60  #define KERNEL_TEXT_SIZE  (40UL*1024*1024)
    6.61  #define KERNEL_TEXT_START 0xffffffff80000000UL 
    6.62  
     7.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgtable.h	Tue Nov 14 12:46:33 2006 -0700
     7.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgtable.h	Tue Nov 14 14:59:37 2006 -0700
     7.3 @@ -205,8 +205,14 @@ static inline pte_t ptep_get_and_clear_f
     7.4  #define _PAGE_PROTNONE	0x080	/* If not present */
     7.5  #define _PAGE_NX        (1UL<<_PAGE_BIT_NX)
     7.6  
     7.7 +#ifdef CONFIG_XEN_COMPAT_030002
     7.8 +extern unsigned int __kernel_page_user;
     7.9 +#else
    7.10 +#define __kernel_page_user 0
    7.11 +#endif
    7.12 +
    7.13  #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
    7.14 -#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
    7.15 +#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
    7.16  
    7.17  #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
    7.18  
    7.19 @@ -219,13 +225,13 @@ static inline pte_t ptep_get_and_clear_f
    7.20  #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
    7.21  #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
    7.22  #define __PAGE_KERNEL \
    7.23 -	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
    7.24 +	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
    7.25  #define __PAGE_KERNEL_EXEC \
    7.26 -	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
    7.27 +	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
    7.28  #define __PAGE_KERNEL_NOCACHE \
    7.29 -	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
    7.30 +	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
    7.31  #define __PAGE_KERNEL_RO \
    7.32 -	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
    7.33 +	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
    7.34  #define __PAGE_KERNEL_VSYSCALL \
    7.35  	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
    7.36  #define __PAGE_KERNEL_VSYSCALL_NOCACHE \
    7.37 @@ -422,7 +428,8 @@ static inline pud_t *pud_offset_k(pgd_t 
    7.38     can temporarily clear it. */
    7.39  #define pmd_present(x)	(pmd_val(x))
    7.40  #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
    7.41 -#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
    7.42 +#define pmd_bad(x) ((pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
    7.43 +		    != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
    7.44  #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
    7.45  #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
    7.46  
     8.1 --- a/tools/firmware/vmxassist/vm86.c	Tue Nov 14 12:46:33 2006 -0700
     8.2 +++ b/tools/firmware/vmxassist/vm86.c	Tue Nov 14 14:59:37 2006 -0700
     8.3 @@ -816,9 +816,7 @@ pop(struct regs *regs, unsigned prefix, 
     8.4  static int
     8.5  mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
     8.6  {
     8.7 -	unsigned eip = regs->eip - 1;
     8.8  	unsigned modrm = fetch8(regs);
     8.9 -	unsigned addr = operand(prefix, regs, modrm);
    8.10  
    8.11  	/* Only need to emulate segment loads in real->protected mode. */
    8.12  	if (mode != VM86_REAL_TO_PROTECTED)
     9.1 --- a/tools/ioemu/vl.c	Tue Nov 14 12:46:33 2006 -0700
     9.2 +++ b/tools/ioemu/vl.c	Tue Nov 14 14:59:37 2006 -0700
     9.3 @@ -6489,9 +6489,9 @@ int main(int argc, char **argv)
     9.4      }
     9.5  
     9.6      if (ram_size > MMIO_START) {	
     9.7 -        for (i = 0 ; i < MEM_G >> PAGE_SHIFT; i++)
     9.8 -            page_array[MMIO_START >> PAGE_SHIFT + i] =
     9.9 -                page_array[IO_PAGE_START >> PAGE_SHIFT + 1];
    9.10 +        for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
    9.11 +            page_array[(MMIO_START >> PAGE_SHIFT) + i] =
    9.12 +                page_array[(IO_PAGE_START >> PAGE_SHIFT) + 1];
    9.13      }
    9.14  
    9.15      phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
    10.1 --- a/tools/misc/xenperf.c	Tue Nov 14 12:46:33 2006 -0700
    10.2 +++ b/tools/misc/xenperf.c	Tue Nov 14 14:59:37 2006 -0700
    10.3 @@ -10,7 +10,6 @@
    10.4   * Description: 
    10.5   */
    10.6  
    10.7 -
    10.8  #include <xenctrl.h>
    10.9  #include <stdio.h>
   10.10  #include <stdlib.h>
   10.11 @@ -18,6 +17,57 @@
   10.12  #include <errno.h>
   10.13  #include <string.h>
   10.14  
   10.15 +#define X(name) [__HYPERVISOR_##name] = #name
   10.16 +const char *hypercall_name_table[64] =
   10.17 +{
   10.18 +    X(set_trap_table),
   10.19 +    X(mmu_update),
   10.20 +    X(set_gdt),
   10.21 +    X(stack_switch),
   10.22 +    X(set_callbacks),
   10.23 +    X(fpu_taskswitch),
   10.24 +    X(sched_op_compat),
   10.25 +    X(platform_op),
   10.26 +    X(set_debugreg),
   10.27 +    X(get_debugreg),
   10.28 +    X(update_descriptor),
   10.29 +    X(memory_op),
   10.30 +    X(multicall),
   10.31 +    X(update_va_mapping),
   10.32 +    X(set_timer_op),
   10.33 +    X(event_channel_op_compat),
   10.34 +    X(xen_version),
   10.35 +    X(console_io),
   10.36 +    X(physdev_op_compat),
   10.37 +    X(grant_table_op),
   10.38 +    X(vm_assist),
   10.39 +    X(update_va_mapping_otherdomain),
   10.40 +    X(iret),
   10.41 +    X(vcpu_op),
   10.42 +    X(set_segment_base),
   10.43 +    X(mmuext_op),
   10.44 +    X(acm_op),
   10.45 +    X(nmi_op),
   10.46 +    X(sched_op),
   10.47 +    X(callback_op),
   10.48 +    X(xenoprof_op),
   10.49 +    X(event_channel_op),
   10.50 +    X(physdev_op),
   10.51 +    X(hvm_op),
   10.52 +    X(sysctl),
   10.53 +    X(domctl),
   10.54 +    X(kexec_op),
   10.55 +    X(arch_0),
   10.56 +    X(arch_1),
   10.57 +    X(arch_2),
   10.58 +    X(arch_3),
   10.59 +    X(arch_4),
   10.60 +    X(arch_5),
   10.61 +    X(arch_6),
   10.62 +    X(arch_7),
   10.63 +};
   10.64 +#undef X
   10.65 +
   10.66  int lock_pages(void *addr, size_t len)
   10.67  {
   10.68      int e = 0;
   10.69 @@ -30,7 +80,7 @@ int lock_pages(void *addr, size_t len)
   10.70  void unlock_pages(void *addr, size_t len)
   10.71  {
   10.72  #ifndef __sun__
   10.73 -	munlock(addr, len);
   10.74 +    munlock(addr, len);
   10.75  #endif
   10.76  }
   10.77  
   10.78 @@ -38,10 +88,11 @@ int main(int argc, char *argv[])
   10.79  {
   10.80      int              i, j, xc_handle;
   10.81      xc_perfc_desc_t *pcd;
   10.82 -	xc_perfc_val_t  *pcv;
   10.83 -	xc_perfc_val_t  *val;
   10.84 -	int num_desc, num_val;
   10.85 -    unsigned int    sum, reset = 0, full = 0;
   10.86 +    xc_perfc_val_t  *pcv;
   10.87 +    xc_perfc_val_t  *val;
   10.88 +    int num_desc, num_val;
   10.89 +    unsigned int    sum, reset = 0, full = 0, pretty = 0;
   10.90 +    char hypercall_name[36];
   10.91  
   10.92      if ( argc > 1 )
   10.93      {
   10.94 @@ -53,6 +104,10 @@ int main(int argc, char *argv[])
   10.95              case 'f':
   10.96                  full = 1;
   10.97                  break;
   10.98 +            case 'p':
   10.99 +                full = 1;
  10.100 +                pretty = 1;
  10.101 +                break;
  10.102              case 'r':
  10.103                  reset = 1;
  10.104                  break;
  10.105 @@ -66,6 +121,7 @@ int main(int argc, char *argv[])
  10.106              printf("%s: [-r]\n", argv[0]);
  10.107              printf("no args: print digested counters\n");
  10.108              printf("    -f : print full arrays/histograms\n");
  10.109 +            printf("    -p : print full arrays/histograms in pretty format\n");
  10.110              printf("    -r : reset counters\n");
  10.111              return 0;
  10.112          }
  10.113 @@ -91,21 +147,21 @@ int main(int argc, char *argv[])
  10.114          return 0;
  10.115      }
  10.116  
  10.117 -	if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
  10.118 -						  NULL, NULL, &num_desc, &num_val) != 0 )
  10.119 -        {
  10.120 -            fprintf(stderr, "Error getting number of perf counters: %d (%s)\n",
  10.121 -                    errno, strerror(errno));
  10.122 -            return 1;
  10.123 -        }
  10.124 +    if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
  10.125 +                          NULL, NULL, &num_desc, &num_val) != 0 )
  10.126 +    {
  10.127 +        fprintf(stderr, "Error getting number of perf counters: %d (%s)\n",
  10.128 +                errno, strerror(errno));
  10.129 +        return 1;
  10.130 +    }
  10.131  
  10.132      pcd = malloc(sizeof(*pcd) * num_desc);
  10.133 -	pcv = malloc(sizeof(*pcv) * num_val);
  10.134 +    pcv = malloc(sizeof(*pcv) * num_val);
  10.135  
  10.136      if ( pcd == NULL
  10.137 -		 || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
  10.138 -		 || pcv == NULL
  10.139 -		 || lock_pages(pcd, sizeof(*pcv) * num_val) != 0)
  10.140 +         || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
  10.141 +         || pcv == NULL
  10.142 +         || lock_pages(pcd, sizeof(*pcv) * num_val) != 0)
  10.143      {
  10.144          fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
  10.145                  errno, strerror(errno));
  10.146 @@ -113,7 +169,7 @@ int main(int argc, char *argv[])
  10.147      }
  10.148  
  10.149      if ( xc_perfc_control(xc_handle, XEN_SYSCTL_PERFCOP_query,
  10.150 -						  pcd, pcv, NULL, NULL) != 0 )
  10.151 +                          pcd, pcv, NULL, NULL) != 0 )
  10.152      {
  10.153          fprintf(stderr, "Error getting perf counter: %d (%s)\n",
  10.154                  errno, strerror(errno));
  10.155 @@ -123,7 +179,7 @@ int main(int argc, char *argv[])
  10.156      unlock_pages(pcd, sizeof(*pcd) * num_desc);
  10.157      unlock_pages(pcv, sizeof(*pcv) * num_val);
  10.158  
  10.159 -	val = pcv;
  10.160 +    val = pcv;
  10.161      for ( i = 0; i < num_desc; i++ )
  10.162      {
  10.163          printf ("%-35s ", pcd[i].name);
  10.164 @@ -134,11 +190,37 @@ int main(int argc, char *argv[])
  10.165          printf ("T=%10u ", (unsigned int)sum);
  10.166  
  10.167          if ( full || (pcd[i].nr_vals <= 4) )
  10.168 -            for ( j = 0; j < pcd[i].nr_vals; j++ )
  10.169 -                printf(" %10u", (unsigned int)val[j]);
  10.170 +        {
  10.171 +            if ( pretty && (strcmp(pcd[i].name, "hypercalls") == 0) )
  10.172 +            {
  10.173 +                printf("\n");
  10.174 +                for( j = 0; j < pcd[i].nr_vals; j++ )
  10.175 +                {
  10.176 +                    if ( val[j] == 0 )
  10.177 +                        continue;
  10.178 +                    if ( (j < 64) && hypercall_name_table[j] )
  10.179 +                        strncpy(hypercall_name, hypercall_name_table[j],
  10.180 +                                sizeof(hypercall_name));
  10.181 +                    else
  10.182 +                        sprintf(hypercall_name, "[%d]", j);
  10.183 +                    hypercall_name[sizeof(hypercall_name)-1]='\0';
  10.184 +                    printf("%-35s ", hypercall_name);
  10.185 +                    printf("%12u\n", (unsigned int)val[j]);
  10.186 +                }
  10.187 +            }
  10.188 +            else
  10.189 +            {
  10.190 +                for ( j = 0; j < pcd[i].nr_vals; j++ )
  10.191 +                    printf(" %10u", (unsigned int)val[j]);
  10.192 +                printf("\n");
  10.193 +            }
  10.194 +        }
  10.195 +        else
  10.196 +        {
  10.197 +            printf("\n");
  10.198 +        }
  10.199  
  10.200 -        printf("\n");
  10.201 -		val += pcd[i].nr_vals;
  10.202 +        val += pcd[i].nr_vals;
  10.203      }
  10.204  
  10.205      return 0;
    11.1 --- a/tools/python/xen/xend/XendAPI.py	Tue Nov 14 12:46:33 2006 -0700
    11.2 +++ b/tools/python/xen/xend/XendAPI.py	Tue Nov 14 14:59:37 2006 -0700
    11.3 @@ -481,7 +481,8 @@ class XendAPI:
    11.4      def host_get_record(self, session, host_ref):
    11.5          node = XendNode.instance()
    11.6          dom = XendDomain.instance()
    11.7 -        record = {'name_label': node.name,
    11.8 +        record = {'uuid': node.uuid,
    11.9 +                  'name_label': node.name,
   11.10                    'name_description': '',
   11.11                    'software_version': node.xen_version(),
   11.12                    'resident_VMs': dom.get_domain_refs(),
    12.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Tue Nov 14 12:46:33 2006 -0700
    12.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Tue Nov 14 14:59:37 2006 -0700
    12.3 @@ -520,7 +520,7 @@ class XendDomainInfo:
    12.4          self._waitForDevice(dev_type, devid)
    12.5          return self.getDeviceController(dev_type).sxpr(devid)
    12.6  
    12.7 -    def device_configure(self, dev_config, devid):
    12.8 +    def device_configure(self, dev_config, devid = None):
    12.9          """Configure an existing device.
   12.10          
   12.11          @param dev_config: device configuration
    13.1 --- a/tools/python/xen/xend/XendStorageRepository.py	Tue Nov 14 12:46:33 2006 -0700
    13.2 +++ b/tools/python/xen/xend/XendStorageRepository.py	Tue Nov 14 14:59:37 2006 -0700
    13.3 @@ -31,10 +31,8 @@ XEND_STORAGE_MAX_IGNORE = -1
    13.4  XEND_STORAGE_DIR = "/var/lib/xend/storage/"
    13.5  XEND_STORAGE_QCOW_FILENAME = "%s.qcow"
    13.6  XEND_STORAGE_VDICFG_FILENAME = "%s.vdi.xml"
    13.7 -DF_COMMAND = "df -lPk"
    13.8  QCOW_CREATE_COMMAND = "/usr/sbin/qcow-create %d %s"
    13.9  
   13.10 -KB = 1024
   13.11  MB = 1024 *1024
   13.12  
   13.13  class DeviceInvalidError(Exception):
   13.14 @@ -151,23 +149,6 @@ class XendStorageRepository:
   13.15          finally:
   13.16              self.lock.release()
   13.17  
   13.18 -    def _get_df(self):
   13.19 -        """Returns the output of 'df' in a dictionary where the keys
   13.20 -        are the Linux device numbers, and the values are it's corresponding
   13.21 -        free space in bytes
   13.22 -
   13.23 -        @rtype: dictionary
   13.24 -        """
   13.25 -        df = commands.getoutput(DF_COMMAND)
   13.26 -        devnum_free = {}
   13.27 -        for line in df.split('\n')[1:]:
   13.28 -            words = line.split()
   13.29 -            mount_point = words[-1]
   13.30 -            dev_no = os.stat(mount_point).st_dev
   13.31 -            free_kb = int(words[3])
   13.32 -            devnum_free[dev_no] = free_kb * KB
   13.33 -        return devnum_free
   13.34 -
   13.35      def _get_free_space(self):
   13.36          """Returns the amount of free space in bytes available in the storage
   13.37          partition. Note that this may not be used if the storage repository
   13.38 @@ -175,12 +156,8 @@ class XendStorageRepository:
   13.39  
   13.40          @rtype: int
   13.41          """
   13.42 -        df = self._get_df()
   13.43 -        devnum = os.stat(self.storage_dir).st_dev
   13.44 -        if df.has_key(devnum):
   13.45 -            return df[devnum]
   13.46 -        raise DeviceInvalidError("Device not found for storage path: %s" %
   13.47 -                                 self.storage_dir)
   13.48 +        stfs = os.statvfs(self.storage_dir)
   13.49 +        return stfs.f_bavail * stfs.f_frsize
   13.50  
   13.51      def _has_space_available_for(self, size_bytes):
   13.52          """Returns whether there is enough space for an image in the
    14.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Nov 14 12:46:33 2006 -0700
    14.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Nov 14 14:59:37 2006 -0700
    14.3 @@ -990,7 +990,7 @@ static void svm_vmexit_do_cpuid(struct v
    14.4          cpuid(input, &eax, &ebx, &ecx, &edx);       
    14.5          if (input == 0x00000001 || input == 0x80000001 )
    14.6          {
    14.7 -            if ( !vlapic_global_enabled(vcpu_vlapic(v)) )
    14.8 +            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
    14.9              {
   14.10                  /* Since the apic is disabled, avoid any confusion 
   14.11                     about SMP cpus being available */
    15.1 --- a/xen/arch/x86/hvm/vlapic.c	Tue Nov 14 12:46:33 2006 -0700
    15.2 +++ b/xen/arch/x86/hvm/vlapic.c	Tue Nov 14 14:59:37 2006 -0700
    15.3 @@ -71,18 +71,23 @@ static unsigned int vlapic_lvt_mask[VLAP
    15.4  #define APIC_DEST_NOSHORT                0x0
    15.5  #define APIC_DEST_MASK                   0x800
    15.6  
    15.7 -#define vlapic_lvt_enabled(vlapic, lvt_type)    \
    15.8 +#define vlapic_lvt_enabled(vlapic, lvt_type)                    \
    15.9      (!(vlapic_get_reg(vlapic, lvt_type) & APIC_LVT_MASKED))
   15.10  
   15.11 -#define vlapic_lvt_vector(vlapic, lvt_type)     \
   15.12 +#define vlapic_lvt_vector(vlapic, lvt_type)                     \
   15.13      (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)
   15.14  
   15.15 -#define vlapic_lvt_dm(vlapic, lvt_type)           \
   15.16 +#define vlapic_lvt_dm(vlapic, lvt_type)                         \
   15.17      (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)
   15.18  
   15.19 -#define vlapic_lvtt_period(vlapic)     \
   15.20 +#define vlapic_lvtt_period(vlapic)                              \
   15.21      (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)
   15.22  
   15.23 +#define vlapic_base_address(vlapic)                             \
   15.24 +    (vlapic->apic_base_msr & MSR_IA32_APICBASE_BASE)
   15.25 +
   15.26 +static int vlapic_reset(struct vlapic *vlapic);
   15.27 +
   15.28  /*
   15.29   * Generic APIC bitmap vector update & search routines.
   15.30   */
   15.31 @@ -238,8 +243,7 @@ static int vlapic_match_dest(struct vcpu
   15.32          if ( dest_mode == 0 )
   15.33          {
   15.34              /* Physical mode. */
   15.35 -            if ( (dest == 0xFF) || /* broadcast? */
   15.36 -                 (GET_APIC_ID(vlapic_get_reg(target, APIC_ID)) == dest) )
   15.37 +            if ( (dest == 0xFF) || (dest == v->vcpu_id) )
   15.38                  result = 1;
   15.39          }
   15.40          else
   15.41 @@ -283,7 +287,7 @@ static int vlapic_accept_irq(struct vcpu
   15.42      case APIC_DM_FIXED:
   15.43      case APIC_DM_LOWEST:
   15.44          /* FIXME add logic for vcpu on reset */
   15.45 -        if ( unlikely(vlapic == NULL || !vlapic_enabled(vlapic)) )
   15.46 +        if ( unlikely(!vlapic_enabled(vlapic)) )
   15.47              break;
   15.48  
   15.49          if ( vlapic_test_and_set_irr(vector, vlapic) && trig_mode )
   15.50 @@ -319,7 +323,7 @@ static int vlapic_accept_irq(struct vcpu
   15.51          if ( trig_mode && !(level & APIC_INT_ASSERT) )
   15.52              break;
   15.53          /* FIXME How to check the situation after vcpu reset? */
   15.54 -        if ( test_and_clear_bit(_VCPUF_initialised, &v->vcpu_flags) )
   15.55 +        if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   15.56          {
   15.57              gdprintk(XENLOG_ERR, "Reset hvm vcpu not supported yet\n");
   15.58              goto exit_and_crash;
   15.59 @@ -371,21 +375,15 @@ struct vlapic *apic_round_robin(
   15.60  
   15.61      old = next = d->arch.hvm_domain.round_info[vector];
   15.62  
   15.63 -    /* the vcpu array is arranged according to vcpu_id */
   15.64      do {
   15.65          if ( ++next == MAX_VIRT_CPUS ) 
   15.66              next = 0;
   15.67 -        if ( (d->vcpu[next] == NULL) ||
   15.68 -             !test_bit(_VCPUF_initialised, &d->vcpu[next]->vcpu_flags) )
   15.69 +        if ( (d->vcpu[next] == NULL) || !test_bit(next, &bitmap) )
   15.70              continue;
   15.71 -
   15.72 -        if ( test_bit(next, &bitmap) )
   15.73 -        {
   15.74 -            target = vcpu_vlapic(d->vcpu[next]);
   15.75 -            if ( vlapic_enabled(target) )
   15.76 -                break;
   15.77 -            target = NULL;
   15.78 -        }
   15.79 +        target = vcpu_vlapic(d->vcpu[next]);
   15.80 +        if ( vlapic_enabled(target) )
   15.81 +            break;
   15.82 +        target = NULL;
   15.83      } while ( next != old );
   15.84  
   15.85      d->arch.hvm_domain.round_info[vector] = next;
   15.86 @@ -398,10 +396,9 @@ void vlapic_EOI_set(struct vlapic *vlapi
   15.87  {
   15.88      int vector = vlapic_find_highest_isr(vlapic);
   15.89  
   15.90 -    /* Not every write EOI will has correpsoning ISR,
   15.91 -       one example is when Kernel check timer on setup_IO_APIC */
   15.92 +    /* Some EOI writes may not have a matching to an in-service interrupt. */
   15.93      if ( vector == -1 )
   15.94 -        return ;
   15.95 +        return;
   15.96  
   15.97      vlapic_clear_vector(vector, vlapic->regs + APIC_ISR);
   15.98  
   15.99 @@ -538,7 +535,7 @@ static unsigned long vlapic_read(struct 
  15.100      unsigned int tmp;
  15.101      unsigned long result;
  15.102      struct vlapic *vlapic = vcpu_vlapic(v);
  15.103 -    unsigned int offset = address - vlapic->base_address;
  15.104 +    unsigned int offset = address - vlapic_base_address(vlapic);
  15.105  
  15.106      if ( offset > APIC_TDCR )
  15.107          return 0;
  15.108 @@ -588,7 +585,7 @@ static void vlapic_write(struct vcpu *v,
  15.109                           unsigned long len, unsigned long val)
  15.110  {
  15.111      struct vlapic *vlapic = vcpu_vlapic(v);
  15.112 -    unsigned int offset = address - vlapic->base_address;
  15.113 +    unsigned int offset = address - vlapic_base_address(vlapic);
  15.114  
  15.115      if ( offset != 0xb0 )
  15.116          HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
  15.117 @@ -641,10 +638,6 @@ static void vlapic_write(struct vcpu *v,
  15.118  
  15.119      switch ( offset )
  15.120      {
  15.121 -    case APIC_ID:   /* Local APIC ID */
  15.122 -        vlapic_set_reg(vlapic, APIC_ID, val);
  15.123 -        break;
  15.124 -
  15.125      case APIC_TASKPRI:
  15.126          vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
  15.127          vlapic->flush_tpr_threshold = 1;
  15.128 @@ -670,7 +663,7 @@ static void vlapic_write(struct vcpu *v,
  15.129              int i;
  15.130              uint32_t lvt_val;
  15.131  
  15.132 -            vlapic->status |= VLAPIC_SOFTWARE_DISABLE_MASK;
  15.133 +            vlapic->disabled |= VLAPIC_SW_DISABLED;
  15.134  
  15.135              for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
  15.136              {
  15.137 @@ -678,17 +671,11 @@ static void vlapic_write(struct vcpu *v,
  15.138                  vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
  15.139                                 lvt_val | APIC_LVT_MASKED);
  15.140              }
  15.141 -
  15.142 -            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
  15.143 -                 == APIC_DM_EXTINT )
  15.144 -                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.145          }
  15.146          else
  15.147          {
  15.148 -            vlapic->status &= ~VLAPIC_SOFTWARE_DISABLE_MASK;
  15.149 -            if ( (vlapic_get_reg(vlapic, APIC_LVT0) & APIC_MODE_MASK)
  15.150 -                  == APIC_DM_EXTINT )
  15.151 -                set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.152 +            vlapic->disabled &= ~VLAPIC_SW_DISABLED;
  15.153 +            vlapic->flush_tpr_threshold = 1;
  15.154          }
  15.155          break;
  15.156  
  15.157 @@ -712,26 +699,11 @@ static void vlapic_write(struct vcpu *v,
  15.158      case APIC_LVT0:         /* LVT LINT0 Reg */
  15.159      case APIC_LVT1:         /* LVT Lint1 Reg */
  15.160      case APIC_LVTERR:       /* LVT Error Reg */
  15.161 -    {
  15.162 -        if ( vlapic->status & VLAPIC_SOFTWARE_DISABLE_MASK )
  15.163 +        if ( vlapic_sw_disabled(vlapic) )
  15.164              val |= APIC_LVT_MASKED;
  15.165 -
  15.166          val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
  15.167 -
  15.168          vlapic_set_reg(vlapic, offset, val);
  15.169 -
  15.170 -        if ( (vlapic_vcpu(vlapic)->vcpu_id == 0) && (offset == APIC_LVT0) )
  15.171 -        {
  15.172 -            if ( (val & APIC_MODE_MASK) == APIC_DM_EXTINT )
  15.173 -                if ( val & APIC_LVT_MASKED)
  15.174 -                    clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.175 -                else
  15.176 -                    set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.177 -            else
  15.178 -                clear_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.179 -        }
  15.180 -    }
  15.181 -    break;
  15.182 +        break;
  15.183  
  15.184      case APIC_TMICT:
  15.185      {
  15.186 @@ -773,10 +745,8 @@ static void vlapic_write(struct vcpu *v,
  15.187  static int vlapic_range(struct vcpu *v, unsigned long addr)
  15.188  {
  15.189      struct vlapic *vlapic = vcpu_vlapic(v);
  15.190 -
  15.191 -    return (vlapic_global_enabled(vlapic) &&
  15.192 -            (addr >= vlapic->base_address) &&
  15.193 -            (addr < vlapic->base_address + PAGE_SIZE));
  15.194 +    unsigned long offset  = addr - vlapic_base_address(vlapic);
  15.195 +    return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
  15.196  }
  15.197  
  15.198  struct hvm_mmio_handler vlapic_mmio_handler = {
  15.199 @@ -787,17 +757,23 @@ struct hvm_mmio_handler vlapic_mmio_hand
  15.200  
  15.201  void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
  15.202  {
  15.203 -    vlapic->apic_base_msr = value;
  15.204 -    vlapic->base_address  = vlapic->apic_base_msr & MSR_IA32_APICBASE_BASE;
  15.205 +    if ( (vlapic->apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
  15.206 +    {
  15.207 +        if ( value & MSR_IA32_APICBASE_ENABLE )
  15.208 +        {
  15.209 +            vlapic_reset(vlapic);
  15.210 +            vlapic->disabled &= ~VLAPIC_HW_DISABLED;
  15.211 +        }
  15.212 +        else
  15.213 +        {
  15.214 +            vlapic->disabled |= VLAPIC_HW_DISABLED;
  15.215 +        }
  15.216 +    }
  15.217  
  15.218 -    if ( !(value & MSR_IA32_APICBASE_ENABLE) )
  15.219 -        set_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status );
  15.220 -    else
  15.221 -        clear_bit(_VLAPIC_GLOB_DISABLE, &vlapic->status);
  15.222 +    vlapic->apic_base_msr = value;
  15.223  
  15.224      HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
  15.225 -                "apic base msr is 0x%016"PRIx64", and base address is 0x%lx.",
  15.226 -                vlapic->apic_base_msr, vlapic->base_address);
  15.227 +                "apic base msr is 0x%016"PRIx64".", vlapic->apic_base_msr);
  15.228  }
  15.229  
  15.230  void vlapic_timer_fn(void *data)
  15.231 @@ -845,8 +821,15 @@ void vlapic_timer_fn(void *data)
  15.232  int vlapic_accept_pic_intr(struct vcpu *v)
  15.233  {
  15.234      struct vlapic *vlapic = vcpu_vlapic(v);
  15.235 +    uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);
  15.236  
  15.237 -    return vlapic ? test_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status) : 1;
  15.238 +    /*
  15.239 +     * Only CPU0 is wired to the 8259A. INTA cycles occur if LINT0 is set up
  15.240 +     * to accept ExtInts, or if the LAPIC is disabled (so LINT0 behaves as INTR).
  15.241 +     */
  15.242 +    return ((v->vcpu_id == 0) &&
  15.243 +            (((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
  15.244 +             vlapic_hw_disabled(vlapic)));
  15.245  }
  15.246  
  15.247  int cpu_get_apic_interrupt(struct vcpu *v, int *mode)
  15.248 @@ -854,7 +837,7 @@ int cpu_get_apic_interrupt(struct vcpu *
  15.249      struct vlapic *vlapic = vcpu_vlapic(v);
  15.250      int highest_irr;
  15.251  
  15.252 -    if ( !vlapic || !vlapic_enabled(vlapic) )
  15.253 +    if ( !vlapic_enabled(vlapic) )
  15.254          return -1;
  15.255  
  15.256      highest_irr = vlapic_find_highest_irr(vlapic);
  15.257 @@ -887,9 +870,6 @@ void vlapic_post_injection(struct vcpu *
  15.258  {
  15.259      struct vlapic *vlapic = vcpu_vlapic(v);
  15.260  
  15.261 -    if ( unlikely(vlapic == NULL) )
  15.262 -        return;
  15.263 -
  15.264      switch ( deliver_mode )
  15.265      {
  15.266      case APIC_DM_FIXED:
  15.267 @@ -920,36 +900,38 @@ void vlapic_post_injection(struct vcpu *
  15.268      }
  15.269  }
  15.270  
  15.271 +/* Reset the VLAPIC back to its power-on/reset state. */
  15.272  static int vlapic_reset(struct vlapic *vlapic)
  15.273  {
  15.274      struct vcpu *v = vlapic_vcpu(vlapic);
  15.275      int i;
  15.276  
  15.277 -    vlapic_set_reg(vlapic, APIC_ID, v->vcpu_id << 24);
  15.278 +    vlapic_set_reg(vlapic, APIC_ID,  v->vcpu_id << 24);
  15.279 +    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);
  15.280  
  15.281 -    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);
  15.282 +    for ( i = 0; i < 8; i++ )
  15.283 +    {
  15.284 +        vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
  15.285 +        vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
  15.286 +        vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
  15.287 +    }
  15.288 +    vlapic_set_reg(vlapic, APIC_ICR,     0);
  15.289 +    vlapic_set_reg(vlapic, APIC_ICR2,    0);
  15.290 +    vlapic_set_reg(vlapic, APIC_LDR,     0);
  15.291 +    vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
  15.292 +    vlapic_set_reg(vlapic, APIC_TMICT,   0);
  15.293 +    vlapic_set_reg(vlapic, APIC_TMCCT,   0);
  15.294 +    vlapic_set_tdcr(vlapic, 0);
  15.295 +
  15.296 +    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);
  15.297  
  15.298      for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
  15.299          vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
  15.300  
  15.301 -    vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);
  15.302 -
  15.303      vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
  15.304 -
  15.305 -    vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
  15.306 -
  15.307 -    vlapic->flush_tpr_threshold = 0;
  15.308 +    vlapic->disabled |= VLAPIC_SW_DISABLED;
  15.309  
  15.310 -    vlapic_set_tdcr(vlapic, 0);
  15.311 -
  15.312 -    vlapic->base_address = vlapic->apic_base_msr &
  15.313 -                           MSR_IA32_APICBASE_BASE;
  15.314 -
  15.315 -    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
  15.316 -                "vcpu=%p, id=%d, vlapic_apic_base_msr=0x%016"PRIx64", "
  15.317 -                "base_address=0x%0lx.",
  15.318 -                v,  GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)),
  15.319 -                vlapic->apic_base_msr, vlapic->base_address);
  15.320 +    vlapic->flush_tpr_threshold = 1;
  15.321  
  15.322      return 1;
  15.323  }
  15.324 @@ -974,6 +956,7 @@ int vlapic_init(struct vcpu *v)
  15.325  
  15.326      vlapic_reset(vlapic);
  15.327  
  15.328 +    vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
  15.329      if ( v->vcpu_id == 0 )
  15.330          vlapic->apic_base_msr |= MSR_IA32_APICBASE_BSP;
  15.331  
  15.332 @@ -986,7 +969,6 @@ int vlapic_init(struct vcpu *v)
  15.333      {
  15.334          vlapic_set_reg(vlapic, APIC_LVT0, APIC_MODE_EXTINT << 8);
  15.335          vlapic_set_reg(vlapic, APIC_LVT1, APIC_MODE_NMI << 8);
  15.336 -        set_bit(_VLAPIC_BSP_ACCEPT_PIC, &vlapic->status);
  15.337      }
  15.338  #endif
  15.339  
    16.1 --- a/xen/arch/x86/hvm/vmx/io.c	Tue Nov 14 12:46:33 2006 -0700
    16.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Tue Nov 14 14:59:37 2006 -0700
    16.3 @@ -69,20 +69,21 @@ static inline int is_interruptibility_st
    16.4  #ifdef __x86_64__
    16.5  static void update_tpr_threshold(struct vlapic *vlapic)
    16.6  {
    16.7 -    int highest_irr, tpr;
    16.8 +    int max_irr, tpr;
    16.9  
   16.10      /* Clear the work-to-do flag /then/ do the work. */
   16.11      vlapic->flush_tpr_threshold = 0;
   16.12      mb();
   16.13  
   16.14 -    highest_irr = vlapic_find_highest_irr(vlapic);
   16.15 -    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
   16.16 +    if ( !vlapic_enabled(vlapic) || 
   16.17 +         ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
   16.18 +    {
   16.19 +        __vmwrite(TPR_THRESHOLD, 0);
   16.20 +        return;
   16.21 +    }
   16.22  
   16.23 -    if ( highest_irr == -1 )
   16.24 -        __vmwrite(TPR_THRESHOLD, 0);
   16.25 -    else
   16.26 -        __vmwrite(TPR_THRESHOLD,
   16.27 -                  (highest_irr > tpr) ? (tpr >> 4) : (highest_irr >> 4));
   16.28 +    tpr = vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xF0;
   16.29 +    __vmwrite(TPR_THRESHOLD, (max_irr > tpr) ? (tpr >> 4) : (max_irr >> 4));
   16.30  }
   16.31  #else
   16.32  #define update_tpr_threshold(v) ((void)0)
   16.33 @@ -115,7 +116,7 @@ asmlinkage void vmx_intr_assist(void)
   16.34              pic_set_xen_irq(pic, callback_irq, local_events_need_delivery());
   16.35      }
   16.36  
   16.37 -    if ( vlapic_enabled(vlapic) && vlapic->flush_tpr_threshold )
   16.38 +    if ( vlapic->flush_tpr_threshold )
   16.39          update_tpr_threshold(vlapic);
   16.40  
   16.41      has_ext_irq = cpu_has_pending_irq(v);
    17.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Nov 14 12:46:33 2006 -0700
    17.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Nov 14 14:59:37 2006 -0700
    17.3 @@ -853,7 +853,7 @@ static void vmx_do_cpuid(struct cpu_user
    17.4              /* Mask off reserved bits. */
    17.5              ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
    17.6  
    17.7 -            if ( !vlapic_global_enabled(vcpu_vlapic(v)) )
    17.8 +            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
    17.9                  clear_bit(X86_FEATURE_APIC, &edx);
   17.10      
   17.11  #if CONFIG_PAGING_LEVELS >= 3
    18.1 --- a/xen/arch/x86/mm/shadow/common.c	Tue Nov 14 12:46:33 2006 -0700
    18.2 +++ b/xen/arch/x86/mm/shadow/common.c	Tue Nov 14 14:59:37 2006 -0700
    18.3 @@ -1953,9 +1953,11 @@ int shadow_remove_write_access(struct vc
    18.4              case 3: GUESS(0x70381C00000UL + (fault_addr >> 27), 3); break;
    18.5              }
    18.6  
    18.7 -            /* Linux direct map at 0xffff810000000000 */
    18.8 +            /* 64bit Linux direct map at 0xffff810000000000; older kernels 
    18.9 +             * had it at 0x0000010000000000UL */
   18.10              gfn = sh_mfn_to_gfn(v->domain, gmfn); 
   18.11              GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4); 
   18.12 +            GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4); 
   18.13          }
   18.14  #endif /* CONFIG_PAGING_LEVELS >= 4 */
   18.15  #endif /* CONFIG_PAGING_LEVELS >= 3 */
    19.1 --- a/xen/arch/x86/mm/shadow/multi.c	Tue Nov 14 12:46:33 2006 -0700
    19.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Tue Nov 14 14:59:37 2006 -0700
    19.3 @@ -2623,6 +2623,10 @@ static int sh_page_fault(struct vcpu *v,
    19.4               * Fall through to the normal fault handing logic */
    19.5              perfc_incrc(shadow_fault_fast_fail);
    19.6              SHADOW_PRINTK("fast path false alarm!\n");
    19.7 +            /* Don't pass the reserved-bit bit: if we look at the fault 
    19.8 +             * below and decide to pass it to the guest, the reserved-bit
    19.9 +             * bit won't make sense there. */
   19.10 +            regs->error_code &= ~PFEC_reserved_bit;
   19.11          }
   19.12      }
   19.13  #endif /* SHOPT_FAST_FAULT_PATH */
   19.14 @@ -3266,8 +3270,9 @@ sh_set_toplevel_shadow(struct vcpu *v,
   19.15      }
   19.16      else
   19.17      {
   19.18 -        /* This guest MFN is a pagetable.  Must revoke write access. */
   19.19 -        if ( shadow_remove_write_access(v, gmfn, GUEST_PAGING_LEVELS, 0) != 0 )
   19.20 +        /* This guest MFN is a pagetable.  Must revoke write access 
   19.21 +         * (and can't use heuristics because we have no linear map here). */
   19.22 +        if ( shadow_remove_write_access(v, gmfn, 0, 0) != 0 )
   19.23              flush_tlb_mask(v->domain->domain_dirty_cpumask); 
   19.24          /* Make sure there's enough free shadow memory. */
   19.25          shadow_prealloc(d, SHADOW_MAX_ORDER); 
   19.26 @@ -3773,7 +3778,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
   19.27          shadow_validate_guest_pt_write(v, mfn, addr, bytes_on_page);
   19.28          bytes -= bytes_on_page;
   19.29          /* If we are writing zeros to this page, might want to unshadow */
   19.30 -        if ( *(u8 *)addr == 0 )
   19.31 +        if ( likely(bytes_on_page >= 4) && (*(u32 *)addr == 0) )
   19.32              check_for_early_unshadow(v, mfn);
   19.33          sh_unmap_domain_page(addr);
   19.34      }
   19.35 @@ -3818,7 +3823,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
   19.36                    vaddr, prev, old, new, *(unsigned long *)addr, bytes);
   19.37  
   19.38      /* If we are writing zeros to this page, might want to unshadow */
   19.39 -    if ( *(u8 *)addr == 0 )
   19.40 +    if ( likely(bytes >= 4) && (*(u32 *)addr == 0) )
   19.41          check_for_early_unshadow(v, mfn);
   19.42  
   19.43      sh_unmap_domain_page(addr);
   19.44 @@ -3853,7 +3858,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
   19.45          rv = X86EMUL_CMPXCHG_FAILED;
   19.46  
   19.47      /* If we are writing zeros to this page, might want to unshadow */
   19.48 -    if ( *(u8 *)addr == 0 )
   19.49 +    if ( *(u32 *)addr == 0 )
   19.50          check_for_early_unshadow(v, mfn);
   19.51  
   19.52      sh_unmap_domain_page(addr);
    20.1 --- a/xen/arch/x86/traps.c	Tue Nov 14 12:46:33 2006 -0700
    20.2 +++ b/xen/arch/x86/traps.c	Tue Nov 14 14:59:37 2006 -0700
    20.3 @@ -985,8 +985,7 @@ static inline int admin_io_okay(
    20.4      return ioports_access_permitted(v->domain, port, port + bytes - 1);
    20.5  }
    20.6  
    20.7 -/* Check admin limits. Silently fail the access if it is disallowed. */
    20.8 -static inline unsigned char inb_user(
    20.9 +static inline int guest_inb_okay(
   20.10      unsigned int port, struct vcpu *v, struct cpu_user_regs *regs)
   20.11  {
   20.12      /*
   20.13 @@ -996,19 +995,21 @@ static inline unsigned char inb_user(
   20.14       * Note that we could emulate bit 4 instead of directly reading port 0x61,
   20.15       * but there's not really a good reason to do so.
   20.16       */
   20.17 -    if ( admin_io_okay(port, 1, v, regs) || (port == 0x61) )
   20.18 -        return inb(port);
   20.19 -    return ~0;
   20.20 +    return (admin_io_okay(port, 1, v, regs) || (port == 0x61));
   20.21  }
   20.22 -//#define inb_user(_p, _d, _r) (admin_io_okay(_p, 1, _d, _r) ? inb(_p) : ~0)
   20.23 -#define inw_user(_p, _d, _r) (admin_io_okay(_p, 2, _d, _r) ? inw(_p) : ~0)
   20.24 -#define inl_user(_p, _d, _r) (admin_io_okay(_p, 4, _d, _r) ? inl(_p) : ~0)
   20.25 -#define outb_user(_v, _p, _d, _r) \
   20.26 -    (admin_io_okay(_p, 1, _d, _r) ? outb(_v, _p) : ((void)0))
   20.27 -#define outw_user(_v, _p, _d, _r) \
   20.28 -    (admin_io_okay(_p, 2, _d, _r) ? outw(_v, _p) : ((void)0))
   20.29 -#define outl_user(_v, _p, _d, _r) \
   20.30 -    (admin_io_okay(_p, 4, _d, _r) ? outl(_v, _p) : ((void)0))
   20.31 +#define guest_inw_okay(_p, _d, _r) admin_io_okay(_p, 2, _d, _r)
   20.32 +#define guest_inl_okay(_p, _d, _r) admin_io_okay(_p, 4, _d, _r)
   20.33 +#define guest_outb_okay(_p, _d, _r) admin_io_okay(_p, 1, _d, _r)
   20.34 +#define guest_outw_okay(_p, _d, _r) admin_io_okay(_p, 2, _d, _r)
   20.35 +#define guest_outl_okay(_p, _d, _r) admin_io_okay(_p, 4, _d, _r)
   20.36 +
   20.37 +/* I/O emulation support. Helper routines for, and type of, the stack stub.*/
   20.38 +void host_to_guest_gpr_switch(struct cpu_user_regs *)
   20.39 +    __attribute__((__regparm__(1)));
   20.40 +unsigned long guest_to_host_gpr_switch(unsigned long)
   20.41 +    __attribute__((__regparm__(1)));
   20.42 +typedef unsigned long (*io_emul_stub_t)(struct cpu_user_regs *)
   20.43 +    __attribute__((__regparm__(1)));
   20.44  
   20.45  /* Instruction fetch with error handling. */
   20.46  #define insn_fetch(_type, _size, cs, eip)                                   \
   20.47 @@ -1028,6 +1029,7 @@ static int emulate_privileged_op(struct 
   20.48      unsigned long *reg, eip = regs->eip, cs = regs->cs, res;
   20.49      u8 opcode, modrm_reg = 0, modrm_rm = 0, rep_prefix = 0;
   20.50      unsigned int port, i, op_bytes = 4, data, rc;
   20.51 +    char io_emul_stub[16];
   20.52      u32 l, h;
   20.53  
   20.54      /* Legacy prefixes. */
   20.55 @@ -1068,6 +1070,9 @@ static int emulate_privileged_op(struct 
   20.56          opcode = insn_fetch(u8, 1, cs, eip);
   20.57      }
   20.58  #endif
   20.59 +
   20.60 +    if ( opcode == 0x0f )
   20.61 +        goto twobyte_opcode;
   20.62      
   20.63      /* Input/Output String instructions. */
   20.64      if ( (opcode >= 0x6c) && (opcode <= 0x6f) )
   20.65 @@ -1083,16 +1088,17 @@ static int emulate_privileged_op(struct 
   20.66          case 0x6d: /* INSW/INSL */
   20.67              if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
   20.68                  goto fail;
   20.69 +            port = (u16)regs->edx;
   20.70              switch ( op_bytes )
   20.71              {
   20.72              case 1:
   20.73 -                data = (u8)inb_user((u16)regs->edx, v, regs);
   20.74 +                data = (u8)(guest_inb_okay(port, v, regs) ? inb(port) : ~0);
   20.75                  break;
   20.76              case 2:
   20.77 -                data = (u16)inw_user((u16)regs->edx, v, regs);
   20.78 +                data = (u16)(guest_inw_okay(port, v, regs) ? inw(port) : ~0);
   20.79                  break;
   20.80              case 4:
   20.81 -                data = (u32)inl_user((u16)regs->edx, v, regs);
   20.82 +                data = (u32)(guest_inl_okay(port, v, regs) ? inl(port) : ~0);
   20.83                  break;
   20.84              }
   20.85              if ( (rc = copy_to_user((void *)regs->edi, &data, op_bytes)) != 0 )
   20.86 @@ -1115,16 +1121,20 @@ static int emulate_privileged_op(struct 
   20.87                  propagate_page_fault(regs->esi + op_bytes - rc, 0);
   20.88                  return EXCRET_fault_fixed;
   20.89              }
   20.90 +            port = (u16)regs->edx;
   20.91              switch ( op_bytes )
   20.92              {
   20.93              case 1:
   20.94 -                outb_user((u8)data, (u16)regs->edx, v, regs);
   20.95 +                if ( guest_outb_okay(port, v, regs) )
   20.96 +                    outb((u8)data, port);
   20.97                  break;
   20.98              case 2:
   20.99 -                outw_user((u16)data, (u16)regs->edx, v, regs);
  20.100 +                if ( guest_outw_okay(port, v, regs) )
  20.101 +                    outw((u16)data, port);
  20.102                  break;
  20.103              case 4:
  20.104 -                outl_user((u32)data, (u16)regs->edx, v, regs);
  20.105 +                if ( guest_outl_okay(port, v, regs) )
  20.106 +                    outl((u32)data, port);
  20.107                  break;
  20.108              }
  20.109              regs->esi += (int)((regs->eflags & EF_DF) ? -op_bytes : op_bytes);
  20.110 @@ -1141,6 +1151,27 @@ static int emulate_privileged_op(struct 
  20.111          goto done;
  20.112      }
  20.113  
  20.114 +    /*
  20.115 +     * Very likely to be an I/O instruction (IN/OUT).
  20.116 +     * Build an on-stack stub to execute the instruction with full guest
  20.117 +     * GPR context. This is needed for some systems which (ab)use IN/OUT
  20.118 +     * to communicate with BIOS code in system-management mode.
  20.119 +     */
  20.120 +    /* call host_to_guest_gpr_switch */
  20.121 +    io_emul_stub[0] = 0xe8;
  20.122 +    *(s32 *)&io_emul_stub[1] =
  20.123 +        (char *)host_to_guest_gpr_switch - &io_emul_stub[5];
  20.124 +    /* data16 or nop */
  20.125 +    io_emul_stub[5] = (op_bytes != 2) ? 0x90 : 0x66;
  20.126 +    /* <io-access opcode> */
  20.127 +    io_emul_stub[6] = opcode;
  20.128 +    /* imm8 or nop */
  20.129 +    io_emul_stub[7] = 0x90;
  20.130 +    /* jmp guest_to_host_gpr_switch */
  20.131 +    io_emul_stub[8] = 0xe9;
  20.132 +    *(s32 *)&io_emul_stub[9] =
  20.133 +        (char *)guest_to_host_gpr_switch - &io_emul_stub[13];
  20.134 +
  20.135      /* I/O Port and Interrupt Flag instructions. */
  20.136      switch ( opcode )
  20.137      {
  20.138 @@ -1148,21 +1179,31 @@ static int emulate_privileged_op(struct 
  20.139          op_bytes = 1;
  20.140      case 0xe5: /* IN imm8,%eax */
  20.141          port = insn_fetch(u8, 1, cs, eip);
  20.142 +        io_emul_stub[7] = port; /* imm8 */
  20.143      exec_in:
  20.144          if ( !guest_io_okay(port, op_bytes, v, regs) )
  20.145              goto fail;
  20.146          switch ( op_bytes )
  20.147          {
  20.148          case 1:
  20.149 -            regs->eax &= ~0xffUL;
  20.150 -            regs->eax |= (u8)inb_user(port, v, regs);
  20.151 +            res = regs->eax & ~0xffUL;
  20.152 +            if ( guest_inb_okay(port, v, regs) )
  20.153 +                regs->eax = res | (u8)((io_emul_stub_t)io_emul_stub)(regs);
  20.154 +            else
  20.155 +                regs->eax = res | (u8)~0;
  20.156              break;
  20.157          case 2:
  20.158 -            regs->eax &= ~0xffffUL;
  20.159 -            regs->eax |= (u16)inw_user(port, v, regs);
  20.160 +            res = regs->eax & ~0xffffUL;
  20.161 +            if ( guest_inw_okay(port, v, regs) )
  20.162 +                regs->eax = res | (u16)((io_emul_stub_t)io_emul_stub)(regs);
  20.163 +            else
  20.164 +                regs->eax = res | (u16)~0;
  20.165              break;
  20.166          case 4:
  20.167 -            regs->eax = (u32)inl_user(port, v, regs);
  20.168 +            if ( guest_inl_okay(port, v, regs) )
  20.169 +                regs->eax = (u32)((io_emul_stub_t)io_emul_stub)(regs);
  20.170 +            else
  20.171 +                regs->eax = (u32)~0;
  20.172              break;
  20.173          }
  20.174          goto done;
  20.175 @@ -1177,19 +1218,23 @@ static int emulate_privileged_op(struct 
  20.176          op_bytes = 1;
  20.177      case 0xe7: /* OUT %eax,imm8 */
  20.178          port = insn_fetch(u8, 1, cs, eip);
  20.179 +        io_emul_stub[7] = port; /* imm8 */
  20.180      exec_out:
  20.181          if ( !guest_io_okay(port, op_bytes, v, regs) )
  20.182              goto fail;
  20.183          switch ( op_bytes )
  20.184          {
  20.185          case 1:
  20.186 -            outb_user((u8)regs->eax, port, v, regs);
  20.187 +            if ( guest_outb_okay(port, v, regs) )
  20.188 +                ((io_emul_stub_t)io_emul_stub)(regs);
  20.189              break;
  20.190          case 2:
  20.191 -            outw_user((u16)regs->eax, port, v, regs);
  20.192 +            if ( guest_outw_okay(port, v, regs) )
  20.193 +                ((io_emul_stub_t)io_emul_stub)(regs);
  20.194              break;
  20.195          case 4:
  20.196 -            outl_user((u32)regs->eax, port, v, regs);
  20.197 +            if ( guest_outl_okay(port, v, regs) )
  20.198 +                ((io_emul_stub_t)io_emul_stub)(regs);
  20.199              break;
  20.200          }
  20.201          goto done;
  20.202 @@ -1212,15 +1257,13 @@ static int emulate_privileged_op(struct 
  20.203           */
  20.204          /*v->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
  20.205          goto done;
  20.206 -
  20.207 -    case 0x0f: /* Two-byte opcode */
  20.208 -        break;
  20.209 -
  20.210 -    default:
  20.211 -        goto fail;
  20.212      }
  20.213  
  20.214 -    /* Remaining instructions only emulated from guest kernel. */
  20.215 +    /* No decode of this single-byte opcode. */
  20.216 +    goto fail;
  20.217 +
  20.218 + twobyte_opcode:
  20.219 +    /* Two-byte opcodes only emulated from guest kernel. */
  20.220      if ( !guest_kernel_mode(v, regs) )
  20.221          goto fail;
  20.222  
    21.1 --- a/xen/arch/x86/x86_32/Makefile	Tue Nov 14 12:46:33 2006 -0700
    21.2 +++ b/xen/arch/x86/x86_32/Makefile	Tue Nov 14 14:59:37 2006 -0700
    21.3 @@ -1,5 +1,6 @@
    21.4  obj-y += domain_page.o
    21.5  obj-y += entry.o
    21.6 +obj-y += gpr_switch.o
    21.7  obj-y += mm.o
    21.8  obj-y += seg_fixup.o
    21.9  obj-y += traps.o
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/xen/arch/x86/x86_32/gpr_switch.S	Tue Nov 14 14:59:37 2006 -0700
    22.3 @@ -0,0 +1,43 @@
    22.4 +/*
    22.5 + * GPR context switch between host and guest.
    22.6 + * Used by IO-port-access emulation stub.
    22.7 + *
    22.8 + * Copyright (c) 2006, Novell, Inc.
    22.9 + */
   22.10 +
   22.11 +#include <xen/config.h>
   22.12 +#include <asm/asm_defns.h>
   22.13 +
   22.14 +ENTRY(host_to_guest_gpr_switch)
   22.15 +        movl  (%esp), %ecx
   22.16 +        movl  %eax, (%esp)
   22.17 +        movl  UREGS_edx(%eax), %edx
   22.18 +        pushl %ebx
   22.19 +        movl  UREGS_ebx(%eax), %ebx
   22.20 +        pushl %ebp
   22.21 +        movl  UREGS_ebp(%eax), %ebp
   22.22 +        pushl %esi
   22.23 +        movl  UREGS_esi(%eax), %esi
   22.24 +        pushl %edi
   22.25 +        movl  UREGS_edi(%eax), %edi
   22.26 +        pushl %ecx
   22.27 +        movl  UREGS_ecx(%eax), %ecx
   22.28 +        movl  UREGS_eax(%eax), %eax
   22.29 +        ret
   22.30 +
   22.31 +ENTRY(guest_to_host_gpr_switch)
   22.32 +        pushl %edx
   22.33 +        movl  5*4(%esp), %edx
   22.34 +        movl  %eax, UREGS_eax(%edx)
   22.35 +        popl  UREGS_edx(%edx)
   22.36 +        movl  %edi, UREGS_edi(%edx)
   22.37 +        popl  %edi
   22.38 +        movl  %esi, UREGS_esi(%edx)
   22.39 +        popl  %esi
   22.40 +        movl  %ebp, UREGS_ebp(%edx)
   22.41 +        popl  %ebp
   22.42 +        movl  %ebx, UREGS_ebx(%edx)
   22.43 +        popl  %ebx
   22.44 +        movl  %ecx, UREGS_ecx(%edx)
   22.45 +        popl  %ecx
   22.46 +        ret
    23.1 --- a/xen/arch/x86/x86_64/Makefile	Tue Nov 14 12:46:33 2006 -0700
    23.2 +++ b/xen/arch/x86/x86_64/Makefile	Tue Nov 14 14:59:37 2006 -0700
    23.3 @@ -1,3 +1,4 @@
    23.4  obj-y += entry.o
    23.5 +obj-y += gpr_switch.o
    23.6  obj-y += mm.o
    23.7  obj-y += traps.o
    24.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.2 +++ b/xen/arch/x86/x86_64/gpr_switch.S	Tue Nov 14 14:59:37 2006 -0700
    24.3 @@ -0,0 +1,63 @@
    24.4 +/*
    24.5 + * GPR context switch between host and guest.
    24.6 + * Used by IO-port-access emulation stub.
    24.7 + *
    24.8 + * Copyright (c) 2006, Novell, Inc.
    24.9 + */
   24.10 +
   24.11 +#include <xen/config.h>
   24.12 +#include <asm/asm_defns.h>
   24.13 +
   24.14 +ENTRY(host_to_guest_gpr_switch)
   24.15 +        movq  (%rsp), %rcx
   24.16 +        movq  %rdi, (%rsp)
   24.17 +        movq  UREGS_rdx(%rdi), %rdx
   24.18 +        pushq %rbx
   24.19 +        movq  UREGS_rax(%rdi), %rax
   24.20 +        movq  UREGS_rbx(%rdi), %rbx
   24.21 +        pushq %rbp
   24.22 +        movq  UREGS_rsi(%rdi), %rsi
   24.23 +        movq  UREGS_rbp(%rdi), %rbp
   24.24 +        pushq %r12
   24.25 +        movq  UREGS_r8(%rdi), %r8
   24.26 +        movq  UREGS_r12(%rdi), %r12
   24.27 +        pushq %r13
   24.28 +        movq  UREGS_r9(%rdi), %r9
   24.29 +        movq  UREGS_r13(%rdi), %r13
   24.30 +        pushq %r14
   24.31 +        movq  UREGS_r10(%rdi), %r10
   24.32 +        movq  UREGS_r14(%rdi), %r14
   24.33 +        pushq %r15
   24.34 +        movq  UREGS_r11(%rdi), %r11
   24.35 +        movq  UREGS_r15(%rdi), %r15
   24.36 +        pushq %rcx
   24.37 +        movq  UREGS_rcx(%rdi), %rcx
   24.38 +        movq  UREGS_rdi(%rdi), %rdi
   24.39 +        ret
   24.40 +
   24.41 +ENTRY(guest_to_host_gpr_switch)
   24.42 +        pushq %rdi
   24.43 +        movq  7*8(%rsp), %rdi
   24.44 +        movq  %rax, UREGS_rax(%rdi)
   24.45 +        popq  UREGS_rdi(%rdi)
   24.46 +        movq  %r15, UREGS_r15(%rdi)
   24.47 +        movq  %r11, UREGS_r11(%rdi)
   24.48 +        popq  %r15
   24.49 +        movq  %r14, UREGS_r14(%rdi)
   24.50 +        movq  %r10, UREGS_r10(%rdi)
   24.51 +        popq  %r14
   24.52 +        movq  %r13, UREGS_r13(%rdi)
   24.53 +        movq  %r9, UREGS_r9(%rdi)
   24.54 +        popq  %r13
   24.55 +        movq  %r12, UREGS_r12(%rdi)
   24.56 +        movq  %r8, UREGS_r8(%rdi)
   24.57 +        popq  %r12
   24.58 +        movq  %rbp, UREGS_rbp(%rdi)
   24.59 +        movq  %rsi, UREGS_rsi(%rdi)
   24.60 +        popq  %rbp
   24.61 +        movq  %rbx, UREGS_rbx(%rdi)
   24.62 +        movq  %rdx, UREGS_rdx(%rdi)
   24.63 +        popq  %rbx
   24.64 +        movq  %rcx, UREGS_rcx(%rdi)
   24.65 +        popq  %rcx
   24.66 +        ret
    25.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Tue Nov 14 12:46:33 2006 -0700
    25.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Tue Nov 14 14:59:37 2006 -0700
    25.3 @@ -33,22 +33,23 @@
    25.4  #define VLAPIC_ID(vlapic)   \
    25.5      (GET_APIC_ID(vlapic_get_reg(vlapic, APIC_ID)))
    25.6  
    25.7 -#define _VLAPIC_GLOB_DISABLE            0x0
    25.8 -#define VLAPIC_GLOB_DISABLE_MASK        0x1
    25.9 -#define VLAPIC_SOFTWARE_DISABLE_MASK    0x2
   25.10 -#define _VLAPIC_BSP_ACCEPT_PIC          0x3
   25.11 -
   25.12 -#define vlapic_enabled(vlapic)              \
   25.13 -    (!((vlapic)->status &                   \
   25.14 -       (VLAPIC_GLOB_DISABLE_MASK | VLAPIC_SOFTWARE_DISABLE_MASK)))
   25.15 -
   25.16 -#define vlapic_global_enabled(vlapic)       \
   25.17 -    (!(test_bit(_VLAPIC_GLOB_DISABLE, &(vlapic)->status)))
   25.18 +/*
   25.19 + * APIC can be disabled in two ways:
   25.20 + *  1. 'Hardware disable': via IA32_APIC_BASE_MSR[11]
   25.21 + *     CPU should behave as if it does not have an APIC.
   25.22 + *  2. 'Software disable': via APIC_SPIV[8].
   25.23 + *     APIC is visible but does not respond to interrupt messages.
   25.24 + */
   25.25 +#define VLAPIC_HW_DISABLED              0x1
   25.26 +#define VLAPIC_SW_DISABLED              0x2
   25.27 +#define vlapic_sw_disabled(vlapic)  ((vlapic)->disabled & VLAPIC_SW_DISABLED)
   25.28 +#define vlapic_hw_disabled(vlapic)  ((vlapic)->disabled & VLAPIC_HW_DISABLED)
   25.29 +#define vlapic_disabled(vlapic)     ((vlapic)->disabled)
   25.30 +#define vlapic_enabled(vlapic)      (!vlapic_disabled(vlapic))
   25.31  
   25.32  struct vlapic {
   25.33 -    uint32_t           status;
   25.34      uint64_t           apic_base_msr;
   25.35 -    unsigned long      base_address;
   25.36 +    uint32_t           disabled; /* VLAPIC_xx_DISABLED */
   25.37      uint32_t           timer_divisor;
   25.38      struct timer       vlapic_timer;
   25.39      int                timer_pending_count;
    26.1 --- a/xen/include/public/arch-powerpc.h	Tue Nov 14 12:46:33 2006 -0700
    26.2 +++ b/xen/include/public/arch-powerpc.h	Tue Nov 14 14:59:37 2006 -0700
    26.3 @@ -1,17 +1,21 @@
    26.4  /*
    26.5 - * This program is free software; you can redistribute it and/or modify
    26.6 - * it under the terms of the GNU General Public License as published by
    26.7 - * the Free Software Foundation; either version 2 of the License, or
    26.8 - * (at your option) any later version.
    26.9 + * Permission is hereby granted, free of charge, to any person obtaining a copy
   26.10 + * of this software and associated documentation files (the "Software"), to
   26.11 + * deal in the Software without restriction, including without limitation the
   26.12 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   26.13 + * sell copies of the Software, and to permit persons to whom the Software is
   26.14 + * furnished to do so, subject to the following conditions:
   26.15   *
   26.16 - * This program is distributed in the hope that it will be useful,
   26.17 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
   26.18 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   26.19 - * GNU General Public License for more details.
   26.20 + * The above copyright notice and this permission notice shall be included in
   26.21 + * all copies or substantial portions of the Software.
   26.22   *
   26.23 - * You should have received a copy of the GNU General Public License
   26.24 - * along with this program; if not, write to the Free Software
   26.25 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
   26.26 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   26.27 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   26.28 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   26.29 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   26.30 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   26.31 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   26.32 + * DEALINGS IN THE SOFTWARE.
   26.33   *
   26.34   * Copyright (C) IBM Corp. 2005, 2006
   26.35   *
    27.1 --- a/xen/include/public/xencomm.h	Tue Nov 14 12:46:33 2006 -0700
    27.2 +++ b/xen/include/public/xencomm.h	Tue Nov 14 14:59:37 2006 -0700
    27.3 @@ -1,19 +1,23 @@
    27.4  /*
    27.5 - * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
    27.6 + * Permission is hereby granted, free of charge, to any person obtaining a copy
    27.7 + * of this software and associated documentation files (the "Software"), to
    27.8 + * deal in the Software without restriction, including without limitation the
    27.9 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   27.10 + * sell copies of the Software, and to permit persons to whom the Software is
   27.11 + * furnished to do so, subject to the following conditions:
   27.12   *
   27.13 - * This program is free software; you can redistribute it and/or modify
   27.14 - * it under the terms of the GNU General Public License as published by
   27.15 - * the Free Software Foundation; either version 2 of the License, or
   27.16 - * (at your option) any later version.
   27.17 - * 
   27.18 - * This program is distributed in the hope that it will be useful,
   27.19 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
   27.20 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   27.21 - * GNU General Public License for more details.
   27.22 - * 
   27.23 - * You should have received a copy of the GNU General Public License
   27.24 - * along with this program; if not, write to the Free Software
   27.25 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
   27.26 + * The above copyright notice and this permission notice shall be included in
   27.27 + * all copies or substantial portions of the Software.
   27.28 + *
   27.29 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   27.30 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   27.31 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   27.32 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   27.33 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   27.34 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   27.35 + * DEALINGS IN THE SOFTWARE.
   27.36 + *
   27.37 + * Copyright (C) IBM Corp. 2006
   27.38   */
   27.39  
   27.40  #ifndef _XEN_XENCOMM_H_