direct-io.hg

changeset 12439:63cb737b9a24

merge with xen-unstable.hg
author awilliam@xenbuild.aw
date Mon Nov 13 09:58:23 2006 -0700 (2006-11-13)
parents 529b3f3fb127 9a341c6ef6ae
children 2b0596c8a031
files
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S	Fri Nov 10 13:01:23 2006 -0700
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/head-xen.S	Mon Nov 13 09:58:23 2006 -0700
     1.3 @@ -9,7 +9,7 @@
     1.4  #include <asm/page.h>
     1.5  #include <asm/thread_info.h>
     1.6  #include <asm/asm-offsets.h>
     1.7 -#include <xen/interface/arch-x86_32.h>
     1.8 +#include <xen/interface/xen.h>
     1.9  #include <xen/interface/elfnote.h>
    1.10  
    1.11  /*
    1.12 @@ -192,6 +192,7 @@ ENTRY(cpu_gdt_table)
    1.13  #endif /* !CONFIG_XEN_COMPAT_030002 */
    1.14  	ELFNOTE(Xen, XEN_ELFNOTE_ENTRY,          .long,  startup_32)
    1.15  	ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long,  hypercall_page)
    1.16 +	ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW,   .long,  HYPERVISOR_VIRT_START)
    1.17  	ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,       .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
    1.18  #ifdef CONFIG_X86_PAE
    1.19  	ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE,       .asciz, "yes")
     2.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c	Fri Nov 10 13:01:23 2006 -0700
     2.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/fault-xen.c	Mon Nov 13 09:58:23 2006 -0700
     2.3 @@ -282,12 +282,6 @@ static int spurious_fault(struct pt_regs
     2.4  	pmd_t *pmd;
     2.5  	pte_t *pte;
     2.6  
     2.7 -#ifdef CONFIG_XEN
     2.8 -	/* Faults in hypervisor area are never spurious. */
     2.9 -	if (address >= HYPERVISOR_VIRT_START)
    2.10 -		return 0;
    2.11 -#endif
    2.12 -
    2.13  	/* Reserved-bit violation or user access to kernel space? */
    2.14  	if (error_code & 0x0c)
    2.15  		return 0;
    2.16 @@ -372,7 +366,7 @@ fastcall void __kprobes do_page_fault(st
    2.17  	if (unlikely(address >= TASK_SIZE)) { 
    2.18  #ifdef CONFIG_XEN
    2.19  		/* Faults in hypervisor area can never be patched up. */
    2.20 -		if (address >= HYPERVISOR_VIRT_START)
    2.21 +		if (address >= hypervisor_virt_start)
    2.22  			goto bad_area_nosemaphore;
    2.23  #endif
    2.24  		if (!(error_code & 5))
     3.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/init-xen.c	Fri Nov 10 13:01:23 2006 -0700
     3.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/init-xen.c	Mon Nov 13 09:58:23 2006 -0700
     3.3 @@ -130,7 +130,7 @@ static void __init page_table_range_init
     3.4  		pud = pud_offset(pgd, vaddr);
     3.5  		pmd = pmd_offset(pud, vaddr);
     3.6  		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
     3.7 -			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd)) 
     3.8 +			if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
     3.9  				one_page_table_init(pmd);
    3.10  
    3.11  			vaddr += PMD_SIZE;
    3.12 @@ -187,7 +187,7 @@ static void __init kernel_physical_mappi
    3.13  		pmd += pmd_idx;
    3.14  		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
    3.15  			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
    3.16 -			if (address >= HYPERVISOR_VIRT_START)
    3.17 +			if (address >= hypervisor_virt_start)
    3.18  				continue;
    3.19  
    3.20  			/* Map with big pages if possible, otherwise create normal page tables. */
    3.21 @@ -410,7 +410,7 @@ static void __init pagetable_init (void)
    3.22  	 * created - mappings will be set by set_fixmap():
    3.23  	 */
    3.24  	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
    3.25 -	page_table_range_init(vaddr, 0, pgd_base);
    3.26 +	page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
    3.27  
    3.28  	permanent_kmaps_init(pgd_base);
    3.29  }
     4.1 --- a/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c	Fri Nov 10 13:01:23 2006 -0700
     4.2 +++ b/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c	Mon Nov 13 09:58:23 2006 -0700
     4.3 @@ -102,8 +102,11 @@ static void set_pte_pfn(unsigned long va
     4.4  		return;
     4.5  	}
     4.6  	pte = pte_offset_kernel(pmd, vaddr);
     4.7 -	/* <pfn,flags> stored as-is, to permit clearing entries */
     4.8 -	set_pte(pte, pfn_pte(pfn, flags));
     4.9 +	if (pgprot_val(flags))
    4.10 +		/* <pfn,flags> stored as-is, to permit clearing entries */
    4.11 +		set_pte(pte, pfn_pte(pfn, flags));
    4.12 +	else
    4.13 +		pte_clear(&init_mm, vaddr, pte);
    4.14  
    4.15  	/*
    4.16  	 * It's enough to flush this one mapping.
    4.17 @@ -140,8 +143,11 @@ static void set_pte_pfn_ma(unsigned long
    4.18  		return;
    4.19  	}
    4.20  	pte = pte_offset_kernel(pmd, vaddr);
    4.21 -	/* <pfn,flags> stored as-is, to permit clearing entries */
    4.22 -	set_pte(pte, pfn_pte_ma(pfn, flags));
    4.23 +	if (pgprot_val(flags))
    4.24 +		/* <pfn,flags> stored as-is, to permit clearing entries */
    4.25 +		set_pte(pte, pfn_pte_ma(pfn, flags));
    4.26 +	else
    4.27 +		pte_clear(&init_mm, vaddr, pte);
    4.28  
    4.29  	/*
    4.30  	 * It's enough to flush this one mapping.
    4.31 @@ -186,9 +192,16 @@ void set_pmd_pfn(unsigned long vaddr, un
    4.32  }
    4.33  
    4.34  static int nr_fixmaps = 0;
    4.35 +unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
    4.36  unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
    4.37  EXPORT_SYMBOL(__FIXADDR_TOP);
    4.38  
    4.39 +void __init set_fixaddr_top()
    4.40 +{
    4.41 +	BUG_ON(nr_fixmaps > 0);
    4.42 +	__FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
    4.43 +}
    4.44 +
    4.45  void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
    4.46  {
    4.47  	unsigned long address = __fix_to_virt(idx);
    4.48 @@ -211,12 +224,6 @@ void __set_fixmap (enum fixed_addresses 
    4.49  	nr_fixmaps++;
    4.50  }
    4.51  
    4.52 -void set_fixaddr_top(unsigned long top)
    4.53 -{
    4.54 -	BUG_ON(nr_fixmaps > 0);
    4.55 -	__FIXADDR_TOP = top - PAGE_SIZE;
    4.56 -}
    4.57 -
    4.58  pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
    4.59  {
    4.60  	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
     5.1 --- a/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Fri Nov 10 13:01:23 2006 -0700
     5.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Mon Nov 13 09:58:23 2006 -0700
     5.3 @@ -260,7 +260,10 @@ static void set_pte_phys(unsigned long v
     5.4  			return;
     5.5  		}
     5.6  	}
     5.7 -	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
     5.8 +	if (pgprot_val(prot))
     5.9 +		new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
    5.10 +	else
    5.11 +		new_pte = __pte(0);
    5.12  
    5.13  	pte = pte_offset_kernel(pmd, vaddr);
    5.14  	if (!pte_none(*pte) &&
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Nov 10 13:01:23 2006 -0700
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Mon Nov 13 09:58:23 2006 -0700
     6.3 @@ -165,7 +165,7 @@ static int privcmd_ioctl(struct inode *i
     6.4  		struct mm_struct *mm = current->mm;
     6.5  		struct vm_area_struct *vma;
     6.6  		xen_pfn_t __user *p;
     6.7 -		unsigned long addr, mfn;
     6.8 +		unsigned long addr, mfn, nr_pages;
     6.9  		int i;
    6.10  
    6.11  		if (!is_initial_xendomain())
    6.12 @@ -174,7 +174,8 @@ static int privcmd_ioctl(struct inode *i
    6.13  		if (copy_from_user(&m, udata, sizeof(m)))
    6.14  			return -EFAULT;
    6.15  
    6.16 -		if ((m.num <= 0) || (m.num > (LONG_MAX >> PAGE_SHIFT)))
    6.17 +		nr_pages = m.num;
    6.18 +		if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
    6.19  			return -EINVAL;
    6.20  
    6.21  		down_read(&mm->mmap_sem);
    6.22 @@ -182,8 +183,7 @@ static int privcmd_ioctl(struct inode *i
    6.23  		vma = find_vma(mm, m.addr);
    6.24  		if (!vma ||
    6.25  		    (m.addr != vma->vm_start) ||
    6.26 -		    ((m.addr + ((unsigned long)m.num<<PAGE_SHIFT)) !=
    6.27 -		     vma->vm_end) ||
    6.28 +		    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
    6.29  		    !privcmd_enforce_singleshot_mapping(vma)) {
    6.30  			up_read(&mm->mmap_sem);
    6.31  			return -EINVAL;
    6.32 @@ -191,7 +191,7 @@ static int privcmd_ioctl(struct inode *i
    6.33  
    6.34  		p = m.arr;
    6.35  		addr = m.addr;
    6.36 -		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
    6.37 +		for (i = 0; i < nr_pages; i++, addr += PAGE_SIZE, p++) {
    6.38  			if (get_user(mfn, p)) {
    6.39  				up_read(&mm->mmap_sem);
    6.40  				return -EFAULT;
     7.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/fixmap.h	Fri Nov 10 13:01:23 2006 -0700
     7.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/fixmap.h	Mon Nov 13 09:58:23 2006 -0700
     7.3 @@ -98,7 +98,7 @@ enum fixed_addresses {
     7.4  extern void __set_fixmap(enum fixed_addresses idx,
     7.5  					maddr_t phys, pgprot_t flags);
     7.6  
     7.7 -extern void set_fixaddr_top(unsigned long top);
     7.8 +extern void set_fixaddr_top(void);
     7.9  
    7.10  #define set_fixmap(idx, phys) \
    7.11  		__set_fixmap(idx, phys, PAGE_KERNEL)
     8.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Fri Nov 10 13:01:23 2006 -0700
     8.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Mon Nov 13 09:58:23 2006 -0700
     8.3 @@ -56,6 +56,10 @@
     8.4  
     8.5  extern shared_info_t *HYPERVISOR_shared_info;
     8.6  
     8.7 +#ifdef CONFIG_X86_32
     8.8 +extern unsigned long hypervisor_virt_start;
     8.9 +#endif
    8.10 +
    8.11  /* arch/xen/i386/kernel/setup.c */
    8.12  extern start_info_t *xen_start_info;
    8.13  #ifdef CONFIG_XEN_PRIVILEGED_GUEST
     9.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	Fri Nov 10 13:01:23 2006 -0700
     9.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/pgtable-2level-defs.h	Mon Nov 13 09:58:23 2006 -0700
     9.3 @@ -9,7 +9,6 @@
     9.4  
     9.5  #define PGDIR_SHIFT	22
     9.6  #define PTRS_PER_PGD	1024
     9.7 -#define PTRS_PER_PGD_NO_HV	(HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
     9.8  
     9.9  /*
    9.10   * the i386 is two-level, so we don't really have any
    10.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	Fri Nov 10 13:01:23 2006 -0700
    10.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/pgtable-3level-defs.h	Mon Nov 13 09:58:23 2006 -0700
    10.3 @@ -8,7 +8,6 @@
    10.4   */
    10.5  #define PGDIR_SHIFT	30
    10.6  #define PTRS_PER_PGD	4
    10.7 -#define PTRS_PER_PGD_NO_HV 4
    10.8  
    10.9  /*
   10.10   * PMD_SHIFT determines the size of the area a middle-level
    11.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h	Fri Nov 10 13:01:23 2006 -0700
    11.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/setup_arch_post.h	Mon Nov 13 09:58:23 2006 -0700
    11.3 @@ -92,8 +92,10 @@ static void __init machine_specific_arch
    11.4  #endif
    11.5  
    11.6  	if (HYPERVISOR_xen_version(XENVER_platform_parameters,
    11.7 -				   &pp) == 0)
    11.8 -		set_fixaddr_top(pp.virt_start - PAGE_SIZE);
    11.9 +				   &pp) == 0) {
   11.10 +		hypervisor_virt_start = pp.virt_start;
   11.11 +		set_fixaddr_top();
   11.12 +	}
   11.13  
   11.14  	machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
   11.15  	machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/tools/check/check_crypto_lib	Mon Nov 13 09:58:23 2006 -0700
    12.3 @@ -0,0 +1,11 @@
    12.4 +#!/bin/bash
    12.5 +# CHECK-BUILD CHECK-INSTALL
    12.6 +
    12.7 +function error {
    12.8 +    echo
    12.9 +    echo "  *** Check for crypto library FAILED"
   12.10 +    exit 1
   12.11 +}
   12.12 +
   12.13 +set -e
   12.14 +ldconfig -p | grep -q libcrypto.so || error
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/tools/check/check_openssl_devel	Mon Nov 13 09:58:23 2006 -0700
    13.3 @@ -0,0 +1,11 @@
    13.4 +#!/bin/bash
    13.5 +# CHECK-BUILD
    13.6 +
    13.7 +function error {
    13.8 +    echo
    13.9 +    echo "  *** Check for openssl headers FAILED"
   13.10 +    exit 1
   13.11 +}
   13.12 +
   13.13 +set -e
   13.14 +[ -e /usr/include/openssl/md5.h ] || error
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/tools/check/check_x11_devel	Mon Nov 13 09:58:23 2006 -0700
    14.3 @@ -0,0 +1,11 @@
    14.4 +#!/bin/bash
    14.5 +# CHECK-BUILD
    14.6 +
    14.7 +function error {
    14.8 +    echo
    14.9 +    echo "  *** Check for x11 headers FAILED"
   14.10 +    exit 1
   14.11 +}
   14.12 +
   14.13 +set -e
   14.14 +[ -e /usr/include/X11/keysymdef.h ] || error
    15.1 --- a/tools/firmware/vmxassist/vm86.c	Fri Nov 10 13:01:23 2006 -0700
    15.2 +++ b/tools/firmware/vmxassist/vm86.c	Mon Nov 13 09:58:23 2006 -0700
    15.3 @@ -813,6 +813,58 @@ pop(struct regs *regs, unsigned prefix, 
    15.4  	return 1;
    15.5  }
    15.6  
    15.7 +static int
    15.8 +mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
    15.9 +{
   15.10 +	unsigned eip = regs->eip - 1;
   15.11 +	unsigned modrm = fetch8(regs);
   15.12 +	unsigned addr = operand(prefix, regs, modrm);
   15.13 +
   15.14 +	/* Only need to emulate segment loads in real->protected mode. */
   15.15 +	if (mode != VM86_REAL_TO_PROTECTED)
   15.16 +		return 0;
   15.17 +
   15.18 +	/* Register source only. */
   15.19 +	if ((modrm & 0xC0) != 0xC0)
   15.20 +		goto fail;
   15.21 +
   15.22 +	switch ((modrm & 0x38) >> 3) {
   15.23 +	case 0: /* es */
   15.24 +		regs->ves = getreg16(regs, modrm);
   15.25 +		saved_rm_regs.ves = 0;
   15.26 +		oldctx.es_sel = regs->ves;
   15.27 +		return 1;
   15.28 +
   15.29 +	/* case 1: cs */
   15.30 +
   15.31 +	case 2: /* ss */
   15.32 +		regs->uss = getreg16(regs, modrm);
   15.33 +		saved_rm_regs.uss = 0;
   15.34 +		oldctx.ss_sel = regs->uss;
   15.35 +		return 1;
   15.36 +	case 3: /* ds */
   15.37 +		regs->vds = getreg16(regs, modrm);
   15.38 +		saved_rm_regs.vds = 0;
   15.39 +		oldctx.ds_sel = regs->vds;
   15.40 +		return 1;
   15.41 +	case 4: /* fs */
   15.42 +		regs->vfs = getreg16(regs, modrm);
   15.43 +		saved_rm_regs.vfs = 0;
   15.44 +		oldctx.fs_sel = regs->vfs;
   15.45 +		return 1;
   15.46 +	case 5: /* gs */
   15.47 +		regs->vgs = getreg16(regs, modrm);
   15.48 +		saved_rm_regs.vgs = 0;
   15.49 +		oldctx.gs_sel = regs->vgs;
   15.50 +		return 1;
   15.51 +	}
   15.52 +
   15.53 + fail:
   15.54 +	printf("%s:%d: missed opcode %02x %02x\n",
   15.55 +	       __FUNCTION__, __LINE__, opc, modrm);
   15.56 +	return 0;
   15.57 +}
   15.58 +
   15.59  /*
   15.60   * Emulate a segment load in protected mode
   15.61   */
   15.62 @@ -1257,11 +1309,9 @@ opcode(struct regs *regs)
   15.63  
   15.64  	for (;;) {
   15.65  		switch ((opc = fetch8(regs))) {
   15.66 -		case 0x07:
   15.67 -			if (prefix & DATA32)
   15.68 -				regs->ves = pop32(regs);
   15.69 -			else
   15.70 -				regs->ves = pop16(regs);
   15.71 +		case 0x07: /* pop %es */
   15.72 +			regs->ves = (prefix & DATA32) ?
   15.73 +				pop32(regs) : pop16(regs);
   15.74  			TRACE((regs, regs->eip - eip, "pop %%es"));
   15.75  			if (mode == VM86_REAL_TO_PROTECTED) {
   15.76  				saved_rm_regs.ves = 0;
   15.77 @@ -1316,6 +1366,16 @@ opcode(struct regs *regs)
   15.78  			}
   15.79  			goto invalid;
   15.80  
   15.81 +		case 0x1F: /* pop %ds */
   15.82 +			regs->vds = (prefix & DATA32) ?
   15.83 +				pop32(regs) : pop16(regs);
   15.84 +			TRACE((regs, regs->eip - eip, "pop %%ds"));
   15.85 +			if (mode == VM86_REAL_TO_PROTECTED) {
   15.86 +				saved_rm_regs.vds = 0;
   15.87 +				oldctx.ds_sel = regs->vds;
   15.88 +			}
   15.89 +			return OPC_EMULATED;
   15.90 +
   15.91  		case 0x26:
   15.92  			TRACE((regs, regs->eip - eip, "%%es:"));
   15.93  			prefix |= SEG_ES;
   15.94 @@ -1402,6 +1462,11 @@ opcode(struct regs *regs)
   15.95                                  goto invalid;
   15.96                          return OPC_EMULATED;
   15.97  
   15.98 +		case 0x8E: /* mov r16, sreg */
   15.99 +			if (!mov_to_seg(regs, prefix, opc))
  15.100 +				goto invalid;
  15.101 +			return OPC_EMULATED;
  15.102 +
  15.103  		case 0x8F: /* addr32 pop r/m16 */
  15.104                          if ((prefix & ADDR32) == 0)
  15.105                                  goto invalid;
    16.1 --- a/tools/ioemu/hw/piix4acpi.c	Fri Nov 10 13:01:23 2006 -0700
    16.2 +++ b/tools/ioemu/hw/piix4acpi.c	Mon Nov 13 09:58:23 2006 -0700
    16.3 @@ -398,8 +398,16 @@ void pci_piix4_acpi_init(PCIBus *bus, in
    16.4      pci_conf[0x0e] = 0x00;
    16.5      pci_conf[0x3d] = 0x01;  /* Hardwired to PIRQA is used */
    16.6  
    16.7 -    pci_register_io_region((PCIDevice *)d, 4, 0x10,
    16.8 -                           PCI_ADDRESS_SPACE_IO, acpi_map);
    16.9  
    16.10 +    /* PMBA POWER MANAGEMENT BASE ADDRESS, hardcoded to 0x1f40
    16.11 +     * to make shutdown work for IPF, because the IPF Guest Firmware
    16.12 +     * will enumerate pci devices.
    16.13 +     *
    16.14 +     * TODO: if Guest Firmware or Guest OS changes this PMBA,
    16.15 +     * more logic will need to be added.
    16.16 +     */
   16.17 +    pci_conf[0x40] = 0x41; /* Special device-specific BAR at 0x40 */
   16.18 +    pci_conf[0x41] = 0x1f;
   16.19 +    acpi_map(d, 0, 0x1f40, 0x10, PCI_ADDRESS_SPACE_IO);
   16.20      acpi_reset(d);
   16.21  }
    17.1 --- a/tools/ioemu/hw/piix_pci.c	Fri Nov 10 13:01:23 2006 -0700
    17.2 +++ b/tools/ioemu/hw/piix_pci.c	Mon Nov 13 09:58:23 2006 -0700
    17.3 @@ -338,10 +338,14 @@ static void pci_bios_init_device(PCIDevi
    17.4          break;
    17.5      case 0x0680:
    17.6          if (vendor_id == 0x8086 && device_id == 0x7113) {
    17.7 -            /* PIIX4 ACPI PM */
    17.8 -            pci_config_writew(d, 0x20, 0x0000); /* NO smb bus IO enable in PIIX4 */
    17.9 +            /*
   17.10 +             * PIIX4 ACPI PM.
   17.11 +             * Special device with special PCI config space. No ordinary BARs.
   17.12 +             */
   17.13 +            pci_config_writew(d, 0x20, 0x0000); // No smb bus IO enable
   17.14              pci_config_writew(d, 0x22, 0x0000);
   17.15 -            goto default_map;
   17.16 +            pci_config_writew(d, 0x3c, 0x0009); // Hardcoded IRQ9
   17.17 +            pci_config_writew(d, 0x3d, 0x0001);
   17.18          }
   17.19          break;
   17.20      case 0x0300:
   17.21 @@ -394,14 +398,6 @@ static void pci_bios_init_device(PCIDevi
   17.22          pic_irq = pci_irqs[pin];
   17.23          pci_config_writeb(d, PCI_INTERRUPT_LINE, pic_irq);
   17.24      }
   17.25 -
   17.26 -    if (class== 0x0680&& vendor_id == 0x8086 && device_id == 0x7113) {
   17.27 -         // PIIX4 ACPI PM
   17.28 -       pci_config_writew(d, 0x20, 0x0000); // NO smb bus IO enable in PIIX4
   17.29 -       pci_config_writew(d, 0x22, 0x0000);
   17.30 -       pci_config_writew(d, 0x3c, 0x0009); // Hardcodeed IRQ9
   17.31 -       pci_config_writew(d, 0x3d, 0x0001);
   17.32 -    }
   17.33  }
   17.34  
   17.35  /*
    18.1 --- a/tools/ioemu/hw/serial.c	Fri Nov 10 13:01:23 2006 -0700
    18.2 +++ b/tools/ioemu/hw/serial.c	Mon Nov 13 09:58:23 2006 -0700
    18.3 @@ -73,6 +73,11 @@
    18.4  #define UART_LSR_OE	0x02	/* Overrun error indicator */
    18.5  #define UART_LSR_DR	0x01	/* Receiver data ready */
    18.6  
    18.7 +/* Maximum retries for a single byte transmit. */
    18.8 +#define WRITE_MAX_SINGLE_RETRIES 3
    18.9 +/* Maximum retries for a sequence of back-to-back unsuccessful transmits. */
   18.10 +#define WRITE_MAX_TOTAL_RETRIES 10
   18.11 +
   18.12  struct SerialState {
   18.13      uint8_t divider;
   18.14      uint8_t rbr; /* receive register */
   18.15 @@ -98,8 +103,12 @@ struct SerialState {
   18.16       * If a character transmitted via UART cannot be written to its
   18.17       * destination immediately we remember it here and retry a few times via
   18.18       * a polling timer.
   18.19 +     *  - write_single_retries: Number of write retries for current byte.
   18.20 +     *  - write_total_retries:  Number of write retries for back-to-back
   18.21 +     *                          unsuccessful transmits.
   18.22       */
   18.23 -    int write_retries;
   18.24 +    int write_single_retries;
   18.25 +    int write_total_retries;
   18.26      char write_chr;
   18.27      QEMUTimer *write_retry_timer;
   18.28  };
   18.29 @@ -217,16 +226,21 @@ static void serial_chr_write(void *opaqu
   18.30  {
   18.31      SerialState *s = opaque;
   18.32  
   18.33 +    /* Cancel any outstanding retry if this is a new byte. */
   18.34      qemu_del_timer(s->write_retry_timer);
   18.35  
   18.36      /* Retry every 100ms for 300ms total. */
   18.37      if (qemu_chr_write(s->chr, &s->write_chr, 1) == -1) {
   18.38 -        if (s->write_retries++ >= 3)
   18.39 -            printf("serial: write error\n");
   18.40 -        else
   18.41 +        s->write_total_retries++; 
   18.42 +        if (s->write_single_retries++ >= WRITE_MAX_SINGLE_RETRIES)
   18.43 +            fprintf(stderr, "serial: write error\n");
   18.44 +        else if (s->write_total_retries <= WRITE_MAX_TOTAL_RETRIES) {
   18.45              qemu_mod_timer(s->write_retry_timer,
   18.46                             qemu_get_clock(vm_clock) + ticks_per_sec / 10);
   18.47 -        return;
   18.48 +            return;
   18.49 +        }
   18.50 +    } else {
   18.51 +        s->write_total_retries = 0;  /* if successful then reset counter */
   18.52      }
   18.53  
   18.54      /* Success: Notify guest that THR is empty. */
   18.55 @@ -255,7 +269,7 @@ static void serial_ioport_write(void *op
   18.56              s->lsr &= ~UART_LSR_THRE;
   18.57              serial_update_irq(s);
   18.58              s->write_chr = val;
   18.59 -            s->write_retries = 0;
   18.60 +            s->write_single_retries = 0;
   18.61              serial_chr_write(s);
   18.62          }
   18.63          break;
    19.1 --- a/tools/ioemu/vl.c	Fri Nov 10 13:01:23 2006 -0700
    19.2 +++ b/tools/ioemu/vl.c	Mon Nov 13 09:58:23 2006 -0700
    19.3 @@ -6424,7 +6424,8 @@ int main(int argc, char **argv)
    19.4          page_array[i] = i;
    19.5      if (xc_domain_translate_gpfn_list(xc_handle, domid, tmp_nr_pages,
    19.6                                        page_array, page_array)) {
    19.7 -        fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
    19.8 +        fprintf(logfile, "xc_domain_translate_gpfn_list returned error %d\n",
    19.9 +                errno);
   19.10          exit(-1);
   19.11      }
   19.12  
    20.1 --- a/tools/libfsimage/common/fsimage_grub.c	Fri Nov 10 13:01:23 2006 -0700
    20.2 +++ b/tools/libfsimage/common/fsimage_grub.c	Mon Nov 13 09:58:23 2006 -0700
    20.3 @@ -126,7 +126,7 @@ int
    20.4  fsig_devread(fsi_file_t *ffi, unsigned int sector, unsigned int offset,
    20.5      unsigned int bufsize, char *buf)
    20.6  {
    20.7 -	uint64_t off = ffi->ff_fsi->f_off + ((uint64_t)(sector * 512)) + offset;
    20.8 +	uint64_t off = ffi->ff_fsi->f_off + ((uint64_t)sector * 512) + offset;
    20.9  	ssize_t bytes_read = 0;
   20.10  
   20.11  	while (bufsize) {
    21.1 --- a/tools/libfsimage/ext2fs/fsys_ext2fs.c	Fri Nov 10 13:01:23 2006 -0700
    21.2 +++ b/tools/libfsimage/ext2fs/fsys_ext2fs.c	Mon Nov 13 09:58:23 2006 -0700
    21.3 @@ -232,6 +232,7 @@ struct ext2_dir_entry
    21.4  #define S_ISREG(m)      (((m) & S_IFMT) == S_IFREG)
    21.5  #define S_ISDIR(m)      (((m) & S_IFMT) == S_IFDIR)
    21.6  
    21.7 +#if defined(__i386__) || defined(__x86_64__)
    21.8  /* include/asm-i386/bitops.h */
    21.9  /*
   21.10   * ffz = Find First Zero in word. Undefined if no zero exists,
   21.11 @@ -251,6 +252,66 @@ ffz (unsigned long word)
   21.12    return word;
   21.13  }
   21.14  
   21.15 +#elif defined(__ia64__)
   21.16 +
   21.17 +typedef unsigned long __u64;
   21.18 +
   21.19 +#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
   21.20 +# define ia64_popcnt(x) __builtin_popcountl(x)
   21.21 +#else
   21.22 +# define ia64_popcnt(x)                                     \
   21.23 +  ({                                                        \
   21.24 +    __u64 ia64_intri_res;                                   \
   21.25 +    asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
   21.26 +    ia64_intri_res;                                         \
   21.27 +  })
   21.28 +#endif
   21.29 +
   21.30 +static __inline__ unsigned long
   21.31 +ffz (unsigned long word)
   21.32 +{
   21.33 +  unsigned long result;
   21.34 +
   21.35 +  result = ia64_popcnt(word & (~word - 1));
   21.36 +  return result;
   21.37 +}
   21.38 +
   21.39 +#elif defined(__powerpc__)
   21.40 +
   21.41 +static __inline__ int
   21.42 +__ilog2(unsigned long x)
   21.43 +{
   21.44 +  int lz;
   21.45 +
   21.46 +  asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
   21.47 +  return BITS_PER_LONG - 1 - lz;
   21.48 +}
   21.49 +
   21.50 +static __inline__ unsigned long
   21.51 +ffz (unsigned long word)
   21.52 +{
   21.53 +  if ((word = ~word) == 0)
   21.54 +    return BITS_PER_LONG;
   21.55 +  return __ilog2(word & -word);
   21.56 +}
   21.57 +
   21.58 +#else /* Unoptimized */
   21.59 +
   21.60 +static __inline__ unsigned long
   21.61 +ffz (unsigned long word)
   21.62 +{
   21.63 +  unsigned long result;
   21.64 +
   21.65 +  result = 0;
   21.66 +  while(word & 1)
   21.67 +    {
   21.68 +      result++;
   21.69 +      word >>= 1;
   21.70 +    }
   21.71 +  return result;
   21.72 +}
   21.73 +#endif
   21.74 +
   21.75  /* check filesystem types and read superblock into memory buffer */
   21.76  int
   21.77  ext2fs_mount (fsi_file_t *ffi)
    22.1 --- a/tools/libfsimage/reiserfs/fsys_reiserfs.c	Fri Nov 10 13:01:23 2006 -0700
    22.2 +++ b/tools/libfsimage/reiserfs/fsys_reiserfs.c	Mon Nov 13 09:58:23 2006 -0700
    22.3 @@ -363,6 +363,8 @@ struct fsys_reiser_info
    22.4  #define JOURNAL_START    ((__u32 *) (INFO + 1))
    22.5  #define JOURNAL_END      ((__u32 *) (FSYS_BUF + FSYS_BUFLEN))
    22.6  
    22.7 +#if defined(__i386__) || defined(__x86_64__)
    22.8 +
    22.9  #ifdef __amd64
   22.10  #define BSF "bsfq"
   22.11  #else
   22.12 @@ -376,6 +378,61 @@ grub_log2 (unsigned long word)
   22.13  	   : "r" (word));
   22.14    return word;
   22.15  }
   22.16 +
   22.17 +#elif defined(__ia64__)
   22.18 +
   22.19 +#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
   22.20 +# define ia64_popcnt(x) __builtin_popcountl(x)
   22.21 +#else
   22.22 +# define ia64_popcnt(x)                                     \
   22.23 +  ({                                                        \
   22.24 +    __u64 ia64_intri_res;                                   \
   22.25 +    asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
   22.26 +    ia64_intri_res;                                         \
   22.27 +  })
   22.28 +#endif
   22.29 +
   22.30 +static __inline__ unsigned long
   22.31 +grub_log2 (unsigned long word)
   22.32 +{
   22.33 +  unsigned long result;
   22.34 +
   22.35 +  result = ia64_popcnt((word - 1) & ~word);
   22.36 +  return result;
   22.37 +}
   22.38 +
   22.39 +#elif defined(__powerpc__)
   22.40 +
   22.41 +static __inline__ int
   22.42 +__ilog2(unsigned long x)
   22.43 +{
   22.44 +  int lz;
   22.45 +
   22.46 +  asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
   22.47 +  return BITS_PER_LONG - 1 - lz;
   22.48 +}
   22.49 +
   22.50 +static __inline__ unsigned long
   22.51 +grub_log2 (unsigned long word)
   22.52 +{
   22.53 +  return __ilog2(word & -word);
   22.54 +}
   22.55 +
   22.56 +#else /* Unoptimized */
   22.57 +
   22.58 +static __inline__ unsigned long
   22.59 +grub_log2 (unsigned long word)
   22.60 +{
   22.61 +  unsigned long result = 0;
   22.62 +
   22.63 +  while (!(word & 1UL))
   22.64 +    {
   22.65 +      result++;
   22.66 +      word >>= 1;
   22.67 +    }
   22.68 +  return result;
   22.69 +}
   22.70 +#endif
   22.71  #define log2 grub_log2
   22.72  
   22.73  static __inline__ int
    23.1 --- a/tools/xenstore/xenstored_core.c	Fri Nov 10 13:01:23 2006 -0700
    23.2 +++ b/tools/xenstore/xenstored_core.c	Mon Nov 13 09:58:23 2006 -0700
    23.3 @@ -575,8 +575,10 @@ struct node *get_node(struct connection 
    23.4  	/* If we don't have permission, we don't have node. */
    23.5  	if (node) {
    23.6  		if ((perm_for_conn(conn, node->perms, node->num_perms) & perm)
    23.7 -		    != perm)
    23.8 +		    != perm) {
    23.9 +			errno = EACCES;
   23.10  			node = NULL;
   23.11 +		}
   23.12  	}
   23.13  	/* Clean up errno if they weren't supposed to know. */
   23.14  	if (!node) 
   23.15 @@ -789,7 +791,7 @@ static void delete_node_single(struct co
   23.16  		corrupt(conn, "Could not delete '%s'", node->name);
   23.17  		return;
   23.18  	}
   23.19 -	domain_entry_dec(conn);
   23.20 +	domain_entry_dec(conn, node);
   23.21  }
   23.22  
   23.23  /* Must not be / */
   23.24 @@ -840,7 +842,7 @@ static struct node *construct_node(struc
   23.25  	node->children = node->data = NULL;
   23.26  	node->childlen = node->datalen = 0;
   23.27  	node->parent = parent;
   23.28 -	domain_entry_inc(conn);
   23.29 +	domain_entry_inc(conn, node);
   23.30  	return node;
   23.31  }
   23.32  
   23.33 @@ -876,7 +878,7 @@ static struct node *create_node(struct c
   23.34  	 * something goes wrong. */
   23.35  	for (i = node; i; i = i->parent) {
   23.36  		if (!write_node(conn, i)) {
   23.37 -			domain_entry_dec(conn);
   23.38 +			domain_entry_dec(conn, i);
   23.39  			return NULL;
   23.40  		}
   23.41  		talloc_set_destructor(i, destroy_node);
   23.42 @@ -1106,6 +1108,7 @@ static void do_get_perms(struct connecti
   23.43  static void do_set_perms(struct connection *conn, struct buffered_data *in)
   23.44  {
   23.45  	unsigned int num;
   23.46 +	struct xs_permissions *perms;
   23.47  	char *name, *permstr;
   23.48  	struct node *node;
   23.49  
   23.50 @@ -1127,12 +1130,24 @@ static void do_set_perms(struct connecti
   23.51  		return;
   23.52  	}
   23.53  
   23.54 -	node->perms = talloc_array(node, struct xs_permissions, num);
   23.55 -	node->num_perms = num;
   23.56 -	if (!xs_strings_to_perms(node->perms, num, permstr)) {
   23.57 +	perms = talloc_array(node, struct xs_permissions, num);
   23.58 +	if (!xs_strings_to_perms(perms, num, permstr)) {
   23.59  		send_error(conn, errno);
   23.60  		return;
   23.61  	}
   23.62 +
   23.63 +	/* Unprivileged domains may not change the owner. */
   23.64 +	if (domain_is_unprivileged(conn) &&
   23.65 +	    perms[0].id != node->perms[0].id) {
   23.66 +		send_error(conn, EPERM);
   23.67 +		return;
   23.68 +	}
   23.69 +
   23.70 +	domain_entry_dec(conn, node);
   23.71 +	node->perms = perms;
   23.72 +	node->num_perms = num;
   23.73 +	domain_entry_inc(conn, node);
   23.74 +
   23.75  	if (!write_node(conn, node)) {
   23.76  		send_error(conn, errno);
   23.77  		return;
    24.1 --- a/tools/xenstore/xenstored_domain.c	Fri Nov 10 13:01:23 2006 -0700
    24.2 +++ b/tools/xenstore/xenstored_domain.c	Mon Nov 13 09:58:23 2006 -0700
    24.3 @@ -501,18 +501,35 @@ int domain_init(void)
    24.4  	return xce_handle;
    24.5  }
    24.6  
    24.7 -void domain_entry_inc(struct connection *conn)
    24.8 +void domain_entry_inc(struct connection *conn, struct node *node)
    24.9  {
   24.10 -	if (!conn || !conn->domain)
   24.11 +	struct domain *d;
   24.12 +
   24.13 +	if (!conn)
   24.14  		return;
   24.15 -	conn->domain->nbentry++;
   24.16 +
   24.17 +	if (node->perms && node->perms[0].id != conn->id) {
   24.18 +		d = find_domain_by_domid(node->perms[0].id);
   24.19 +		if (d)
   24.20 +			d->nbentry++;
   24.21 +	}
   24.22 +	else if (conn->domain) {
   24.23 +		conn->domain->nbentry++;
   24.24 +	}
   24.25  }
   24.26  
   24.27 -void domain_entry_dec(struct connection *conn)
   24.28 +void domain_entry_dec(struct connection *conn, struct node *node)
   24.29  {
   24.30 -	if (!conn || !conn->domain)
   24.31 +	struct domain *d;
   24.32 +
   24.33 +	if (!conn)
   24.34  		return;
   24.35 -	if (conn->domain->nbentry)
   24.36 +
   24.37 +	if (node->perms && node->perms[0].id != conn->id) {
   24.38 +		d = find_domain_by_domid(node->perms[0].id);
   24.39 +		if (d && d->nbentry)
   24.40 +			d->nbentry--;
   24.41 +	} else if (conn->domain && conn->domain->nbentry)
   24.42  		conn->domain->nbentry--;
   24.43  }
   24.44  
    25.1 --- a/tools/xenstore/xenstored_domain.h	Fri Nov 10 13:01:23 2006 -0700
    25.2 +++ b/tools/xenstore/xenstored_domain.h	Mon Nov 13 09:58:23 2006 -0700
    25.3 @@ -50,8 +50,8 @@ bool domain_can_write(struct connection 
    25.4  bool domain_is_unprivileged(struct connection *conn);
    25.5  
    25.6  /* Quota manipulation */
    25.7 -void domain_entry_inc(struct connection *conn);
    25.8 -void domain_entry_dec(struct connection *conn);
    25.9 +void domain_entry_inc(struct connection *conn, struct node *);
   25.10 +void domain_entry_dec(struct connection *conn, struct node *);
   25.11  int domain_entry(struct connection *conn);
   25.12  void domain_watch_inc(struct connection *conn);
   25.13  void domain_watch_dec(struct connection *conn);
    26.1 --- a/xen/arch/x86/boot/x86_32.S	Fri Nov 10 13:01:23 2006 -0700
    26.2 +++ b/xen/arch/x86/boot/x86_32.S	Mon Nov 13 09:58:23 2006 -0700
    26.3 @@ -196,21 +196,16 @@ ENTRY(stack_start)
    26.4          
    26.5  /*** DESCRIPTOR TABLES ***/
    26.6  
    26.7 -.globl idt
    26.8 -.globl gdt        
    26.9 -
   26.10          ALIGN
   26.11          
   26.12          .word   0    
   26.13  idt_descr:
   26.14          .word   256*8-1
   26.15 -idt:
   26.16          .long   idt_table
   26.17  
   26.18          .word   0
   26.19  gdt_descr:
   26.20          .word   LAST_RESERVED_GDT_BYTE
   26.21 -gdt:
   26.22          .long   gdt_table - FIRST_RESERVED_GDT_BYTE
   26.23  
   26.24          .word   0
    27.1 --- a/xen/arch/x86/boot/x86_64.S	Fri Nov 10 13:01:23 2006 -0700
    27.2 +++ b/xen/arch/x86/boot/x86_64.S	Mon Nov 13 09:58:23 2006 -0700
    27.3 @@ -192,9 +192,6 @@ 1:      jmp     1b
    27.4  
    27.5  /*** DESCRIPTOR TABLES ***/
    27.6  
    27.7 -.globl idt
    27.8 -.globl gdt        
    27.9 -
   27.10          .align 8, 0xCC
   27.11  multiboot_ptr:
   27.12          .long   0
   27.13 @@ -210,13 +207,11 @@ cpuid_ext_features:
   27.14          .word   0
   27.15  gdt_descr:
   27.16          .word   LAST_RESERVED_GDT_BYTE
   27.17 -gdt:
   27.18          .quad   gdt_table - FIRST_RESERVED_GDT_BYTE
   27.19  
   27.20          .word   0,0,0
   27.21  idt_descr:
   27.22          .word   256*16-1
   27.23 -idt:
   27.24          .quad   idt_table
   27.25  
   27.26  ENTRY(stack_start)
    28.1 --- a/xen/arch/x86/cpu/mcheck/Makefile	Fri Nov 10 13:01:23 2006 -0700
    28.2 +++ b/xen/arch/x86/cpu/mcheck/Makefile	Mon Nov 13 09:58:23 2006 -0700
    28.3 @@ -2,6 +2,6 @@ obj-y += k7.o
    28.4  obj-y += mce.o
    28.5  obj-y += non-fatal.o
    28.6  obj-y += p4.o
    28.7 -obj-y += p5.o
    28.8 -obj-y += p6.o
    28.9 -obj-y += winchip.o
   28.10 +obj-$(x86_32) += p5.o
   28.11 +obj-$(x86_32) += p6.o
   28.12 +obj-$(x86_32) += winchip.o
    29.1 --- a/xen/arch/x86/cpu/mcheck/mce.c	Fri Nov 10 13:01:23 2006 -0700
    29.2 +++ b/xen/arch/x86/cpu/mcheck/mce.c	Mon Nov 13 09:58:23 2006 -0700
    29.3 @@ -39,18 +39,22 @@ void mcheck_init(struct cpuinfo_x86 *c)
    29.4  			break;
    29.5  
    29.6  		case X86_VENDOR_INTEL:
    29.7 +#ifndef CONFIG_X86_64
    29.8  			if (c->x86==5)
    29.9  				intel_p5_mcheck_init(c);
   29.10  			if (c->x86==6)
   29.11  				intel_p6_mcheck_init(c);
   29.12 +#endif
   29.13  			if (c->x86==15)
   29.14  				intel_p4_mcheck_init(c);
   29.15  			break;
   29.16  
   29.17 +#ifndef CONFIG_X86_64
   29.18  		case X86_VENDOR_CENTAUR:
   29.19  			if (c->x86==5)
   29.20  				winchip_mcheck_init(c);
   29.21  			break;
   29.22 +#endif
   29.23  
   29.24  		default:
   29.25  			break;
    30.1 --- a/xen/arch/x86/cpu/mtrr/Makefile	Fri Nov 10 13:01:23 2006 -0700
    30.2 +++ b/xen/arch/x86/cpu/mtrr/Makefile	Mon Nov 13 09:58:23 2006 -0700
    30.3 @@ -1,6 +1,6 @@
    30.4 -obj-y += amd.o
    30.5 -obj-y += centaur.o
    30.6 -obj-y += cyrix.o
    30.7 +obj-$(x86_32) += amd.o
    30.8 +obj-$(x86_32) += centaur.o
    30.9 +obj-$(x86_32) += cyrix.o
   30.10  obj-y += generic.o
   30.11  obj-y += main.o
   30.12  obj-y += state.o
    31.1 --- a/xen/arch/x86/cpu/mtrr/main.c	Fri Nov 10 13:01:23 2006 -0700
    31.2 +++ b/xen/arch/x86/cpu/mtrr/main.c	Mon Nov 13 09:58:23 2006 -0700
    31.3 @@ -64,7 +64,11 @@ struct mtrr_ops * mtrr_if = NULL;
    31.4  static void set_mtrr(unsigned int reg, unsigned long base,
    31.5  		     unsigned long size, mtrr_type type);
    31.6  
    31.7 +#ifndef CONFIG_X86_64
    31.8  extern int arr3_protected;
    31.9 +#else
   31.10 +#define arr3_protected 0
   31.11 +#endif
   31.12  
   31.13  static char *mtrr_strings[MTRR_NUM_TYPES] =
   31.14  {
   31.15 @@ -539,9 +543,11 @@ extern void centaur_init_mtrr(void);
   31.16  
   31.17  static void __init init_ifs(void)
   31.18  {
   31.19 +#ifndef CONFIG_X86_64
   31.20  	amd_init_mtrr();
   31.21  	cyrix_init_mtrr();
   31.22  	centaur_init_mtrr();
   31.23 +#endif
   31.24  }
   31.25  
   31.26  /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
   31.27 @@ -593,6 +599,7 @@ void __init mtrr_bp_init(void)
   31.28  			size_and_mask = 0;
   31.29  		}
   31.30  	} else {
   31.31 +#ifndef CONFIG_X86_64
   31.32  		switch (boot_cpu_data.x86_vendor) {
   31.33  		case X86_VENDOR_AMD:
   31.34  			if (cpu_has_k6_mtrr) {
   31.35 @@ -619,6 +626,7 @@ void __init mtrr_bp_init(void)
   31.36  		default:
   31.37  			break;
   31.38  		}
   31.39 +#endif
   31.40  	}
   31.41  
   31.42  	if (mtrr_if) {
    32.1 --- a/xen/arch/x86/domain.c	Fri Nov 10 13:01:23 2006 -0700
    32.2 +++ b/xen/arch/x86/domain.c	Mon Nov 13 09:58:23 2006 -0700
    32.3 @@ -166,6 +166,9 @@ void vcpu_destroy(struct vcpu *v)
    32.4  
    32.5  int arch_domain_create(struct domain *d)
    32.6  {
    32.7 +#ifdef __x86_64__
    32.8 +    struct page_info *pg;
    32.9 +#endif
   32.10      l1_pgentry_t gdt_l1e;
   32.11      int vcpuid, pdpt_order;
   32.12      int i, rc = -ENOMEM;
   32.13 @@ -194,19 +197,17 @@ int arch_domain_create(struct domain *d)
   32.14  
   32.15  #else /* __x86_64__ */
   32.16  
   32.17 -    d->arch.mm_perdomain_l2 = alloc_xenheap_page();
   32.18 -    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
   32.19 -    if ( (d->arch.mm_perdomain_l2 == NULL) ||
   32.20 -         (d->arch.mm_perdomain_l3 == NULL) )
   32.21 +    if ( (pg = alloc_domheap_page(NULL)) == NULL )
   32.22          goto fail;
   32.23 -
   32.24 -    memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
   32.25 +    d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
   32.26      for ( i = 0; i < (1 << pdpt_order); i++ )
   32.27          d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
   32.28              l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
   32.29                            __PAGE_HYPERVISOR);
   32.30  
   32.31 -    memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
   32.32 +    if ( (pg = alloc_domheap_page(NULL)) == NULL )
   32.33 +        goto fail;
   32.34 +    d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
   32.35      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
   32.36          l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
   32.37                              __PAGE_HYPERVISOR);
   32.38 @@ -240,8 +241,8 @@ int arch_domain_create(struct domain *d)
   32.39   fail:
   32.40      free_xenheap_page(d->shared_info);
   32.41  #ifdef __x86_64__
   32.42 -    free_xenheap_page(d->arch.mm_perdomain_l2);
   32.43 -    free_xenheap_page(d->arch.mm_perdomain_l3);
   32.44 +    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
   32.45 +    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
   32.46  #endif
   32.47      free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
   32.48      return rc;
   32.49 @@ -265,8 +266,8 @@ void arch_domain_destroy(struct domain *
   32.50          get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
   32.51  
   32.52  #ifdef __x86_64__
   32.53 -    free_xenheap_page(d->arch.mm_perdomain_l2);
   32.54 -    free_xenheap_page(d->arch.mm_perdomain_l3);
   32.55 +    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
   32.56 +    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
   32.57  #endif
   32.58  
   32.59      free_xenheap_page(d->shared_info);
   32.60 @@ -587,9 +588,9 @@ static void load_segments(struct vcpu *n
   32.61          regs->entry_vector  = TRAP_syscall;
   32.62          regs->rflags       &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|
   32.63                                  X86_EFLAGS_NT|X86_EFLAGS_TF);
   32.64 -        regs->ss            = __GUEST_SS;
   32.65 +        regs->ss            = FLAT_KERNEL_SS;
   32.66          regs->rsp           = (unsigned long)(rsp-11);
   32.67 -        regs->cs            = __GUEST_CS;
   32.68 +        regs->cs            = FLAT_KERNEL_CS;
   32.69          regs->rip           = nctxt->failsafe_callback_eip;
   32.70      }
   32.71  }
    33.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Nov 10 13:01:23 2006 -0700
    33.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Nov 13 09:58:23 2006 -0700
    33.3 @@ -517,7 +517,8 @@ int hvm_bringup_ap(int vcpuid, int tramp
    33.4      if ( bsp->vcpu_id != 0 )
    33.5      {
    33.6          gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
    33.7 -        domain_crash_synchronous();
    33.8 +        domain_crash(bsp->domain);
    33.9 +        return -EINVAL;
   33.10      }
   33.11  
   33.12      if ( (v = d->vcpu[vcpuid]) == NULL )
    34.1 --- a/xen/arch/x86/hvm/intercept.c	Fri Nov 10 13:01:23 2006 -0700
    34.2 +++ b/xen/arch/x86/hvm/intercept.c	Mon Nov 13 09:58:23 2006 -0700
    34.3 @@ -253,11 +253,7 @@ int register_io_handler(
    34.4      struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
    34.5      int num = handler->num_slot;
    34.6  
    34.7 -    if ( num >= MAX_IO_HANDLER )
    34.8 -    {
    34.9 -        printk("no extra space, register io interceptor failed!\n");
   34.10 -        domain_crash_synchronous();
   34.11 -    }
   34.12 +    BUG_ON(num >= MAX_IO_HANDLER);
   34.13  
   34.14      handler->hdl_list[num].addr = addr;
   34.15      handler->hdl_list[num].size = size;
    35.1 --- a/xen/arch/x86/hvm/io.c	Fri Nov 10 13:01:23 2006 -0700
    35.2 +++ b/xen/arch/x86/hvm/io.c	Mon Nov 13 09:58:23 2006 -0700
    35.3 @@ -81,9 +81,7 @@ static void set_reg_value (int size, int
    35.4              regs->ebx |= ((value & 0xFF) << 8);
    35.5              break;
    35.6          default:
    35.7 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
    35.8 -            domain_crash_synchronous();
    35.9 -            break;
   35.10 +            goto crash;
   35.11          }
   35.12          break;
   35.13      case WORD:
   35.14 @@ -121,9 +119,7 @@ static void set_reg_value (int size, int
   35.15              regs->edi |= (value & 0xFFFF);
   35.16              break;
   35.17          default:
   35.18 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
   35.19 -            domain_crash_synchronous();
   35.20 -            break;
   35.21 +            goto crash;
   35.22          }
   35.23          break;
   35.24      case LONG:
   35.25 @@ -153,15 +149,13 @@ static void set_reg_value (int size, int
   35.26              regs->edi = value;
   35.27              break;
   35.28          default:
   35.29 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
   35.30 -            domain_crash_synchronous();
   35.31 -            break;
   35.32 +            goto crash;
   35.33          }
   35.34          break;
   35.35      default:
   35.36 -        printk("Error: size:%x, index:%x are invalid!\n", size, index);
   35.37 +    crash:
   35.38 +        gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
   35.39          domain_crash_synchronous();
   35.40 -        break;
   35.41      }
   35.42  }
   35.43  #else
   35.44 @@ -184,7 +178,7 @@ static inline void __set_reg_value(unsig
   35.45          *reg = value;
   35.46          break;
   35.47      default:
   35.48 -        printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
   35.49 +        gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
   35.50          domain_crash_synchronous();
   35.51      }
   35.52  }
   35.53 @@ -226,7 +220,8 @@ static void set_reg_value (int size, int
   35.54              regs->rbx |= ((value & 0xFF) << 8);
   35.55              break;
   35.56          default:
   35.57 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
   35.58 +            gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
   35.59 +                     size, index);
   35.60              domain_crash_synchronous();
   35.61              break;
   35.62          }
   35.63 @@ -283,7 +278,7 @@ static void set_reg_value (int size, int
   35.64          __set_reg_value(&regs->r15, size, value);
   35.65          break;
   35.66      default:
   35.67 -        printk("Error: <set_reg_value> Invalid index\n");
   35.68 +        gdprintk(XENLOG_ERR, "Invalid index\n");
   35.69          domain_crash_synchronous();
   35.70      }
   35.71      return;
    36.1 --- a/xen/arch/x86/hvm/platform.c	Fri Nov 10 13:01:23 2006 -0700
    36.2 +++ b/xen/arch/x86/hvm/platform.c	Mon Nov 13 09:58:23 2006 -0700
    36.3 @@ -731,8 +731,7 @@ static void hvm_send_assist_req(struct v
    36.4      {
    36.5          /* This indicates a bug in the device model.  Crash the domain. */
    36.6          gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
    36.7 -        domain_crash(v->domain);
    36.8 -        return;
    36.9 +        domain_crash_synchronous();
   36.10      }
   36.11  
   36.12      prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
    37.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Nov 10 13:01:23 2006 -0700
    37.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Nov 13 09:58:23 2006 -0700
    37.3 @@ -326,14 +326,14 @@ static inline int long_mode_do_msr_read(
    37.4  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    37.5  {
    37.6      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    37.7 -    struct vcpu *vc = current;
    37.8 -    struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
    37.9 +    struct vcpu *v = current;
   37.10 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   37.11  
   37.12      HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx "
   37.13                  "msr_content %"PRIx64"\n", 
   37.14                  (unsigned long)regs->ecx, msr_content);
   37.15  
   37.16 -    switch (regs->ecx)
   37.17 +    switch ( regs->ecx )
   37.18      {
   37.19      case MSR_EFER:
   37.20  #ifdef __x86_64__
   37.21 @@ -342,24 +342,24 @@ static inline int long_mode_do_msr_write
   37.22          {
   37.23              printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
   37.24                     msr_content);
   37.25 -            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
   37.26 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   37.27              return 0;
   37.28          }
   37.29  
   37.30          /* LME: 0 -> 1 */
   37.31          if ( msr_content & EFER_LME &&
   37.32 -             !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state))
   37.33 +             !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
   37.34          {
   37.35 -            if ( svm_paging_enabled(vc) ||
   37.36 +            if ( svm_paging_enabled(v) ||
   37.37                   !test_bit(SVM_CPU_STATE_PAE_ENABLED,
   37.38 -                           &vc->arch.hvm_svm.cpu_state) )
   37.39 +                           &v->arch.hvm_svm.cpu_state) )
   37.40              {
   37.41                  printk("Trying to set LME bit when "
   37.42                         "in paging mode or PAE bit is not set\n");
   37.43 -                svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
   37.44 +                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   37.45                  return 0;
   37.46              }
   37.47 -            set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
   37.48 +            set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
   37.49          }
   37.50  
   37.51          /* We have already recorded that we want LME, so it will be set 
   37.52 @@ -374,13 +374,13 @@ static inline int long_mode_do_msr_write
   37.53  
   37.54      case MSR_FS_BASE:
   37.55      case MSR_GS_BASE:
   37.56 -        if ( !svm_long_mode_enabled(vc) )
   37.57 -            domain_crash_synchronous();
   37.58 +        if ( !svm_long_mode_enabled(v) )
   37.59 +            goto exit_and_crash;
   37.60  
   37.61          if (!IS_CANO_ADDRESS(msr_content))
   37.62          {
   37.63              HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
   37.64 -            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
   37.65 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
   37.66          }
   37.67  
   37.68          if (regs->ecx == MSR_FS_BASE)
   37.69 @@ -412,7 +412,13 @@ static inline int long_mode_do_msr_write
   37.70      default:
   37.71          return 0;
   37.72      }
   37.73 +
   37.74      return 1;
   37.75 +
   37.76 + exit_and_crash:
   37.77 +    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
   37.78 +    domain_crash(v->domain);
   37.79 +    return 1; /* handled */
   37.80  }
   37.81  
   37.82  
   37.83 @@ -421,7 +427,6 @@ static inline int long_mode_do_msr_write
   37.84  #define savedebug(_v,_reg) \
   37.85      __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
   37.86  
   37.87 -
   37.88  static inline void svm_save_dr(struct vcpu *v)
   37.89  {
   37.90      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   37.91 @@ -938,7 +943,8 @@ static void svm_do_general_protection_fa
   37.92          svm_dump_vmcb(__func__, vmcb);
   37.93          svm_dump_regs(__func__, regs);
   37.94          svm_dump_inst(vmcb->rip);
   37.95 -        __hvm_bug(regs);
   37.96 +        domain_crash(v->domain);
   37.97 +        return;
   37.98      }
   37.99  
  37.100      HVM_DBG_LOG(DBG_LEVEL_1,
  37.101 @@ -1169,8 +1175,9 @@ static void svm_get_prefix_info(
  37.102      if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst)) 
  37.103          != MAX_INST_LEN) 
  37.104      {
  37.105 -        printk("%s: get guest instruction failed\n", __func__);
  37.106 -        domain_crash_synchronous();
  37.107 +        gdprintk(XENLOG_ERR, "get guest instruction failed\n");
  37.108 +        domain_crash(current->domain);
  37.109 +        return;
  37.110      }
  37.111  
  37.112      for (i = 0; i < MAX_INST_LEN; i++)
  37.113 @@ -1266,9 +1273,7 @@ static inline int svm_get_io_address(
  37.114          isize --;
  37.115  
  37.116      if (isize > 1) 
  37.117 -    {
  37.118          svm_get_prefix_info(vmcb, dir, &seg, &asize);
  37.119 -    }
  37.120  
  37.121      ASSERT(dir == IOREQ_READ || dir == IOREQ_WRITE);
  37.122  
  37.123 @@ -1470,8 +1475,10 @@ static int svm_set_cr0(unsigned long val
  37.124          mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
  37.125          if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
  37.126          {
  37.127 -            printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
  37.128 -            domain_crash_synchronous(); /* need to take a clean path */
  37.129 +            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
  37.130 +                     v->arch.hvm_svm.cpu_cr3, mfn);
  37.131 +            domain_crash(v->domain);
  37.132 +            return 0;
  37.133          }
  37.134  
  37.135  #if defined(__x86_64__)
  37.136 @@ -1556,7 +1563,7 @@ static void mov_from_cr(int cr, int gp, 
  37.137      vmcb = v->arch.hvm_svm.vmcb;
  37.138      ASSERT(vmcb);
  37.139  
  37.140 -    switch (cr)
  37.141 +    switch ( cr )
  37.142      {
  37.143      case 0:
  37.144          value = v->arch.hvm_svm.cpu_shadow_cr0;
  37.145 @@ -1582,7 +1589,8 @@ static void mov_from_cr(int cr, int gp, 
  37.146          break;
  37.147          
  37.148      default:
  37.149 -        __hvm_bug(regs);
  37.150 +        domain_crash(v->domain);
  37.151 +        return;
  37.152      }
  37.153  
  37.154      set_reg(gp, value, regs, vmcb);
  37.155 @@ -1602,14 +1610,11 @@ static inline int svm_pgbit_test(struct 
  37.156   */
  37.157  static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
  37.158  {
  37.159 -    unsigned long value;
  37.160 -    unsigned long old_cr;
  37.161 +    unsigned long value, old_cr, old_base_mfn, mfn;
  37.162      struct vcpu *v = current;
  37.163      struct vlapic *vlapic = vcpu_vlapic(v);
  37.164      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  37.165  
  37.166 -    ASSERT(vmcb);
  37.167 -
  37.168      value = get_reg(gpreg, regs, vmcb);
  37.169  
  37.170      HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
  37.171 @@ -1623,8 +1628,6 @@ static int mov_to_cr(int gpreg, int cr, 
  37.172          return svm_set_cr0(value);
  37.173  
  37.174      case 3: 
  37.175 -    {
  37.176 -        unsigned long old_base_mfn, mfn;
  37.177          if (svm_dbg_on)
  37.178              printk("CR3 write =%lx \n", value );
  37.179          /* If paging is not enabled yet, simply copy the value to CR3. */
  37.180 @@ -1644,7 +1647,7 @@ static int mov_to_cr(int gpreg, int cr, 
  37.181               */
  37.182              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  37.183              if (mfn != pagetable_get_pfn(v->arch.guest_table))
  37.184 -                __hvm_bug(regs);
  37.185 +                goto bad_cr3;
  37.186              shadow_update_cr3(v);
  37.187          }
  37.188          else 
  37.189 @@ -1656,10 +1659,7 @@ static int mov_to_cr(int gpreg, int cr, 
  37.190              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
  37.191              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  37.192              if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
  37.193 -            {
  37.194 -                printk("Invalid CR3 value=%lx\n", value);
  37.195 -                domain_crash_synchronous(); /* need to take a clean path */
  37.196 -            }
  37.197 +                goto bad_cr3;
  37.198  
  37.199              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  37.200              v->arch.guest_table = pagetable_from_pfn(mfn);
  37.201 @@ -1673,10 +1673,8 @@ static int mov_to_cr(int gpreg, int cr, 
  37.202              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
  37.203          }
  37.204          break;
  37.205 -    }
  37.206  
  37.207      case 4: /* CR4 */
  37.208 -    {
  37.209          if (svm_dbg_on)
  37.210              printk( "write cr4=%lx, cr0=%lx\n", 
  37.211                      value,  v->arch.hvm_svm.cpu_shadow_cr0 );
  37.212 @@ -1692,10 +1690,7 @@ static int mov_to_cr(int gpreg, int cr, 
  37.213                  mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
  37.214                  if ( !VALID_MFN(mfn) || 
  37.215                       !get_page(mfn_to_page(mfn), v->domain) )
  37.216 -                {
  37.217 -                    printk("Invalid CR3 value = %lx", v->arch.hvm_svm.cpu_cr3);
  37.218 -                    domain_crash_synchronous(); /* need to take a clean path */
  37.219 -                }
  37.220 +                    goto bad_cr3;
  37.221  
  37.222                  /*
  37.223                   * Now arch.guest_table points to machine physical.
  37.224 @@ -1741,20 +1736,23 @@ static int mov_to_cr(int gpreg, int cr, 
  37.225              shadow_update_paging_modes(v);
  37.226          }
  37.227          break;
  37.228 -    }
  37.229  
  37.230      case 8:
  37.231 -    {
  37.232          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
  37.233          break;
  37.234 -    }
  37.235  
  37.236      default:
  37.237 -        printk("invalid cr: %d\n", cr);
  37.238 -        __hvm_bug(regs);
  37.239 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
  37.240 +        domain_crash(v->domain);
  37.241 +        return 0;
  37.242      }
  37.243  
  37.244      return 1;
  37.245 +
  37.246 + bad_cr3:
  37.247 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  37.248 +    domain_crash(v->domain);
  37.249 +    return 0;
  37.250  }
  37.251  
  37.252  
  37.253 @@ -1857,8 +1855,7 @@ static int svm_cr_access(struct vcpu *v,
  37.254          break;
  37.255  
  37.256      default:
  37.257 -        __hvm_bug(regs);
  37.258 -        break;
  37.259 +        BUG();
  37.260      }
  37.261  
  37.262      ASSERT(inst_len);
  37.263 @@ -2037,16 +2034,15 @@ void svm_handle_invlpg(const short invlp
  37.264      int inst_len;
  37.265      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  37.266  
  37.267 -    ASSERT(vmcb);
  37.268      /* 
  37.269       * Unknown how many bytes the invlpg instruction will take.  Use the
  37.270       * maximum instruction length here
  37.271       */
  37.272      if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
  37.273      {
  37.274 -        printk("svm_handle_invlpg (): Error reading memory %d bytes\n", 
  37.275 -               length);
  37.276 -        __hvm_bug(regs);
  37.277 +        gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
  37.278 +        domain_crash(v->domain);
  37.279 +        return;
  37.280      }
  37.281  
  37.282      if (invlpga)
  37.283 @@ -2510,7 +2506,7 @@ asmlinkage void svm_vmexit_handler(struc
  37.284      if (exit_reason == VMEXIT_INVALID)
  37.285      {
  37.286          svm_dump_vmcb(__func__, vmcb);
  37.287 -        domain_crash_synchronous();
  37.288 +        goto exit_and_crash;
  37.289      }
  37.290  
  37.291  #ifdef SVM_EXTRA_DEBUG
  37.292 @@ -2734,8 +2730,7 @@ asmlinkage void svm_vmexit_handler(struc
  37.293          break;
  37.294  
  37.295      case VMEXIT_TASK_SWITCH:
  37.296 -        __hvm_bug(regs);
  37.297 -        break;
  37.298 +        goto exit_and_crash;
  37.299  
  37.300      case VMEXIT_CPUID:
  37.301          svm_vmexit_do_cpuid(vmcb, regs->eax, regs);
  37.302 @@ -2811,15 +2806,16 @@ asmlinkage void svm_vmexit_handler(struc
  37.303          break;
  37.304  
  37.305      case VMEXIT_SHUTDOWN:
  37.306 -        printk("Guest shutdown exit\n");
  37.307 -        domain_crash_synchronous();
  37.308 -        break;
  37.309 +        gdprintk(XENLOG_ERR, "Guest shutdown exit\n");
  37.310 +        goto exit_and_crash;
  37.311  
  37.312      default:
  37.313 -        printk("unexpected VMEXIT: exit reason = 0x%x, exitinfo1 = %"PRIx64", "
  37.314 -               "exitinfo2 = %"PRIx64"\n", exit_reason, 
  37.315 -               (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
  37.316 -        __hvm_bug(regs);       /* should not happen */
  37.317 +    exit_and_crash:
  37.318 +        gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
  37.319 +                 "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
  37.320 +                 exit_reason, 
  37.321 +                 (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
  37.322 +        domain_crash(v->domain);
  37.323          break;
  37.324      }
  37.325  
  37.326 @@ -2840,8 +2836,6 @@ asmlinkage void svm_vmexit_handler(struc
  37.327          printk("svm_vmexit_handler: Returning\n");
  37.328      }
  37.329  #endif
  37.330 -
  37.331 -    return;
  37.332  }
  37.333  
  37.334  asmlinkage void svm_load_cr2(void)
    38.1 --- a/xen/arch/x86/hvm/vioapic.c	Fri Nov 10 13:01:23 2006 -0700
    38.2 +++ b/xen/arch/x86/hvm/vioapic.c	Mon Nov 13 09:58:23 2006 -0700
    38.3 @@ -35,6 +35,7 @@
    38.4  #include <public/hvm/ioreq.h>
    38.5  #include <asm/hvm/io.h>
    38.6  #include <asm/hvm/vpic.h>
    38.7 +#include <asm/hvm/vlapic.h>
    38.8  #include <asm/hvm/support.h>
    38.9  #include <asm/current.h>
   38.10  #include <asm/event.h>
   38.11 @@ -285,42 +286,6 @@ static int ioapic_inj_irq(struct vioapic
   38.12      return result;
   38.13  }
   38.14  
   38.15 -#ifndef __ia64__
   38.16 -static int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t dest)
   38.17 -{
   38.18 -    int result = 0;
   38.19 -    uint32_t logical_dest;
   38.20 -
   38.21 -    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vlapic_match_logical_addr "
   38.22 -                "vcpu=%d vlapic_id=%x dest=%x\n",
   38.23 -                vlapic_vcpu(vlapic)->vcpu_id, VLAPIC_ID(vlapic), dest);
   38.24 -
   38.25 -    logical_dest = vlapic_get_reg(vlapic, APIC_LDR);
   38.26 -
   38.27 -    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
   38.28 -    {
   38.29 -    case APIC_DFR_FLAT:
   38.30 -        result = ((dest & GET_APIC_LOGICAL_ID(logical_dest)) != 0);
   38.31 -        break;
   38.32 -    case APIC_DFR_CLUSTER:
   38.33 -        /* Should we support flat cluster mode ?*/
   38.34 -        if ( (GET_APIC_LOGICAL_ID(logical_dest) >> 4
   38.35 -              == ((dest >> 0x4) & 0xf)) &&
   38.36 -             (logical_dest & (dest  & 0xf)) )
   38.37 -            result = 1;
   38.38 -        break;
   38.39 -    default:
   38.40 -        gdprintk(XENLOG_WARNING, "error DFR value for lapic of vcpu %d\n",
   38.41 -                 vlapic_vcpu(vlapic)->vcpu_id);
   38.42 -        break;
   38.43 -    }
   38.44 -
   38.45 -    return result;
   38.46 -}
   38.47 -#else
   38.48 -extern int vlapic_match_logical_addr(struct vlapic *vlapic, uint16_t dest);
   38.49 -#endif
   38.50 -
   38.51  static uint32_t ioapic_get_delivery_bitmask(struct vioapic *vioapic,
   38.52                                              uint16_t dest,
   38.53                                              uint8_t dest_mode,
    39.1 --- a/xen/arch/x86/hvm/vlapic.c	Fri Nov 10 13:01:23 2006 -0700
    39.2 +++ b/xen/arch/x86/hvm/vlapic.c	Mon Nov 13 09:58:23 2006 -0700
    39.3 @@ -196,63 +196,56 @@ uint32_t vlapic_get_ppr(struct vlapic *v
    39.4      return ppr;
    39.5  }
    39.6  
    39.7 -/* This only for fixed delivery mode */
    39.8 +int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
    39.9 +{
   39.10 +    int result = 0;
   39.11 +    uint8_t logical_id;
   39.12 +
   39.13 +    logical_id = GET_APIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));
   39.14 +
   39.15 +    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
   39.16 +    {
   39.17 +    case APIC_DFR_FLAT:
   39.18 +        if ( logical_id & mda )
   39.19 +            result = 1;
   39.20 +        break;
   39.21 +    case APIC_DFR_CLUSTER:
   39.22 +        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
   39.23 +            result = 1;
   39.24 +        break;
   39.25 +    default:
   39.26 +        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d\n",
   39.27 +                 vlapic_vcpu(vlapic)->vcpu_id);
   39.28 +        break;
   39.29 +    }
   39.30 +
   39.31 +    return result;
   39.32 +}
   39.33 +
   39.34  static int vlapic_match_dest(struct vcpu *v, struct vlapic *source,
   39.35 -                             int short_hand, int dest, int dest_mode,
   39.36 -                             int delivery_mode)
   39.37 +                             int short_hand, int dest, int dest_mode)
   39.38  {
   39.39      int result = 0;
   39.40      struct vlapic *target = vcpu_vlapic(v);
   39.41  
   39.42      HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
   39.43 -                "dest_mode 0x%x, short_hand 0x%x, delivery_mode 0x%x.",
   39.44 -                target, source, dest, dest_mode, short_hand, delivery_mode);
   39.45 -
   39.46 -    if ( unlikely(target == NULL) &&
   39.47 -         ((delivery_mode != APIC_DM_INIT) &&
   39.48 -          (delivery_mode != APIC_DM_STARTUP) &&
   39.49 -          (delivery_mode != APIC_DM_NMI)) )
   39.50 -    {
   39.51 -        HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "uninitialized target vcpu %p, "
   39.52 -                    "delivery_mode 0x%x, dest 0x%x.\n",
   39.53 -                    v, delivery_mode, dest);
   39.54 -        return result;
   39.55 -    }
   39.56 +                "dest_mode 0x%x, short_hand 0x%x\n",
   39.57 +                target, source, dest, dest_mode, short_hand);
   39.58  
   39.59      switch ( short_hand )
   39.60      {
   39.61 -    case APIC_DEST_NOSHORT:             /* no shorthand */
   39.62 -        if ( !dest_mode )   /* Physical */
   39.63 -        {
   39.64 -            result = ( ((target != NULL) ?
   39.65 -                         GET_APIC_ID(vlapic_get_reg(target, APIC_ID)):
   39.66 -                         v->vcpu_id)) == dest;
   39.67 -        }
   39.68 -        else                /* Logical */
   39.69 +    case APIC_DEST_NOSHORT:
   39.70 +        if ( dest_mode == 0 )
   39.71          {
   39.72 -            uint32_t ldr;
   39.73 -            if ( target == NULL )
   39.74 -                break;
   39.75 -            ldr = vlapic_get_reg(target, APIC_LDR);
   39.76 -            
   39.77 -            /* Flat mode */
   39.78 -            if ( vlapic_get_reg(target, APIC_DFR) == APIC_DFR_FLAT )
   39.79 -            {
   39.80 -                result = GET_APIC_LOGICAL_ID(ldr) & dest;
   39.81 -            }
   39.82 -            else
   39.83 -            {
   39.84 -                if ( (delivery_mode == APIC_DM_LOWEST) &&
   39.85 -                     (dest == 0xff) )
   39.86 -                {
   39.87 -                    /* What shall we do now? */
   39.88 -                    gdprintk(XENLOG_ERR, "Broadcast IPI with lowest priority "
   39.89 -                             "delivery mode\n");
   39.90 -                    domain_crash_synchronous();
   39.91 -                }
   39.92 -                result = ((GET_APIC_LOGICAL_ID(ldr) == (dest & 0xf)) ?
   39.93 -                          (GET_APIC_LOGICAL_ID(ldr) >> 4) & (dest >> 4) : 0);
   39.94 -            }
   39.95 +            /* Physical mode. */
   39.96 +            if ( (dest == 0xFF) || /* broadcast? */
   39.97 +                 (GET_APIC_ID(vlapic_get_reg(target, APIC_ID)) == dest) )
   39.98 +                result = 1;
   39.99 +        }
  39.100 +        else
  39.101 +        {
  39.102 +            /* Logical mode. */
  39.103 +            result = vlapic_match_logical_addr(target, dest);
  39.104          }
  39.105          break;
  39.106  
  39.107 @@ -271,16 +264,14 @@ static int vlapic_match_dest(struct vcpu
  39.108          break;
  39.109  
  39.110      default:
  39.111 +        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
  39.112          break;
  39.113      }
  39.114  
  39.115      return result;
  39.116  }
  39.117  
  39.118 -/*
  39.119 - * Add a pending IRQ into lapic.
  39.120 - * Return 1 if successfully added and 0 if discarded.
  39.121 - */
  39.122 +/* Add a pending IRQ into lapic. */
  39.123  static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
  39.124                               int vector, int level, int trig_mode)
  39.125  {
  39.126 @@ -331,7 +322,7 @@ static int vlapic_accept_irq(struct vcpu
  39.127          if ( test_and_clear_bit(_VCPUF_initialised, &v->vcpu_flags) )
  39.128          {
  39.129              gdprintk(XENLOG_ERR, "Reset hvm vcpu not supported yet\n");
  39.130 -            domain_crash_synchronous();
  39.131 +            goto exit_and_crash;
  39.132          }
  39.133          v->arch.hvm_vcpu.init_sipi_sipi_state =
  39.134              HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
  39.135 @@ -349,7 +340,7 @@ static int vlapic_accept_irq(struct vcpu
  39.136          if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
  39.137          {
  39.138              gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
  39.139 -            domain_crash_synchronous();
  39.140 +            goto exit_and_crash;
  39.141          }
  39.142  
  39.143          if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
  39.144 @@ -359,11 +350,14 @@ static int vlapic_accept_irq(struct vcpu
  39.145      default:
  39.146          gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
  39.147                   delivery_mode);
  39.148 -        domain_crash_synchronous();
  39.149 -        break;
  39.150 +        goto exit_and_crash;
  39.151      }
  39.152  
  39.153      return result;
  39.154 +
  39.155 + exit_and_crash:
  39.156 +    domain_crash(v->domain);
  39.157 +    return 0;
  39.158  }
  39.159  
  39.160  /* This function is used by both ioapic and lapic.The bitmap is for vcpu_id. */
  39.161 @@ -440,10 +434,9 @@ static void vlapic_ipi(struct vlapic *vl
  39.162  
  39.163      for_each_vcpu ( vlapic_domain(vlapic), v )
  39.164      {
  39.165 -        if ( vlapic_match_dest(v, vlapic, short_hand,
  39.166 -                               dest, dest_mode, delivery_mode) )
  39.167 +        if ( vlapic_match_dest(v, vlapic, short_hand, dest, dest_mode) )
  39.168          {
  39.169 -            if ( delivery_mode == APIC_DM_LOWEST)
  39.170 +            if ( delivery_mode == APIC_DM_LOWEST )
  39.171                  set_bit(v->vcpu_id, &lpr_map);
  39.172              else
  39.173                  vlapic_accept_irq(v, delivery_mode,
  39.174 @@ -578,14 +571,17 @@ static unsigned long vlapic_read(struct 
  39.175      default:
  39.176          gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
  39.177                   "should be 4 instead.\n", len);
  39.178 -        domain_crash_synchronous();
  39.179 -        break;
  39.180 +        goto exit_and_crash;
  39.181      }
  39.182  
  39.183      HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
  39.184                  "and the result is 0x%lx.", offset, len, result);
  39.185  
  39.186      return result;
  39.187 +
  39.188 + exit_and_crash:
  39.189 +    domain_crash(v->domain);
  39.190 +    return 0;
  39.191  }
  39.192  
  39.193  static void vlapic_write(struct vcpu *v, unsigned long address,
  39.194 @@ -625,7 +621,7 @@ static void vlapic_write(struct vcpu *v,
  39.195              {
  39.196                  gdprintk(XENLOG_ERR, "Uneven alignment error for "
  39.197                           "2-byte vlapic access\n");
  39.198 -                domain_crash_synchronous();
  39.199 +                goto exit_and_crash;
  39.200              }
  39.201  
  39.202              val = (tmp & ~(0xffff << (8*alignment))) |
  39.203 @@ -635,8 +631,9 @@ static void vlapic_write(struct vcpu *v,
  39.204          default:
  39.205              gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
  39.206                       "should be 4 instead\n", len);
  39.207 -            domain_crash_synchronous();
  39.208 -            break;
  39.209 +        exit_and_crash:
  39.210 +            domain_crash(v->domain);
  39.211 +            return;
  39.212          }
  39.213      }
  39.214  
    40.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Fri Nov 10 13:01:23 2006 -0700
    40.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 13 09:58:23 2006 -0700
    40.3 @@ -466,14 +466,14 @@ void vm_launch_fail(unsigned long eflags
    40.4  {
    40.5      unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    40.6      printk("<vm_launch_fail> error code %lx\n", error);
    40.7 -    __hvm_bug(guest_cpu_user_regs());
    40.8 +    domain_crash_synchronous();
    40.9  }
   40.10  
   40.11  void vm_resume_fail(unsigned long eflags)
   40.12  {
   40.13      unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
   40.14      printk("<vm_resume_fail> error code %lx\n", error);
   40.15 -    __hvm_bug(guest_cpu_user_regs());
   40.16 +    domain_crash_synchronous();
   40.17  }
   40.18  
   40.19  void arch_vmx_do_resume(struct vcpu *v)
    41.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Nov 10 13:01:23 2006 -0700
    41.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 13 09:58:23 2006 -0700
    41.3 @@ -151,15 +151,14 @@ static inline int long_mode_do_msr_read(
    41.4  
    41.5      case MSR_FS_BASE:
    41.6          if ( !(vmx_long_mode_enabled(v)) )
    41.7 -            /* XXX should it be GP fault */
    41.8 -            domain_crash_synchronous();
    41.9 +            goto exit_and_crash;
   41.10  
   41.11          msr_content = __vmread(GUEST_FS_BASE);
   41.12          break;
   41.13  
   41.14      case MSR_GS_BASE:
   41.15          if ( !(vmx_long_mode_enabled(v)) )
   41.16 -            domain_crash_synchronous();
   41.17 +            goto exit_and_crash;
   41.18  
   41.19          msr_content = __vmread(GUEST_GS_BASE);
   41.20          break;
   41.21 @@ -183,6 +182,11 @@ static inline int long_mode_do_msr_read(
   41.22      regs->edx = (u32)(msr_content >> 32);
   41.23  
   41.24      return 1;
   41.25 +
   41.26 + exit_and_crash:
   41.27 +    gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
   41.28 +    domain_crash(v->domain);
   41.29 +    return 1; /* handled */
   41.30  }
   41.31  
   41.32  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
   41.33 @@ -233,7 +237,7 @@ static inline int long_mode_do_msr_write
   41.34      case MSR_FS_BASE:
   41.35      case MSR_GS_BASE:
   41.36          if ( !(vmx_long_mode_enabled(v)) )
   41.37 -            domain_crash_synchronous();
   41.38 +            goto exit_and_crash;
   41.39  
   41.40          if ( !IS_CANO_ADDRESS(msr_content) )
   41.41          {
   41.42 @@ -251,7 +255,7 @@ static inline int long_mode_do_msr_write
   41.43  
   41.44      case MSR_SHADOW_GS_BASE:
   41.45          if ( !(vmx_long_mode_enabled(v)) )
   41.46 -            domain_crash_synchronous();
   41.47 +            goto exit_and_crash;
   41.48  
   41.49          v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
   41.50          wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
   41.51 @@ -267,6 +271,11 @@ static inline int long_mode_do_msr_write
   41.52      }
   41.53  
   41.54      return 1;
   41.55 +
   41.56 + exit_and_crash:
   41.57 +    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
   41.58 +    domain_crash(v->domain);
   41.59 +    return 1; /* handled */
   41.60  }
   41.61  
   41.62  static void vmx_restore_msrs(struct vcpu *v)
   41.63 @@ -726,8 +735,7 @@ static int __get_instruction_length(void
   41.64  {
   41.65      int len;
   41.66      len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
   41.67 -    if ( (len < 1) || (len > 15) )
   41.68 -        __hvm_bug(guest_cpu_user_regs());
   41.69 +    BUG_ON((len < 1) || (len > 15));
   41.70      return len;
   41.71  }
   41.72  
   41.73 @@ -823,7 +831,10 @@ static void vmx_do_cpuid(struct cpu_user
   41.74          /* 8-byte aligned valid pseudophys address from vmxassist, please. */
   41.75          if ( (value & 7) || (mfn == INVALID_MFN) ||
   41.76               !v->arch.hvm_vmx.vmxassist_enabled )
   41.77 -            domain_crash_synchronous();
   41.78 +        {
   41.79 +            domain_crash(v->domain);
   41.80 +            return;
   41.81 +        }
   41.82  
   41.83          p = map_domain_page(mfn);
   41.84          value = *((uint64_t *)(p + (value & (PAGE_SIZE - 1))));
   41.85 @@ -966,8 +977,9 @@ static int check_for_null_selector(unsig
   41.86      memset(inst, 0, MAX_INST_LEN);
   41.87      if ( inst_copy_from_guest(inst, eip, inst_len) != inst_len )
   41.88      {
   41.89 -        printk("check_for_null_selector: get guest instruction failed\n");
   41.90 -        domain_crash_synchronous();
   41.91 +        gdprintk(XENLOG_ERR, "Get guest instruction failed\n");
   41.92 +        domain_crash(current->domain);
   41.93 +        return 0;
   41.94      }
   41.95  
   41.96      for ( i = 0; i < inst_len; i++ )
   41.97 @@ -1169,7 +1181,7 @@ static void vmx_world_save(struct vcpu *
   41.98      c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
   41.99  }
  41.100  
  41.101 -static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
  41.102 +static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
  41.103  {
  41.104      unsigned long mfn, old_base_mfn;
  41.105  
  41.106 @@ -1192,10 +1204,7 @@ static void vmx_world_restore(struct vcp
  41.107           */
  41.108          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
  41.109          if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
  41.110 -        {
  41.111 -            printk("Invalid CR3 value=%x", c->cr3);
  41.112 -            domain_crash_synchronous();
  41.113 -        }
  41.114 +            goto bad_cr3;
  41.115      }
  41.116      else
  41.117      {
  41.118 @@ -1205,13 +1214,8 @@ static void vmx_world_restore(struct vcp
  41.119           */
  41.120          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
  41.121          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
  41.122 -        if ( !VALID_MFN(mfn) )
  41.123 -        {
  41.124 -            printk("Invalid CR3 value=%x", c->cr3);
  41.125 -            domain_crash_synchronous();
  41.126 -        }
  41.127 -        if ( !get_page(mfn_to_page(mfn), v->domain) )
  41.128 -            domain_crash_synchronous();
  41.129 +        if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  41.130 +            goto bad_cr3;
  41.131          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  41.132          v->arch.guest_table = pagetable_from_pfn(mfn);
  41.133          if (old_base_mfn)
  41.134 @@ -1280,6 +1284,11 @@ static void vmx_world_restore(struct vcp
  41.135  
  41.136      shadow_update_paging_modes(v);
  41.137      __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
  41.138 +    return 0;
  41.139 +
  41.140 + bad_cr3:
  41.141 +    gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
  41.142 +    return -EINVAL;
  41.143  }
  41.144  
  41.145  enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
  41.146 @@ -1320,7 +1329,8 @@ static int vmx_assist(struct vcpu *v, in
  41.147          if (cp != 0) {
  41.148              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
  41.149                  goto error;
  41.150 -            vmx_world_restore(v, &c);
  41.151 +            if ( vmx_world_restore(v, &c) != 0 )
  41.152 +                goto error;
  41.153              v->arch.hvm_vmx.vmxassist_enabled = 1;            
  41.154              return 1;
  41.155          }
  41.156 @@ -1337,7 +1347,8 @@ static int vmx_assist(struct vcpu *v, in
  41.157          if (cp != 0) {
  41.158              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
  41.159                  goto error;
  41.160 -            vmx_world_restore(v, &c);
  41.161 +            if ( vmx_world_restore(v, &c) != 0 )
  41.162 +                goto error;
  41.163              v->arch.hvm_vmx.vmxassist_enabled = 0;
  41.164              return 1;
  41.165          }
  41.166 @@ -1345,8 +1356,8 @@ static int vmx_assist(struct vcpu *v, in
  41.167      }
  41.168  
  41.169   error:
  41.170 -    printk("Failed to transfer to vmxassist\n");
  41.171 -    domain_crash_synchronous();
  41.172 +    gdprintk(XENLOG_ERR, "Failed to transfer to vmxassist\n");
  41.173 +    domain_crash(v->domain);
  41.174      return 0;
  41.175  }
  41.176  
  41.177 @@ -1390,9 +1401,10 @@ static int vmx_set_cr0(unsigned long val
  41.178          mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
  41.179          if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  41.180          {
  41.181 -            printk("Invalid CR3 value = %lx (mfn=%lx)\n", 
  41.182 -                   v->arch.hvm_vmx.cpu_cr3, mfn);
  41.183 -            domain_crash_synchronous(); /* need to take a clean path */
  41.184 +            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
  41.185 +                     v->arch.hvm_vmx.cpu_cr3, mfn);
  41.186 +            domain_crash(v->domain);
  41.187 +            return 0;
  41.188          }
  41.189  
  41.190  #if defined(__x86_64__)
  41.191 @@ -1536,12 +1548,12 @@ static int vmx_set_cr0(unsigned long val
  41.192   */
  41.193  static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
  41.194  {
  41.195 -    unsigned long value;
  41.196 -    unsigned long old_cr;
  41.197 +    unsigned long value, old_cr, old_base_mfn, mfn;
  41.198      struct vcpu *v = current;
  41.199      struct vlapic *vlapic = vcpu_vlapic(v);
  41.200  
  41.201 -    switch ( gp ) {
  41.202 +    switch ( gp )
  41.203 +    {
  41.204      CASE_GET_REG(EAX, eax);
  41.205      CASE_GET_REG(ECX, ecx);
  41.206      CASE_GET_REG(EDX, edx);
  41.207 @@ -1554,8 +1566,8 @@ static int mov_to_cr(int gp, int cr, str
  41.208          value = __vmread(GUEST_RSP);
  41.209          break;
  41.210      default:
  41.211 -        printk("invalid gp: %d\n", gp);
  41.212 -        __hvm_bug(regs);
  41.213 +        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
  41.214 +        goto exit_and_crash;
  41.215      }
  41.216  
  41.217      TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
  41.218 @@ -1564,13 +1576,12 @@ static int mov_to_cr(int gp, int cr, str
  41.219  
  41.220      HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
  41.221  
  41.222 -    switch ( cr ) {
  41.223 +    switch ( cr )
  41.224 +    {
  41.225      case 0:
  41.226          return vmx_set_cr0(value);
  41.227 +
  41.228      case 3:
  41.229 -    {
  41.230 -        unsigned long old_base_mfn, mfn;
  41.231 -
  41.232          /*
  41.233           * If paging is not enabled yet, simply copy the value to CR3.
  41.234           */
  41.235 @@ -1590,7 +1601,7 @@ static int mov_to_cr(int gp, int cr, str
  41.236               */
  41.237              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  41.238              if (mfn != pagetable_get_pfn(v->arch.guest_table))
  41.239 -                __hvm_bug(regs);
  41.240 +                goto bad_cr3;
  41.241              shadow_update_cr3(v);
  41.242          } else {
  41.243              /*
  41.244 @@ -1600,10 +1611,7 @@ static int mov_to_cr(int gp, int cr, str
  41.245              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
  41.246              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  41.247              if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  41.248 -            {
  41.249 -                printk("Invalid CR3 value=%lx\n", value);
  41.250 -                domain_crash_synchronous(); /* need to take a clean path */
  41.251 -            }
  41.252 +                goto bad_cr3;
  41.253              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  41.254              v->arch.guest_table = pagetable_from_pfn(mfn);
  41.255              if (old_base_mfn)
  41.256 @@ -1618,9 +1626,8 @@ static int mov_to_cr(int gp, int cr, str
  41.257              __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
  41.258          }
  41.259          break;
  41.260 -    }
  41.261 +
  41.262      case 4: /* CR4 */
  41.263 -    {
  41.264          old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
  41.265  
  41.266          if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
  41.267 @@ -1633,10 +1640,7 @@ static int mov_to_cr(int gp, int cr, str
  41.268                  mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
  41.269                  if ( !VALID_MFN(mfn) ||
  41.270                       !get_page(mfn_to_page(mfn), v->domain) )
  41.271 -                {
  41.272 -                    printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
  41.273 -                    domain_crash_synchronous(); /* need to take a clean path */
  41.274 -                }
  41.275 +                    goto bad_cr3;
  41.276  
  41.277                  /*
  41.278                   * Now arch.guest_table points to machine physical.
  41.279 @@ -1682,18 +1686,24 @@ static int mov_to_cr(int gp, int cr, str
  41.280          if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
  41.281              shadow_update_paging_modes(v);
  41.282          break;
  41.283 -    }
  41.284 +
  41.285      case 8:
  41.286 -    {
  41.287          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
  41.288          break;
  41.289 -    }
  41.290 +
  41.291      default:
  41.292 -        printk("invalid cr: %d\n", gp);
  41.293 -        __hvm_bug(regs);
  41.294 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
  41.295 +        domain_crash(v->domain);
  41.296 +        return 0;
  41.297      }
  41.298  
  41.299      return 1;
  41.300 +
  41.301 + bad_cr3:
  41.302 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  41.303 + exit_and_crash:
  41.304 +    domain_crash(v->domain);
  41.305 +    return 0;
  41.306  }
  41.307  
  41.308  /*
  41.309 @@ -1715,7 +1725,9 @@ static void mov_from_cr(int cr, int gp, 
  41.310          value = (value & 0xF0) >> 4;
  41.311          break;
  41.312      default:
  41.313 -        __hvm_bug(regs);
  41.314 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
  41.315 +        domain_crash(v->domain);
  41.316 +        break;
  41.317      }
  41.318  
  41.319      switch ( gp ) {
  41.320 @@ -1733,7 +1745,8 @@ static void mov_from_cr(int cr, int gp, 
  41.321          break;
  41.322      default:
  41.323          printk("invalid gp: %d\n", gp);
  41.324 -        __hvm_bug(regs);
  41.325 +        domain_crash(v->domain);
  41.326 +        break;
  41.327      }
  41.328  
  41.329      TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
  41.330 @@ -1782,9 +1795,9 @@ static int vmx_cr_access(unsigned long e
  41.331          return vmx_set_cr0(value);
  41.332          break;
  41.333      default:
  41.334 -        __hvm_bug(regs);
  41.335 -        break;
  41.336 +        BUG();
  41.337      }
  41.338 +
  41.339      return 1;
  41.340  }
  41.341  
  41.342 @@ -1814,7 +1827,7 @@ static inline void vmx_do_msr_read(struc
  41.343          msr_content = vcpu_vlapic(v)->apic_base_msr;
  41.344          break;
  41.345      default:
  41.346 -        if (long_mode_do_msr_read(regs))
  41.347 +        if ( long_mode_do_msr_read(regs) )
  41.348              return;
  41.349  
  41.350          if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
  41.351 @@ -2045,11 +2058,6 @@ asmlinkage void vmx_vmexit_handler(struc
  41.352  
  41.353      perfc_incra(vmexits, exit_reason);
  41.354  
  41.355 -    if ( (exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT) &&
  41.356 -         (exit_reason != EXIT_REASON_VMCALL) &&
  41.357 -         (exit_reason != EXIT_REASON_IO_INSTRUCTION) )
  41.358 -        HVM_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
  41.359 -
  41.360      if ( exit_reason != EXIT_REASON_EXTERNAL_INTERRUPT )
  41.361          local_irq_enable();
  41.362  
  41.363 @@ -2077,7 +2085,7 @@ asmlinkage void vmx_vmexit_handler(struc
  41.364          printk("************* VMCS Area **************\n");
  41.365          vmcs_dump_vcpu();
  41.366          printk("**************************************\n");
  41.367 -        domain_crash_synchronous();
  41.368 +        goto exit_and_crash;
  41.369      }
  41.370  
  41.371      TRACE_VMEXIT(0, exit_reason);
  41.372 @@ -2121,8 +2129,6 @@ asmlinkage void vmx_vmexit_handler(struc
  41.373  #else
  41.374          case TRAP_debug:
  41.375          {
  41.376 -            void store_cpu_user_regs(struct cpu_user_regs *regs);
  41.377 -
  41.378              if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
  41.379              {
  41.380                  store_cpu_user_regs(regs);
  41.381 @@ -2193,8 +2199,7 @@ asmlinkage void vmx_vmexit_handler(struc
  41.382          vmx_do_extint(regs);
  41.383          break;
  41.384      case EXIT_REASON_TRIPLE_FAULT:
  41.385 -        domain_crash_synchronous();
  41.386 -        break;
  41.387 +        goto exit_and_crash;
  41.388      case EXIT_REASON_PENDING_INTERRUPT:
  41.389          /* Disable the interrupt window. */
  41.390          v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
  41.391 @@ -2202,8 +2207,7 @@ asmlinkage void vmx_vmexit_handler(struc
  41.392                    v->arch.hvm_vcpu.u.vmx.exec_control);
  41.393          break;
  41.394      case EXIT_REASON_TASK_SWITCH:
  41.395 -        domain_crash_synchronous();
  41.396 -        break;
  41.397 +        goto exit_and_crash;
  41.398      case EXIT_REASON_CPUID:
  41.399          inst_len = __get_instruction_length(); /* Safe: CPUID */
  41.400          __update_guest_eip(inst_len);
  41.401 @@ -2268,8 +2272,7 @@ asmlinkage void vmx_vmexit_handler(struc
  41.402      case EXIT_REASON_MWAIT_INSTRUCTION:
  41.403      case EXIT_REASON_MONITOR_INSTRUCTION:
  41.404      case EXIT_REASON_PAUSE_INSTRUCTION:
  41.405 -        domain_crash_synchronous();
  41.406 -        break;
  41.407 +        goto exit_and_crash;
  41.408      case EXIT_REASON_VMCLEAR:
  41.409      case EXIT_REASON_VMLAUNCH:
  41.410      case EXIT_REASON_VMPTRLD:
  41.411 @@ -2289,7 +2292,10 @@ asmlinkage void vmx_vmexit_handler(struc
  41.412          break;
  41.413  
  41.414      default:
  41.415 -        domain_crash_synchronous();     /* should not happen */
  41.416 +    exit_and_crash:
  41.417 +        gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
  41.418 +        domain_crash(v->domain);
  41.419 +        break;
  41.420      }
  41.421  }
  41.422  
    42.1 --- a/xen/arch/x86/mm.c	Fri Nov 10 13:01:23 2006 -0700
    42.2 +++ b/xen/arch/x86/mm.c	Mon Nov 13 09:58:23 2006 -0700
    42.3 @@ -1717,7 +1717,7 @@ int new_guest_cr3(unsigned long mfn)
    42.4      unsigned long old_base_mfn;
    42.5  
    42.6      if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
    42.7 -        domain_crash_synchronous();
    42.8 +        return 0;
    42.9  
   42.10      if ( shadow_mode_refcounts(d) )
   42.11      {
   42.12 @@ -2134,13 +2134,14 @@ int do_mmuext_op(
   42.13  
   42.14          default:
   42.15              MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
   42.16 +            rc = -ENOSYS;
   42.17              okay = 0;
   42.18              break;
   42.19          }
   42.20  
   42.21          if ( unlikely(!okay) )
   42.22          {
   42.23 -            rc = -EINVAL;
   42.24 +            rc = rc ? rc : -EINVAL;
   42.25              break;
   42.26          }
   42.27  
   42.28 @@ -2151,9 +2152,11 @@ int do_mmuext_op(
   42.29      process_deferred_ops();
   42.30  
   42.31      /* Add incremental work we have done to the @done output parameter. */
   42.32 -    done += i;
   42.33      if ( unlikely(!guest_handle_is_null(pdone)) )
   42.34 +    {
   42.35 +        done += i;
   42.36          copy_to_guest(pdone, &done, 1);
   42.37 +    }
   42.38  
   42.39      UNLOCK_BIGLOCK(d);
   42.40      return rc;
   42.41 @@ -2351,12 +2354,14 @@ int do_mmu_update(
   42.42  
   42.43          default:
   42.44              MEM_LOG("Invalid page update command %x", cmd);
   42.45 +            rc = -ENOSYS;
   42.46 +            okay = 0;
   42.47              break;
   42.48          }
   42.49  
   42.50          if ( unlikely(!okay) )
   42.51          {
   42.52 -            rc = -EINVAL;
   42.53 +            rc = rc ? rc : -EINVAL;
   42.54              break;
   42.55          }
   42.56  
   42.57 @@ -2370,9 +2375,11 @@ int do_mmu_update(
   42.58      process_deferred_ops();
   42.59  
   42.60      /* Add incremental work we have done to the @done output parameter. */
   42.61 -    done += i;
   42.62      if ( unlikely(!guest_handle_is_null(pdone)) )
   42.63 +    {
   42.64 +        done += i;
   42.65          copy_to_guest(pdone, &done, 1);
   42.66 +    }
   42.67  
   42.68      UNLOCK_BIGLOCK(d);
   42.69      return rc;
   42.70 @@ -3106,7 +3113,7 @@ static int ptwr_emulated_update(
   42.71               * zap the PRESENT bit on the assumption the bottom half will be
   42.72               * written immediately after we return to the guest.
   42.73               */
   42.74 -            MEM_LOG("ptwr_emulate: fixing up invalid PAE PTE %"PRIpte"\n",
   42.75 +            MEM_LOG("ptwr_emulate: fixing up invalid PAE PTE %"PRIpte,
   42.76                      l1e_get_intpte(nl1e));
   42.77              l1e_remove_flags(nl1e, _PAGE_PRESENT);
   42.78          }
    43.1 --- a/xen/arch/x86/oprofile/xenoprof.c	Fri Nov 10 13:01:23 2006 -0700
    43.2 +++ b/xen/arch/x86/oprofile/xenoprof.c	Mon Nov 13 09:58:23 2006 -0700
    43.3 @@ -684,7 +684,7 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
    43.4          break;
    43.5  
    43.6      default:
    43.7 -        ret = -EINVAL;
    43.8 +        ret = -ENOSYS;
    43.9      }
   43.10  
   43.11      spin_unlock(&xenoprof_lock);
    44.1 --- a/xen/arch/x86/physdev.c	Fri Nov 10 13:01:23 2006 -0700
    44.2 +++ b/xen/arch/x86/physdev.c	Mon Nov 13 09:58:23 2006 -0700
    44.3 @@ -135,7 +135,7 @@ long do_physdev_op(int cmd, XEN_GUEST_HA
    44.4      }
    44.5  
    44.6      default:
    44.7 -        ret = -EINVAL;
    44.8 +        ret = -ENOSYS;
    44.9          break;
   44.10      }
   44.11  
    45.1 --- a/xen/arch/x86/traps.c	Fri Nov 10 13:01:23 2006 -0700
    45.2 +++ b/xen/arch/x86/traps.c	Mon Nov 13 09:58:23 2006 -0700
    45.3 @@ -1310,8 +1310,10 @@ static int emulate_privileged_op(struct 
    45.4  
    45.5          case 3: /* Write CR3 */
    45.6              LOCK_BIGLOCK(v->domain);
    45.7 -            (void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
    45.8 +            rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
    45.9              UNLOCK_BIGLOCK(v->domain);
   45.10 +            if ( rc == 0 ) /* not okay */
   45.11 +                goto fail;
   45.12              break;
   45.13  
   45.14          case 4:
    46.1 --- a/xen/arch/x86/x86_32/traps.c	Fri Nov 10 13:01:23 2006 -0700
    46.2 +++ b/xen/arch/x86/x86_32/traps.c	Mon Nov 13 09:58:23 2006 -0700
    46.3 @@ -179,16 +179,16 @@ unsigned long do_iret(void)
    46.4  
    46.5      /* Check worst-case stack frame for overlap with Xen protected area. */
    46.6      if ( unlikely(!access_ok(regs->esp, 40)) )
    46.7 -        domain_crash_synchronous();
    46.8 +        goto exit_and_crash;
    46.9  
   46.10      /* Pop and restore EAX (clobbered by hypercall). */
   46.11      if ( unlikely(__copy_from_user(&regs->eax, (void __user *)regs->esp, 4)) )
   46.12 -        domain_crash_synchronous();
   46.13 +        goto exit_and_crash;
   46.14      regs->esp += 4;
   46.15  
   46.16      /* Pop and restore CS and EIP. */
   46.17      if ( unlikely(__copy_from_user(&regs->eip, (void __user *)regs->esp, 8)) )
   46.18 -        domain_crash_synchronous();
   46.19 +        goto exit_and_crash;
   46.20      regs->esp += 8;
   46.21  
   46.22      /*
   46.23 @@ -196,7 +196,7 @@ unsigned long do_iret(void)
   46.24       * to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
   46.25       */
   46.26      if ( unlikely(__copy_from_user(&eflags, (void __user *)regs->esp, 4)) )
   46.27 -        domain_crash_synchronous();
   46.28 +        goto exit_and_crash;
   46.29      regs->esp += 4;
   46.30      regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
   46.31  
   46.32 @@ -204,17 +204,17 @@ unsigned long do_iret(void)
   46.33      {
   46.34          /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
   46.35          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
   46.36 -            domain_crash_synchronous();
   46.37 +            goto exit_and_crash;
   46.38      }
   46.39      else if ( unlikely(ring_0(regs)) )
   46.40      {
   46.41 -        domain_crash_synchronous();
   46.42 +        goto exit_and_crash;
   46.43      }
   46.44      else if ( !ring_1(regs) )
   46.45      {
   46.46          /* Return to ring 2/3: pop and restore ESP and SS. */
   46.47          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
   46.48 -            domain_crash_synchronous();
   46.49 +            goto exit_and_crash;
   46.50      }
   46.51  
   46.52      /* No longer in NMI context. */
   46.53 @@ -228,6 +228,11 @@ unsigned long do_iret(void)
   46.54       * value.
   46.55       */
   46.56      return regs->eax;
   46.57 +
   46.58 + exit_and_crash:
   46.59 +    gdprintk(XENLOG_ERR, "Fatal error\n");
   46.60 +    domain_crash(current->domain);
   46.61 +    return 0;
   46.62  }
   46.63  
   46.64  #include <asm/asm_defns.h>
   46.65 @@ -355,7 +360,7 @@ static long register_guest_callback(stru
   46.66          break;
   46.67  
   46.68      default:
   46.69 -        ret = -EINVAL;
   46.70 +        ret = -ENOSYS;
   46.71          break;
   46.72      }
   46.73  
   46.74 @@ -368,12 +373,20 @@ static long unregister_guest_callback(st
   46.75  
   46.76      switch ( unreg->type )
   46.77      {
   46.78 +    case CALLBACKTYPE_event:
   46.79 +    case CALLBACKTYPE_failsafe:
   46.80 +#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
   46.81 +    case CALLBACKTYPE_sysenter:
   46.82 +#endif
   46.83 +        ret = -EINVAL;
   46.84 +        break;
   46.85 +
   46.86      case CALLBACKTYPE_nmi:
   46.87          ret = unregister_guest_nmi_callback();
   46.88          break;
   46.89  
   46.90      default:
   46.91 -        ret = -EINVAL;
   46.92 +        ret = -ENOSYS;
   46.93          break;
   46.94      }
   46.95  
   46.96 @@ -412,7 +425,7 @@ long do_callback_op(int cmd, XEN_GUEST_H
   46.97      break;
   46.98  
   46.99      default:
  46.100 -        ret = -EINVAL;
  46.101 +        ret = -ENOSYS;
  46.102          break;
  46.103      }
  46.104  
    47.1 --- a/xen/arch/x86/x86_64/entry.S	Fri Nov 10 13:01:23 2006 -0700
    47.2 +++ b/xen/arch/x86/x86_64/entry.S	Mon Nov 13 09:58:23 2006 -0700
    47.3 @@ -45,7 +45,7 @@ restore_all_guest:
    47.4          addq  $8,%rsp
    47.5          popq  %rcx                    # RIP
    47.6          popq  %r11                    # CS
    47.7 -        cmpw  $__GUEST_CS32,%r11
    47.8 +        cmpw  $FLAT_KERNEL_CS32,%r11
    47.9          popq  %r11                    # RFLAGS
   47.10          popq  %rsp                    # RSP
   47.11          je    1f
   47.12 @@ -119,7 +119,7 @@ restore_all_xen:
   47.13          ALIGN
   47.14  ENTRY(syscall_enter)
   47.15          sti
   47.16 -        movl  $__GUEST_SS,24(%rsp)
   47.17 +        movl  $FLAT_KERNEL_SS,24(%rsp)
   47.18          pushq %rcx
   47.19          pushq $0
   47.20          movl  $TRAP_syscall,4(%rsp)
   47.21 @@ -298,9 +298,9 @@ FLT13:  movq  %rax,(%rsi)               
   47.22          movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
   47.23          andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
   47.24                   X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
   47.25 -        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
   47.26 +        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
   47.27          movq  %rsi,UREGS_rsp+8(%rsp)
   47.28 -        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
   47.29 +        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
   47.30          movq  TRAPBOUNCE_eip(%rdx),%rax
   47.31          testq %rax,%rax
   47.32          jz    domain_crash_synchronous
    48.1 --- a/xen/arch/x86/x86_64/mm.c	Fri Nov 10 13:01:23 2006 -0700
    48.2 +++ b/xen/arch/x86/x86_64/mm.c	Mon Nov 13 09:58:23 2006 -0700
    48.3 @@ -76,17 +76,17 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
    48.4  
    48.5  void __init paging_init(void)
    48.6  {
    48.7 -    unsigned long i, mpt_size;
    48.8 +    unsigned long i, mpt_size, va;
    48.9      l3_pgentry_t *l3_ro_mpt;
   48.10      l2_pgentry_t *l2_ro_mpt = NULL;
   48.11 -    struct page_info *pg;
   48.12 +    struct page_info *l1_pg, *l2_pg;
   48.13  
   48.14      /* Create user-accessible L2 directory to map the MPT for guests. */
   48.15 -    l3_ro_mpt = alloc_xenheap_page();
   48.16 -    clear_page(l3_ro_mpt);
   48.17 +    if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
   48.18 +        goto nomem;
   48.19 +    l3_ro_mpt = clear_page(page_to_virt(l2_pg));
   48.20      idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
   48.21 -        l4e_from_page(
   48.22 -            virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
   48.23 +        l4e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
   48.24  
   48.25      /*
   48.26       * Allocate and map the machine-to-phys table.
   48.27 @@ -96,33 +96,37 @@ void __init paging_init(void)
   48.28      mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
   48.29      for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
   48.30      {
   48.31 -        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
   48.32 -            panic("Not enough memory for m2p table\n");
   48.33 +        if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
   48.34 +            goto nomem;
   48.35          map_pages_to_xen(
   48.36 -            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg), 
   48.37 +            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
   48.38 +            page_to_mfn(l1_pg), 
   48.39              1UL << PAGETABLE_ORDER,
   48.40              PAGE_HYPERVISOR);
   48.41          memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
   48.42                 1UL << L2_PAGETABLE_SHIFT);
   48.43          if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
   48.44          {
   48.45 -            unsigned long va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
   48.46 -
   48.47 -            l2_ro_mpt = alloc_xenheap_page();
   48.48 -            clear_page(l2_ro_mpt);
   48.49 +            if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
   48.50 +                goto nomem;
   48.51 +            va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
   48.52 +            l2_ro_mpt = clear_page(page_to_virt(l2_pg));
   48.53              l3_ro_mpt[l3_table_offset(va)] =
   48.54 -                l3e_from_page(
   48.55 -                    virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
   48.56 +                l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
   48.57              l2_ro_mpt += l2_table_offset(va);
   48.58          }
   48.59          /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
   48.60          *l2_ro_mpt++ = l2e_from_page(
   48.61 -            pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
   48.62 +            l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
   48.63      }
   48.64  
   48.65      /* Set up linear page table mapping. */
   48.66      idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
   48.67          l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
   48.68 +    return;
   48.69 +
   48.70 + nomem:
   48.71 +    panic("Not enough memory for m2p table\n");    
   48.72  }
   48.73  
   48.74  void __init setup_idle_pagetable(void)
    49.1 --- a/xen/arch/x86/x86_64/traps.c	Fri Nov 10 13:01:23 2006 -0700
    49.2 +++ b/xen/arch/x86/x86_64/traps.c	Mon Nov 13 09:58:23 2006 -0700
    49.3 @@ -200,7 +200,7 @@ unsigned long do_iret(void)
    49.4      {
    49.5          gdprintk(XENLOG_ERR, "Fault while reading IRET context from "
    49.6                  "guest stack\n");
    49.7 -        domain_crash_synchronous();
    49.8 +        goto exit_and_crash;
    49.9      }
   49.10  
   49.11      /* Returning to user mode? */
   49.12 @@ -210,7 +210,7 @@ unsigned long do_iret(void)
   49.13          {
   49.14              gdprintk(XENLOG_ERR, "Guest switching to user mode with no "
   49.15                      "user page tables\n");
   49.16 -            domain_crash_synchronous();
   49.17 +            goto exit_and_crash;
   49.18          }
   49.19          toggle_guest_mode(v);
   49.20      }
   49.21 @@ -236,6 +236,11 @@ unsigned long do_iret(void)
   49.22  
   49.23      /* Saved %rax gets written back to regs->rax in entry.S. */
   49.24      return iret_saved.rax;
   49.25 +
   49.26 + exit_and_crash:
   49.27 +    gdprintk(XENLOG_ERR, "Fatal error\n");
   49.28 +    domain_crash(v->domain);
   49.29 +    return 0;
   49.30  }
   49.31  
   49.32  asmlinkage void syscall_enter(void);
   49.33 @@ -285,9 +290,9 @@ void __init percpu_traps_init(void)
   49.34      stack[14] = 0x41;
   49.35      stack[15] = 0x53;
   49.36  
   49.37 -    /* pushq $__GUEST_CS64 */
   49.38 +    /* pushq $FLAT_KERNEL_CS64 */
   49.39      stack[16] = 0x68;
   49.40 -    *(u32 *)&stack[17] = __GUEST_CS64;
   49.41 +    *(u32 *)&stack[17] = FLAT_KERNEL_CS64;
   49.42  
   49.43      /* jmp syscall_enter */
   49.44      stack[21] = 0xe9;
   49.45 @@ -317,9 +322,9 @@ void __init percpu_traps_init(void)
   49.46      stack[14] = 0x41;
   49.47      stack[15] = 0x53;
   49.48  
   49.49 -    /* pushq $__GUEST_CS32 */
   49.50 +    /* pushq $FLAT_KERNEL_CS32 */
   49.51      stack[16] = 0x68;
   49.52 -    *(u32 *)&stack[17] = __GUEST_CS32;
   49.53 +    *(u32 *)&stack[17] = FLAT_KERNEL_CS32;
   49.54  
   49.55      /* jmp syscall_enter */
   49.56      stack[21] = 0xe9;
   49.57 @@ -369,7 +374,7 @@ static long register_guest_callback(stru
   49.58          break;
   49.59  
   49.60      default:
   49.61 -        ret = -EINVAL;
   49.62 +        ret = -ENOSYS;
   49.63          break;
   49.64      }
   49.65  
   49.66 @@ -382,12 +387,18 @@ static long unregister_guest_callback(st
   49.67  
   49.68      switch ( unreg->type )
   49.69      {
   49.70 +    case CALLBACKTYPE_event:
   49.71 +    case CALLBACKTYPE_failsafe:
   49.72 +    case CALLBACKTYPE_syscall:
   49.73 +        ret = -EINVAL;
   49.74 +        break;
   49.75 +
   49.76      case CALLBACKTYPE_nmi:
   49.77          ret = unregister_guest_nmi_callback();
   49.78          break;
   49.79  
   49.80      default:
   49.81 -        ret = -EINVAL;
   49.82 +        ret = -ENOSYS;
   49.83          break;
   49.84      }
   49.85  
   49.86 @@ -426,7 +437,7 @@ long do_callback_op(int cmd, XEN_GUEST_H
   49.87      break;
   49.88  
   49.89      default:
   49.90 -        ret = -EINVAL;
   49.91 +        ret = -ENOSYS;
   49.92          break;
   49.93      }
   49.94  
    50.1 --- a/xen/common/domain.c	Fri Nov 10 13:01:23 2006 -0700
    50.2 +++ b/xen/common/domain.c	Mon Nov 13 09:58:23 2006 -0700
    50.3 @@ -22,6 +22,7 @@
    50.4  #include <xen/delay.h>
    50.5  #include <xen/shutdown.h>
    50.6  #include <xen/percpu.h>
    50.7 +#include <xen/multicall.h>
    50.8  #include <asm/debugger.h>
    50.9  #include <public/sched.h>
   50.10  #include <public/vcpu.h>
   50.11 @@ -256,6 +257,10 @@ void __domain_crash(struct domain *d)
   50.12  void __domain_crash_synchronous(void)
   50.13  {
   50.14      __domain_crash(current->domain);
   50.15 +
   50.16 +    /* Flush multicall state before dying. */
   50.17 +    this_cpu(mc_state).flags = 0;
   50.18 +
   50.19      for ( ; ; )
   50.20          do_softirq();
   50.21  }
    51.1 --- a/xen/include/asm-x86/bitops.h	Fri Nov 10 13:01:23 2006 -0700
    51.2 +++ b/xen/include/asm-x86/bitops.h	Mon Nov 13 09:58:23 2006 -0700
    51.3 @@ -246,7 +246,7 @@ static __inline__ int constant_test_bit(
    51.4  	return ((1U << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
    51.5  }
    51.6  
    51.7 -static __inline__ int variable_test_bit(int nr, volatile void * addr)
    51.8 +static __inline__ int variable_test_bit(int nr, const volatile void * addr)
    51.9  {
   51.10  	int oldbit;
   51.11  
    52.1 --- a/xen/include/asm-x86/config.h	Fri Nov 10 13:01:23 2006 -0700
    52.2 +++ b/xen/include/asm-x86/config.h	Mon Nov 13 09:58:23 2006 -0700
    52.3 @@ -194,12 +194,6 @@ extern unsigned long _end; /* standard E
    52.4  #define __HYPERVISOR_DS32 0xe018
    52.5  #define __HYPERVISOR_DS   __HYPERVISOR_DS64
    52.6  
    52.7 -#define __GUEST_CS64      0xe033
    52.8 -#define __GUEST_CS32      0xe023
    52.9 -#define __GUEST_CS        __GUEST_CS64
   52.10 -#define __GUEST_DS        0x0000
   52.11 -#define __GUEST_SS        0xe02b
   52.12 -
   52.13  /* For generic assembly code: use macros to define operation/operand sizes. */
   52.14  #define __OS          "q"  /* Operation Suffix */
   52.15  #define __OP          "r"  /* Operand Prefix */
    53.1 --- a/xen/include/asm-x86/desc.h	Fri Nov 10 13:01:23 2006 -0700
    53.2 +++ b/xen/include/asm-x86/desc.h	Mon Nov 13 09:58:23 2006 -0700
    53.3 @@ -155,17 +155,12 @@ do { \
    53.4  #endif
    53.5  
    53.6  extern struct desc_struct gdt_table[];
    53.7 -extern struct desc_struct *gdt;
    53.8 -extern idt_entry_t        *idt;
    53.9  
   53.10  struct Xgt_desc_struct {
   53.11      unsigned short size;
   53.12      unsigned long address __attribute__((packed));
   53.13  };
   53.14  
   53.15 -#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
   53.16 -#define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
   53.17 -
   53.18  extern void set_intr_gate(unsigned int irq, void * addr);
   53.19  extern void set_system_gate(unsigned int n, void *addr);
   53.20  extern void set_task_gate(unsigned int n, unsigned int sel);
    54.1 --- a/xen/include/asm-x86/hvm/support.h	Fri Nov 10 13:01:23 2006 -0700
    54.2 +++ b/xen/include/asm-x86/hvm/support.h	Mon Nov 13 09:58:23 2006 -0700
    54.3 @@ -118,13 +118,6 @@ extern unsigned int opt_hvm_debug_level;
    54.4  #define HVM_DBG_LOG(level, _f, _a...)
    54.5  #endif
    54.6  
    54.7 -#define  __hvm_bug(regs)                                        \
    54.8 -    do {                                                        \
    54.9 -        printk("__hvm_bug at %s:%d\n", __FILE__, __LINE__);     \
   54.10 -        show_execution_state(regs);                             \
   54.11 -        domain_crash_synchronous();                             \
   54.12 -    } while (0)
   54.13 -
   54.14  #define TRACE_VMEXIT(index, value)                              \
   54.15      current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
   54.16  
    55.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Fri Nov 10 13:01:23 2006 -0700
    55.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Mon Nov 13 09:58:23 2006 -0700
    55.3 @@ -90,4 +90,6 @@ struct vlapic *apic_round_robin(
    55.4  
    55.5  s_time_t get_apictime_scheduled(struct vcpu *v);
    55.6  
    55.7 +int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda);
    55.8 +
    55.9  #endif /* __ASM_X86_HVM_VLAPIC_H__ */
    56.1 --- a/xen/include/public/elfnote.h	Fri Nov 10 13:01:23 2006 -0700
    56.2 +++ b/xen/include/public/elfnote.h	Mon Nov 13 09:58:23 2006 -0700
    56.3 @@ -138,6 +138,15 @@
    56.4   */
    56.5  #define XEN_ELFNOTE_BSD_SYMTAB    11
    56.6  
    56.7 +/*
    56.8 + * The lowest address the hypervisor hole can begin at (numeric).
    56.9 + *
   56.10 + * This must not be set higher than HYPERVISOR_VIRT_START. Its presence
   56.11 + * also indicates to the hypervisor that the kernel can deal with the
   56.12 + * hole starting at a higher address.
   56.13 + */
   56.14 +#define XEN_ELFNOTE_HV_START_LOW  12
   56.15 +
   56.16  #endif /* __XEN_PUBLIC_ELFNOTE_H__ */
   56.17  
   56.18  /*
    57.1 --- a/xen/include/public/hvm/ioreq.h	Fri Nov 10 13:01:23 2006 -0700
    57.2 +++ b/xen/include/public/hvm/ioreq.h	Mon Nov 13 09:58:23 2006 -0700
    57.3 @@ -80,7 +80,7 @@ struct buffered_iopage {
    57.4  };            /* sizeof this structure must be in one page */
    57.5  typedef struct buffered_iopage buffered_iopage_t;
    57.6  
    57.7 -#define ACPI_PM1A_EVT_BLK_ADDRESS           0x000000000000c010
    57.8 +#define ACPI_PM1A_EVT_BLK_ADDRESS           0x0000000000001f40
    57.9  #define ACPI_PM1A_CNT_BLK_ADDRESS           (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
   57.10  #define ACPI_PM_TMR_BLK_ADDRESS             (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
   57.11