ia64/xen-unstable

changeset 4706:65b28c74cec2

bitkeeper revision 1.1393 (4271e39733erltTpi7grGFwvE3eDmw)

Merge http://xen.bkbits.net:8080/xeno-unstable.bk
into gandalf.hpl.hp.com:/var/bk/xeno-unstable.bk
author xenbk@gandalf.hpl.hp.com
date Fri Apr 29 07:34:47 2005 +0000 (2005-04-29)
parents 397aeb925c68 75a775c40caf
children 4aec39caa003
files .rootkeys linux-2.4.29-xen-sparse/arch/xen/kernel/pci-dma.c linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h tools/libxc/xc.h tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_linux_save.c tools/libxc/xc_plan9_build.c tools/libxc/xc_ptrace.c tools/libxc/xc_vmx_build.c tools/xentrace/xenctx.c xen/arch/ia64/dom0_ops.c xen/arch/ia64/domain.c xen/arch/ia64/xenmisc.c xen/arch/x86/apic.c xen/arch/x86/boot/x86_32.S xen/arch/x86/boot/x86_64.S xen/arch/x86/cdb.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/extable.c xen/arch/x86/i387.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/nmi.c xen/arch/x86/shadow.c xen/arch/x86/time.c xen/arch/x86/trampoline.S xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/call_with_regs.S xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/arch/x86/x86_emulate.c xen/common/dom0_ops.c xen/common/domain.c xen/common/keyhandler.c xen/common/schedule.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-ia64/debugger.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/regs.h xen/include/asm-x86/apic.h xen/include/asm-x86/config.h xen/include/asm-x86/debugger.h xen/include/asm-x86/domain.h xen/include/asm-x86/ldt.h xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_vmcs.h xen/include/asm-x86/x86_32/asm_defns.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_32/regs.h 
xen/include/asm-x86/x86_64/asm_defns.h xen/include/asm-x86/x86_64/current.h xen/include/asm-x86/x86_64/regs.h xen/include/asm-x86/x86_emulate.h xen/include/public/arch-ia64.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/dom0_ops.h xen/include/xen/domain.h xen/include/xen/irq.h xen/include/xen/keyhandler.h xen/include/xen/sched.h xen/include/xen/serial.h
line diff
     1.1 --- a/.rootkeys	Thu Apr 28 13:54:01 2005 +0000
     1.2 +++ b/.rootkeys	Fri Apr 29 07:34:47 2005 +0000
     1.3 @@ -152,6 +152,7 @@ 3e5a4e65RMGcuA-HCn3-wNx3fFQwdg linux-2.4
     1.4  4241709bNBs1q4Ss32YW0CyFVOGhEg linux-2.4.29-xen-sparse/arch/xen/kernel/ioport.c
     1.5  3e5a4e653U6cELGv528IxOLHvCq8iA linux-2.4.29-xen-sparse/arch/xen/kernel/irq.c
     1.6  3e5a4e65muT6SU3ck47IP87Q7Ti5hA linux-2.4.29-xen-sparse/arch/xen/kernel/ldt.c
     1.7 +4270e964iKFC24KiVm6jC5Eo7MxV6w linux-2.4.29-xen-sparse/arch/xen/kernel/pci-dma.c
     1.8  4051db95N9N99FjsRwi49YKUNHWI8A linux-2.4.29-xen-sparse/arch/xen/kernel/pci-pc.c
     1.9  3e5a4e65IGt3WwQDNiL4h-gYWgNTWQ linux-2.4.29-xen-sparse/arch/xen/kernel/process.c
    1.10  3e5a4e66tR-qJMLj3MppcKqmvuI2XQ linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/linux-2.4.29-xen-sparse/arch/xen/kernel/pci-dma.c	Fri Apr 29 07:34:47 2005 +0000
     2.3 @@ -0,0 +1,137 @@
     2.4 +/*
     2.5 + * Dynamic DMA mapping support.
     2.6 + *
     2.7 + * On i386 there is no hardware dynamic DMA address translation,
     2.8 + * so consistent alloc/free are merely page allocation/freeing.
     2.9 + * The rest of the dynamic DMA mapping interface is implemented
    2.10 + * in asm/pci.h.
    2.11 + */
    2.12 +
    2.13 +#include <linux/types.h>
    2.14 +#include <linux/mm.h>
    2.15 +#include <linux/string.h>
    2.16 +#include <linux/pci.h>
    2.17 +#include <linux/version.h>
    2.18 +#include <asm/io.h>
    2.19 +#include <asm-xen/balloon.h>
    2.20 +
    2.21 +#define pte_offset_kernel pte_offset
    2.22 +
    2.23 +struct dma_coherent_mem {
    2.24 +	void		*virt_base;
    2.25 +	u32		device_base;
    2.26 +	int		size;
    2.27 +	int		flags;
    2.28 +	unsigned long	*bitmap;
    2.29 +};
    2.30 +
    2.31 +static void
    2.32 +xen_contig_memory(unsigned long vstart, unsigned int order)
    2.33 +{
    2.34 +	/*
    2.35 +	 * Ensure multi-page extents are contiguous in machine memory.
    2.36 +	 * This code could be cleaned up some, and the number of
    2.37 +	 * hypercalls reduced.
    2.38 +	 */
    2.39 +	pgd_t         *pgd; 
    2.40 +	pmd_t         *pmd;
    2.41 +	pte_t         *pte;
    2.42 +	unsigned long  pfn, i, flags;
    2.43 +
    2.44 +	scrub_pages(vstart, 1 << order);
    2.45 +
    2.46 +        balloon_lock(flags);
    2.47 +
    2.48 +	/* 1. Zap current PTEs, giving away the underlying pages. */
    2.49 +	for (i = 0; i < (1<<order); i++) {
    2.50 +		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
    2.51 +		pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
    2.52 +		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    2.53 +		pfn = pte->pte_low >> PAGE_SHIFT;
    2.54 +		HYPERVISOR_update_va_mapping(
    2.55 +			vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
    2.56 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
    2.57 +			INVALID_P2M_ENTRY;
    2.58 +		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
    2.59 +					  &pfn, 1, 0) != 1) BUG();
    2.60 +	}
    2.61 +	/* 2. Get a new contiguous memory extent. */
    2.62 +	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
    2.63 +				  &pfn, 1, order) != 1) BUG();
    2.64 +	/* 3. Map the new extent in place of old pages. */
    2.65 +	for (i = 0; i < (1<<order); i++) {
    2.66 +		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
    2.67 +		pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
    2.68 +		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    2.69 +		HYPERVISOR_update_va_mapping(
    2.70 +			vstart + (i*PAGE_SIZE),
    2.71 +			__pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
    2.72 +		xen_machphys_update(
    2.73 +			pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
    2.74 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
    2.75 +			pfn+i;
    2.76 +	}
    2.77 +	/* Flush updates through and flush the TLB. */
    2.78 +	flush_tlb_all();
    2.79 +
    2.80 +        balloon_unlock(flags);
    2.81 +}
    2.82 +
    2.83 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    2.84 +void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
    2.85 +			   dma_addr_t *dma_handle)
    2.86 +#else
    2.87 +void *dma_alloc_coherent(struct device *dev, size_t size,
    2.88 +			   dma_addr_t *dma_handle, int gfp)
    2.89 +#endif
    2.90 +{
    2.91 +	void *ret;
    2.92 +	unsigned int order = get_order(size);
    2.93 +	unsigned long vstart;
    2.94 +
    2.95 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    2.96 +	int gfp = GFP_ATOMIC;
    2.97 +
    2.98 +	if (hwdev == NULL || ((u32)hwdev->dma_mask < 0xffffffff))
    2.99 +		gfp |= GFP_DMA;
   2.100 +#else
   2.101 +	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
   2.102 +
   2.103 +	/* ignore region specifiers */
   2.104 +	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
   2.105 +
   2.106 +	if (mem) {
   2.107 +		int page = bitmap_find_free_region(mem->bitmap, mem->size,
   2.108 +						     order);
   2.109 +		if (page >= 0) {
   2.110 +			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
   2.111 +			ret = mem->virt_base + (page << PAGE_SHIFT);
   2.112 +			memset(ret, 0, size);
   2.113 +			return ret;
   2.114 +		}
   2.115 +		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
   2.116 +			return NULL;
   2.117 +	}
   2.118 +
   2.119 +	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
   2.120 +		gfp |= GFP_DMA;
   2.121 +#endif
   2.122 +
   2.123 +	vstart = __get_free_pages(gfp, order);
   2.124 +	ret = (void *)vstart;
   2.125 +	if (ret == NULL)
   2.126 +		return ret;
   2.127 +
   2.128 +	xen_contig_memory(vstart, order);
   2.129 +
   2.130 +	memset(ret, 0, size);
   2.131 +	*dma_handle = virt_to_bus(ret);
   2.132 +
   2.133 +	return ret;
   2.134 +}
   2.135 +
   2.136 +void pci_free_consistent(struct pci_dev *hwdev, size_t size,
   2.137 +			 void *vaddr, dma_addr_t dma_handle)
   2.138 +{
   2.139 +	free_pages((unsigned long)vaddr, get_order(size));
   2.140 +}
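
[annotation] This new 2.4 file mirrors the 2.6 implementation touched below: consistent allocations must be machine-contiguous under Xen, so xen_contig_memory() hands the freshly allocated pages back to the hypervisor and replaces them with a single extent of the requested order. A minimal usage sketch, assuming only the 2.4 PCI DMA interface implemented above (the driver names and the 4 KB size are hypothetical):

    #include <linux/pci.h>
    #include <linux/errno.h>

    static void      *mydev_buf;   /* hypothetical driver state */
    static dma_addr_t mydev_bus;

    static int mydev_setup(struct pci_dev *pdev)
    {
        /* Returns a kernel virtual address and fills in the bus address;
         * on Xen the backing pages have already been exchanged for a
         * machine-contiguous extent by xen_contig_memory(). */
        mydev_buf = pci_alloc_consistent(pdev, 4096, &mydev_bus);
        if (mydev_buf == NULL)
            return -ENOMEM;
        /* ... program the device with mydev_bus ... */
        return 0;
    }

    static void mydev_teardown(struct pci_dev *pdev)
    {
        pci_free_consistent(pdev, 4096, mydev_buf, mydev_bus);
    }
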
     3.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Thu Apr 28 13:54:01 2005 +0000
     3.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Fri Apr 29 07:34:47 2005 +0000
     3.3 @@ -600,7 +600,7 @@ void __init cpu_init (void)
     3.4  	 * Set up the per-thread TLS descriptor cache:
     3.5  	 */
     3.6  	memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
     3.7 -		GDT_ENTRY_TLS_ENTRIES * 8);
     3.8 +	       GDT_ENTRY_TLS_ENTRIES * 8);
     3.9  
    3.10  	cpu_gdt_init(&cpu_gdt_descr[cpu]);
    3.11  
     4.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S	Thu Apr 28 13:54:01 2005 +0000
     4.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S	Fri Apr 29 07:34:47 2005 +0000
     4.3 @@ -2,7 +2,9 @@
     4.4  #include <linux/config.h>
     4.5  
     4.6  .section __xen_guest
     4.7 -	.ascii	"GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xC0000000"
     4.8 +	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
     4.9 +	.ascii	",XEN_VER=3.0"
    4.10 +	.ascii	",VIRT_BASE=0xC0000000"
    4.11  	.ascii	",LOADER=generic"
    4.12  	.byte	0
    4.13  
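
[annotation] The guest-info string is only reflowed here: consecutive .ascii directives assemble to adjacent bytes, so the three directives emit exactly the same section contents as the old single line, still terminated by the trailing .byte 0. The same effect in C, for comparison (illustrative only):

    /* Adjacent string literals concatenate just like consecutive
     * .ascii directives, so the split yields identical bytes. */
    static const char xen_guest_info[] =
        "GUEST_OS=linux,GUEST_VER=2.6"
        ",XEN_VER=3.0"
        ",VIRT_BASE=0xC0000000"
        ",LOADER=generic";   /* C supplies the trailing NUL, like .byte 0 */
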
     5.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Thu Apr 28 13:54:01 2005 +0000
     5.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c	Fri Apr 29 07:34:47 2005 +0000
     5.3 @@ -44,13 +44,13 @@ xen_contig_memory(unsigned long vstart, 
     5.4  
     5.5  	/* 1. Zap current PTEs, giving away the underlying pages. */
     5.6  	for (i = 0; i < (1<<order); i++) {
     5.7 -		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
     5.8 -		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
     5.9 -		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    5.10 -		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    5.11 -		pfn = pte->pte_low >> PAGE_SHIFT;
    5.12 -		HYPERVISOR_update_va_mapping(
    5.13 -			vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
    5.14 +		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
    5.15 +		pud = pud_offset(pgd, vstart + (i*PAGE_SIZE));
    5.16 +		pmd = pmd_offset(pud, vstart + (i*PAGE_SIZE));
    5.17 +		pte = pte_offset_kernel(pmd, vstart + (i*PAGE_SIZE));
    5.18 +		pfn = pte_val_ma(*pte) >> PAGE_SHIFT;
    5.19 +		HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    5.20 +					     __pte_ma(0), 0);
    5.21  		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
    5.22  			INVALID_P2M_ENTRY;
    5.23  		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
    5.24 @@ -61,17 +61,10 @@ xen_contig_memory(unsigned long vstart, 
    5.25  				  &pfn, 1, order) != 1) BUG();
    5.26  	/* 3. Map the new extent in place of old pages. */
    5.27  	for (i = 0; i < (1<<order); i++) {
    5.28 -		pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
    5.29 -		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
    5.30 -		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
    5.31 -		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
    5.32 -		HYPERVISOR_update_va_mapping(
    5.33 -			vstart + (i*PAGE_SIZE),
    5.34 +		HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
    5.35  			__pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
    5.36 -		xen_machphys_update(
    5.37 -			pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
    5.38 -		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
    5.39 -			pfn+i;
    5.40 +		xen_machphys_update(pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
    5.41 +		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = pfn+i;
    5.42  	}
    5.43  	flush_tlb_all();
    5.44  
    5.45 @@ -82,11 +75,9 @@ void *dma_alloc_coherent(struct device *
    5.46  			   dma_addr_t *dma_handle, int gfp)
    5.47  {
    5.48  	void *ret;
    5.49 +	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    5.50  	unsigned int order = get_order(size);
    5.51  	unsigned long vstart;
    5.52 -
    5.53 -	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    5.54 -
    5.55  	/* ignore region specifiers */
    5.56  	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
    5.57  
    5.58 @@ -108,14 +99,13 @@ void *dma_alloc_coherent(struct device *
    5.59  
    5.60  	vstart = __get_free_pages(gfp, order);
    5.61  	ret = (void *)vstart;
    5.62 -	if (ret == NULL)
    5.63 -		return ret;
    5.64  
    5.65 -	xen_contig_memory(vstart, order);
    5.66 +	if (ret != NULL) {
    5.67 +		xen_contig_memory(vstart, order);
    5.68  
    5.69 -	memset(ret, 0, size);
    5.70 -	*dma_handle = virt_to_bus(ret);
    5.71 -
    5.72 +		memset(ret, 0, size);
    5.73 +		*dma_handle = virt_to_bus(ret);
    5.74 +	}
    5.75  	return ret;
    5.76  }
    5.77  
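
[annotation] Besides the reflowing, the one substantive change in this hunk is reading the PTE through pte_val_ma(*pte) instead of reaching into pte->pte_low directly. A sketch of the assumed accessor for the 2-level case (hypothetical definition; the authoritative one lives in the asm-xen pgtable headers listed in this changeset):

    /* Assumed shape: returns the PTE exactly as stored, i.e. a machine
     * frame number plus flag bits, with no machine-to-physical
     * translation applied -- which is what the >> PAGE_SHIFT above
     * relies on to recover a machine frame number. */
    typedef struct { unsigned long pte_low; } pte_t;
    #define pte_val_ma(x) ((x).pte_low)
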
     6.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Thu Apr 28 13:54:01 2005 +0000
     6.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Fri Apr 29 07:34:47 2005 +0000
     6.3 @@ -329,7 +329,7 @@ int copy_thread(int nr, unsigned long cl
     6.4  		desc->b = LDT_entry_b(&info);
     6.5  	}
     6.6  
     6.7 -        p->thread.io_pl = current->thread.io_pl;
     6.8 +	p->thread.io_pl = current->thread.io_pl;
     6.9  
    6.10  	err = 0;
    6.11   out:
    6.12 @@ -445,7 +445,7 @@ struct task_struct fastcall * __switch_t
    6.13  	physdev_op_t iopl_op, iobmp_op;
    6.14  	multicall_entry_t _mcl[8], *mcl = _mcl;
    6.15  
    6.16 -        /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
    6.17 +	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
    6.18  
    6.19  	/*
    6.20  	 * This is basically '__unlazy_fpu', except that we queue a
     7.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c	Thu Apr 28 13:54:01 2005 +0000
     7.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c	Fri Apr 29 07:34:47 2005 +0000
     7.3 @@ -40,6 +40,7 @@
     7.4  #include <linux/efi.h>
     7.5  #include <linux/init.h>
     7.6  #include <linux/edd.h>
     7.7 +#include <linux/kernel.h>
     7.8  #include <linux/percpu.h>
     7.9  #include <linux/notifier.h>
    7.10  #include <video/edid.h>
    7.11 @@ -59,15 +60,11 @@
    7.12  /* Allows setting of maximum possible memory size  */
    7.13  static unsigned long xen_override_max_pfn;
    7.14  
    7.15 -extern struct notifier_block *panic_notifier_list;
    7.16  static int xen_panic_event(struct notifier_block *, unsigned long, void *);
    7.17  static struct notifier_block xen_panic_block = {
    7.18 -	xen_panic_event,
    7.19 -        NULL,
    7.20 -        0 /* try to go last */
    7.21 +	xen_panic_event, NULL, 0 /* try to go last */
    7.22  };
    7.23  
    7.24 -
    7.25  int disable_pse __initdata = 0;
    7.26  
    7.27  /*
    7.28 @@ -901,6 +898,7 @@ efi_find_max_pfn(unsigned long start, un
    7.29  	return 0;
    7.30  }
    7.31  
    7.32 +
    7.33  /*
    7.34   * Find the highest page frame number we have available
    7.35   */
    7.36 @@ -1397,22 +1395,21 @@ static void set_mca_bus(int x) { }
    7.37   */
    7.38  void __init setup_arch(char **cmdline_p)
    7.39  {
    7.40 -	int i,j;
    7.41 +	int i, j;
    7.42  	physdev_op_t op;
    7.43  	unsigned long max_low_pfn;
    7.44  
    7.45  	/* Force a quick death if the kernel panics. */
    7.46  	extern int panic_timeout;
    7.47 -	if ( panic_timeout == 0 )
    7.48 +	if (panic_timeout == 0)
    7.49  		panic_timeout = 1;
    7.50  
    7.51  	/* Register a call for panic conditions. */
    7.52  	notifier_chain_register(&panic_notifier_list, &xen_panic_block);
    7.53  
    7.54 -	HYPERVISOR_vm_assist(
    7.55 -		VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
    7.56 -	HYPERVISOR_vm_assist(
    7.57 -		VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
    7.58 +	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
    7.59 +	HYPERVISOR_vm_assist(VMASST_CMD_enable,
    7.60 +			     VMASST_TYPE_writable_pagetables);
    7.61  
    7.62  	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
    7.63  	early_cpu_init();
    7.64 @@ -1478,7 +1475,8 @@ void __init setup_arch(char **cmdline_p)
    7.65  	init_mm.start_code = (unsigned long) _text;
    7.66  	init_mm.end_code = (unsigned long) _etext;
    7.67  	init_mm.end_data = (unsigned long) _edata;
    7.68 -	init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
    7.69 +	init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) +
    7.70 +		       xen_start_info.nr_pt_frames) << PAGE_SHIFT;
    7.71  
    7.72  	/* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
    7.73  	/*code_resource.start = virt_to_phys(_text);*/
    7.74 @@ -1511,7 +1509,7 @@ void __init setup_arch(char **cmdline_p)
    7.75  			max_pfn * sizeof(unsigned long));
    7.76  
    7.77  		if (max_pfn > xen_start_info.nr_pages) {
    7.78 -			/* set to INVALID_P2M_ENTRY */                        
    7.79 +			/* set to INVALID_P2M_ENTRY */
    7.80  			memset(phys_to_machine_mapping, ~0,
    7.81  				max_pfn * sizeof(unsigned long));
    7.82  			memcpy(phys_to_machine_mapping,
    7.83 @@ -1617,16 +1615,14 @@ void __init setup_arch(char **cmdline_p)
    7.84  	}
    7.85  }
    7.86  
    7.87 -
    7.88  static int
    7.89  xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
    7.90  {
    7.91 -     HYPERVISOR_crash();    
    7.92 -     /* we're never actually going to get here... */
    7.93 -     return NOTIFY_DONE;
    7.94 +	HYPERVISOR_crash();    
    7.95 +	/* we're never actually going to get here... */
    7.96 +	return NOTIFY_DONE;
    7.97  }
    7.98  
    7.99 -
   7.100  #include "setup_arch_post.h"
   7.101  /*
   7.102   * Local Variables:
     8.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 13:54:01 2005 +0000
     8.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Fri Apr 29 07:34:47 2005 +0000
     8.3 @@ -820,7 +820,7 @@ static int __init do_boot_cpu(int apicid
     8.4  #if 0
     8.5  	unsigned short nmi_high = 0, nmi_low = 0;
     8.6  #endif
     8.7 -	full_execution_context_t ctxt;
     8.8 +	vcpu_guest_context_t ctxt;
     8.9  	extern void startup_32_smp(void);
    8.10  	extern void hypervisor_callback(void);
    8.11  	extern void failsafe_callback(void);
    8.12 @@ -865,18 +865,18 @@ static int __init do_boot_cpu(int apicid
    8.13  
    8.14  	memset(&ctxt, 0, sizeof(ctxt));
    8.15  
    8.16 -	ctxt.cpu_ctxt.ds = __USER_DS;
    8.17 -	ctxt.cpu_ctxt.es = __USER_DS;
    8.18 -	ctxt.cpu_ctxt.fs = 0;
    8.19 -	ctxt.cpu_ctxt.gs = 0;
    8.20 -	ctxt.cpu_ctxt.ss = __KERNEL_DS;
    8.21 -	ctxt.cpu_ctxt.cs = __KERNEL_CS;
    8.22 -	ctxt.cpu_ctxt.eip = start_eip;
    8.23 -	ctxt.cpu_ctxt.esp = idle->thread.esp;
    8.24 -	ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
    8.25 +	ctxt.user_regs.ds = __USER_DS;
    8.26 +	ctxt.user_regs.es = __USER_DS;
    8.27 +	ctxt.user_regs.fs = 0;
    8.28 +	ctxt.user_regs.gs = 0;
    8.29 +	ctxt.user_regs.ss = __KERNEL_DS;
    8.30 +	ctxt.user_regs.cs = __KERNEL_CS;
    8.31 +	ctxt.user_regs.eip = start_eip;
    8.32 +	ctxt.user_regs.esp = idle->thread.esp;
    8.33 +	ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
    8.34  
    8.35  	/* FPU is set up to default initial state. */
    8.36 -	memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
    8.37 +	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
    8.38  
    8.39  	/* Virtual IDT is empty at start-of-day. */
    8.40  	for ( i = 0; i < 256; i++ )
    8.41 @@ -903,8 +903,8 @@ static int __init do_boot_cpu(int apicid
    8.42  	}
    8.43  
    8.44  	/* Ring 1 stack is the initial stack. */
    8.45 -	ctxt.kernel_ss  = __KERNEL_DS;
    8.46 -	ctxt.kernel_esp = idle->thread.esp;
    8.47 +	ctxt.kernel_ss = __KERNEL_DS;
    8.48 +	ctxt.kernel_sp = idle->thread.esp;
    8.49  
    8.50  	/* Callback handlers. */
    8.51  	ctxt.event_callback_cs     = __KERNEL_CS;
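
[annotation] This file shows the renaming that runs through the whole changeset: full_execution_context_t becomes vcpu_guest_context_t, its cpu_ctxt member becomes user_regs, the ECF_* flags become VGCF_*, and kernel_esp becomes the width-neutral kernel_sp. An abridged sketch of the new layout, inferred from the accesses above rather than quoted from the public header (xen/include/public/arch-x86_32.h carries the authoritative definition):

    /* Abridged and inferred from this changeset's usage; field order
     * and the fpu_ctxt representation are assumptions. */
    typedef struct cpu_user_regs {
        unsigned long ebx, ecx, edx, esi, edi, ebp, eax;
        unsigned long eip, esp, eflags;
        unsigned long cs, ss, es, ds, fs, gs;
    } cpu_user_regs_t;

    typedef struct vcpu_guest_context {
        unsigned long   flags;         /* VGCF_* (was ECF_*)               */
        cpu_user_regs_t user_regs;     /* was execution_context_t cpu_ctxt */
        char            fpu_ctxt[512]; /* FPU save area, zeroed at boot    */
        unsigned long   kernel_ss;
        unsigned long   kernel_sp;     /* was kernel_esp                   */
    } vcpu_guest_context_t;
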
     9.1 --- a/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c	Thu Apr 28 13:54:01 2005 +0000
     9.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c	Fri Apr 29 07:34:47 2005 +0000
     9.3 @@ -50,8 +50,8 @@
     9.4  
     9.5      if ( !test_and_set_bit(0, &printed) )
     9.6      {
     9.7 -        HYPERVISOR_vm_assist(
     9.8 -            VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
     9.9 +        HYPERVISOR_vm_assist(VMASST_CMD_disable,
    9.10 +			     VMASST_TYPE_4gb_segments_notify);
    9.11  
    9.12          DP("");
    9.13          DP("***************************************************************");
    9.14 @@ -77,8 +77,7 @@
    9.15  
    9.16  static int __init fixup_init(void)
    9.17  {
    9.18 -    HYPERVISOR_vm_assist(
    9.19 -        VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
    9.20 +    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
    9.21      return 0;
    9.22  }
    9.23  __initcall(fixup_init);
    10.1 --- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Apr 28 13:54:01 2005 +0000
    10.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Fri Apr 29 07:34:47 2005 +0000
    10.3 @@ -50,7 +50,7 @@
    10.4  
    10.5  
    10.6  EVENT_MASK      = (CS+4)
    10.7 -ECF_IN_SYSCALL  = (1<<8)
    10.8 +VGCF_IN_SYSCALL = (1<<8)
    10.9          
   10.10  /*
   10.11   * Copied from arch/xen/i386/kernel/entry.S
   10.12 @@ -169,7 +169,7 @@ ECF_IN_SYSCALL  = (1<<8)
   10.13           *     struct switch_to_user {
   10.14           *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
   10.15           *     } PACKED;
   10.16 -         * #define ECF_IN_SYSCALL (1<<8) 
   10.17 +         * #define VGCF_IN_SYSCALL (1<<8) 
   10.18           */
   10.19          .macro SWITCH_TO_USER flag
   10.20          movl $0,%gs:pda_kernel_mode     # change to user mode
   10.21 @@ -275,7 +275,7 @@ sysret_check:
   10.22  	jnz  sysret_careful 
   10.23          XEN_UNBLOCK_EVENTS(%rsi)                
   10.24  	RESTORE_ARGS 0,8,0
   10.25 -        SWITCH_TO_USER ECF_IN_SYSCALL
   10.26 +        SWITCH_TO_USER VGCF_IN_SYSCALL
   10.27  
   10.28  	/* Handle reschedules */
   10.29  	/* edx:	work, edi: workmask */	
    11.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Thu Apr 28 13:54:01 2005 +0000
    11.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Fri Apr 29 07:34:47 2005 +0000
    11.3 @@ -519,7 +519,7 @@ HYPERVISOR_vm_assist(
    11.4  
    11.5  static inline int
    11.6  HYPERVISOR_boot_vcpu(
    11.7 -    unsigned long vcpu, full_execution_context_t *ctxt)
    11.8 +    unsigned long vcpu, vcpu_guest_context_t *ctxt)
    11.9  {
   11.10      int ret;
   11.11      unsigned long ign1, ign2;
    12.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h	Thu Apr 28 13:54:01 2005 +0000
    12.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h	Fri Apr 29 07:34:47 2005 +0000
    12.3 @@ -111,7 +111,7 @@ typedef struct { unsigned long pgprot; }
    12.4  static inline unsigned long pgd_val(pgd_t x)
    12.5  {
    12.6  	unsigned long ret = x.pgd;
    12.7 -	if (ret) ret = machine_to_phys(ret) | 1;
    12.8 +	if (ret) ret = machine_to_phys(ret);
    12.9  	return ret;
   12.10  }
   12.11  #define pgprot_val(x)	((x).pgprot)
    13.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Thu Apr 28 13:54:01 2005 +0000
    13.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Fri Apr 29 07:34:47 2005 +0000
    13.3 @@ -489,7 +489,7 @@ HYPERVISOR_switch_to_user(void)
    13.4  
    13.5  static inline int
    13.6  HYPERVISOR_boot_vcpu(
    13.7 -    unsigned long vcpu, full_execution_context_t *ctxt)
    13.8 +    unsigned long vcpu, vcpu_guest_context_t *ctxt)
    13.9  {
   13.10      int ret;
   13.11  
    14.1 --- a/tools/libxc/xc.h	Thu Apr 28 13:54:01 2005 +0000
    14.2 +++ b/tools/libxc/xc.h	Fri Apr 29 07:34:47 2005 +0000
    14.3 @@ -160,7 +160,7 @@ int xc_domain_getfullinfo(int xc_handle,
    14.4                            u32 domid,
    14.5                            u32 vcpu,
    14.6                            xc_domaininfo_t *info,
    14.7 -                          full_execution_context_t *ctxt);
    14.8 +                          vcpu_guest_context_t *ctxt);
    14.9  int xc_domain_setcpuweight(int xc_handle,
   14.10                             u32 domid,
   14.11                             float weight);
    15.1 --- a/tools/libxc/xc_domain.c	Thu Apr 28 13:54:01 2005 +0000
    15.2 +++ b/tools/libxc/xc_domain.c	Fri Apr 29 07:34:47 2005 +0000
    15.3 @@ -144,7 +144,7 @@ int xc_domain_getfullinfo(int xc_handle,
    15.4                            u32 domid,
    15.5                            u32 vcpu,
    15.6                            xc_domaininfo_t *info,
    15.7 -                          full_execution_context_t *ctxt)
    15.8 +                          vcpu_guest_context_t *ctxt)
    15.9  {
   15.10      int rc, errno_saved;
   15.11      dom0_op_t op;
    16.1 --- a/tools/libxc/xc_linux_build.c	Thu Apr 28 13:54:01 2005 +0000
    16.2 +++ b/tools/libxc/xc_linux_build.c	Fri Apr 29 07:34:47 2005 +0000
    16.3 @@ -45,7 +45,7 @@ static int setup_guest(int xc_handle,
    16.4                           gzFile initrd_gfd, unsigned long initrd_len,
    16.5                           unsigned long nr_pages,
    16.6                           unsigned long *pvsi, unsigned long *pvke,
    16.7 -                         full_execution_context_t *ctxt,
    16.8 +                         vcpu_guest_context_t *ctxt,
    16.9                           const char *cmdline,
   16.10                           unsigned long shared_info_frame,
   16.11                           unsigned int control_evtchn,
   16.12 @@ -316,7 +316,7 @@ int xc_linux_build(int xc_handle,
   16.13      int initrd_fd = -1;
   16.14      gzFile initrd_gfd = NULL;
   16.15      int rc, i;
   16.16 -    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
   16.17 +    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   16.18      unsigned long nr_pages;
   16.19      char         *image = NULL;
   16.20      unsigned long image_size, initrd_size=0;
   16.21 @@ -400,19 +400,19 @@ int xc_linux_build(int xc_handle,
   16.22       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   16.23       *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
   16.24       */
   16.25 -    ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
   16.26 -    ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
   16.27 -    ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
   16.28 -    ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
   16.29 -    ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
   16.30 -    ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
   16.31 -    ctxt->cpu_ctxt.eip = vkern_entry;
   16.32 -    ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE;
   16.33 -    ctxt->cpu_ctxt.esi = vstartinfo_start;
   16.34 -    ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2);
   16.35 +    ctxt->user_regs.ds = FLAT_KERNEL_DS;
   16.36 +    ctxt->user_regs.es = FLAT_KERNEL_DS;
   16.37 +    ctxt->user_regs.fs = FLAT_KERNEL_DS;
   16.38 +    ctxt->user_regs.gs = FLAT_KERNEL_DS;
   16.39 +    ctxt->user_regs.ss = FLAT_KERNEL_DS;
   16.40 +    ctxt->user_regs.cs = FLAT_KERNEL_CS;
   16.41 +    ctxt->user_regs.eip = vkern_entry;
   16.42 +    ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
   16.43 +    ctxt->user_regs.esi = vstartinfo_start;
   16.44 +    ctxt->user_regs.eflags = (1<<9) | (1<<2);
   16.45  
   16.46      /* FPU is set up to default initial state. */
   16.47 -    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   16.48 +    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   16.49  
   16.50      /* Virtual IDT is empty at start-of-day. */
   16.51      for ( i = 0; i < 256; i++ )
   16.52 @@ -432,8 +432,8 @@ int xc_linux_build(int xc_handle,
   16.53      ctxt->gdt_ents = 0;
   16.54  
   16.55      /* Ring 1 stack is the initial stack. */
   16.56 -    ctxt->kernel_ss  = FLAT_KERNEL_DS;
   16.57 -    ctxt->kernel_esp = vstartinfo_start + 2*PAGE_SIZE;
   16.58 +    ctxt->kernel_ss = FLAT_KERNEL_DS;
   16.59 +    ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;
   16.60  
   16.61      /* No debugging. */
   16.62      memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
    17.1 --- a/tools/libxc/xc_linux_restore.c	Thu Apr 28 13:54:01 2005 +0000
    17.2 +++ b/tools/libxc/xc_linux_restore.c	Fri Apr 29 07:34:47 2005 +0000
    17.3 @@ -73,7 +73,7 @@ int xc_linux_restore(int xc_handle, XcIO
    17.4      shared_info_t *shared_info = (shared_info_t *)shared_info_page;
    17.5      
    17.6      /* A copy of the CPU context of the guest. */
    17.7 -    full_execution_context_t ctxt;
    17.8 +    vcpu_guest_context_t ctxt;
    17.9  
   17.10      /* First 16 bytes of the state file must contain 'LinuxGuestRecord'. */
   17.11      char signature[16];
   17.12 @@ -505,13 +505,13 @@ int xc_linux_restore(int xc_handle, XcIO
   17.13      }
   17.14  
   17.15      /* Uncanonicalise the suspend-record frame number and poke resume rec. */
   17.16 -    pfn = ctxt.cpu_ctxt.esi;
   17.17 +    pfn = ctxt.user_regs.esi;
   17.18      if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
   17.19      {
   17.20          xcio_error(ioctxt, "Suspend record frame number is bad");
   17.21          goto out;
   17.22      }
   17.23 -    ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
   17.24 +    ctxt.user_regs.esi = mfn = pfn_to_mfn_table[pfn];
   17.25      p_srec = xc_map_foreign_range(
   17.26          xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
   17.27      p_srec->resume_info.nr_pages    = nr_pfns;
   17.28 @@ -599,7 +599,7 @@ int xc_linux_restore(int xc_handle, XcIO
   17.29  
   17.30      /*
   17.31       * Safety checking of saved context:
   17.32 -     *  1. cpu_ctxt is fine, as Xen checks that on context switch.
   17.33 +     *  1. user_regs is fine, as Xen checks that on context switch.
   17.34       *  2. fpu_ctxt is fine, as it can't hurt Xen.
   17.35       *  3. trap_ctxt needs the code selectors checked.
   17.36       *  4. fast_trap_idx is checked by Xen.
    18.1 --- a/tools/libxc/xc_linux_save.c	Thu Apr 28 13:54:01 2005 +0000
    18.2 +++ b/tools/libxc/xc_linux_save.c	Fri Apr 29 07:34:47 2005 +0000
    18.3 @@ -325,7 +325,7 @@ static int analysis_phase( int xc_handle
    18.4  
    18.5  int suspend_and_state(int xc_handle, XcIOContext *ioctxt,		      
    18.6                        xc_domaininfo_t *info,
    18.7 -                      full_execution_context_t *ctxt)
    18.8 +                      vcpu_guest_context_t *ctxt)
    18.9  {
   18.10      int i=0;
   18.11      
   18.12 @@ -391,7 +391,7 @@ int xc_linux_save(int xc_handle, XcIOCon
   18.13      unsigned long shared_info_frame;
   18.14      
   18.15      /* A copy of the CPU context of the guest. */
   18.16 -    full_execution_context_t ctxt;
   18.17 +    vcpu_guest_context_t ctxt;
   18.18  
   18.19      /* A table containg the type of each PFN (/not/ MFN!). */
   18.20      unsigned long *pfn_type = NULL;
   18.21 @@ -922,7 +922,7 @@ int xc_linux_save(int xc_handle, XcIOCon
   18.22                            "SUSPEND flags %08u shinfo %08lx eip %08u "
   18.23                            "esi %08u\n",info.flags,
   18.24                            info.shared_info_frame,
   18.25 -                          ctxt.cpu_ctxt.eip, ctxt.cpu_ctxt.esi );
   18.26 +                          ctxt.user_regs.eip, ctxt.user_regs.esi );
   18.27              } 
   18.28  
   18.29              if ( xc_shadow_control( xc_handle, domid, 
   18.30 @@ -995,7 +995,7 @@ int xc_linux_save(int xc_handle, XcIOCon
   18.31         domid for this to succeed. */
   18.32      p_srec = xc_map_foreign_range(xc_handle, domid,
   18.33                                     sizeof(*p_srec), PROT_READ, 
   18.34 -                                   ctxt.cpu_ctxt.esi);
   18.35 +                                   ctxt.user_regs.esi);
   18.36      if (!p_srec){
   18.37          xcio_error(ioctxt, "Couldn't map suspend record");
   18.38          goto out;
   18.39 @@ -1009,7 +1009,7 @@ int xc_linux_save(int xc_handle, XcIOCon
   18.40      }
   18.41  
   18.42      /* Canonicalise the suspend-record frame number. */
   18.43 -    if ( !translate_mfn_to_pfn(&ctxt.cpu_ctxt.esi) ){
   18.44 +    if ( !translate_mfn_to_pfn(&ctxt.user_regs.esi) ){
   18.45          xcio_error(ioctxt, "Suspend record is not in range of pseudophys map");
   18.46          goto out;
   18.47      }
    19.1 --- a/tools/libxc/xc_plan9_build.c	Thu Apr 28 13:54:01 2005 +0000
    19.2 +++ b/tools/libxc/xc_plan9_build.c	Fri Apr 29 07:34:47 2005 +0000
    19.3 @@ -113,7 +113,7 @@ setup_guest(int xc_handle,
    19.4  	      unsigned long tot_pages,
    19.5  	      unsigned long *virt_startinfo_addr,
    19.6  	      unsigned long *virt_load_addr,
    19.7 -	      full_execution_context_t * ctxt,
    19.8 +	      vcpu_guest_context_t * ctxt,
    19.9  	      const char *cmdline,
   19.10  	      unsigned long shared_info_frame, 
   19.11  	      unsigned int control_evtchn,
   19.12 @@ -411,7 +411,7 @@ xc_plan9_build(int xc_handle,
   19.13  	int kernel_fd = -1;
   19.14  	gzFile kernel_gfd = NULL;
   19.15  	int rc, i;
   19.16 -	full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
   19.17 +	vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   19.18  	unsigned long virt_startinfo_addr;
   19.19  
   19.20  	if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) {
   19.21 @@ -482,23 +482,23 @@ xc_plan9_build(int xc_handle,
   19.22  	 *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   19.23  	 *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
   19.24  	 */
   19.25 -	ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
   19.26 -	ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
   19.27 -	ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
   19.28 -	ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
   19.29 -	ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
   19.30 -	ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
   19.31 -	ctxt->cpu_ctxt.eip = load_addr;
   19.32 -	ctxt->cpu_ctxt.eip = 0x80100020;
   19.33 +	ctxt->user_regs.ds = FLAT_KERNEL_DS;
   19.34 +	ctxt->user_regs.es = FLAT_KERNEL_DS;
   19.35 +	ctxt->user_regs.fs = FLAT_KERNEL_DS;
   19.36 +	ctxt->user_regs.gs = FLAT_KERNEL_DS;
   19.37 +	ctxt->user_regs.ss = FLAT_KERNEL_DS;
   19.38 +	ctxt->user_regs.cs = FLAT_KERNEL_CS;
   19.39 +	ctxt->user_regs.eip = load_addr;
   19.40 +	ctxt->user_regs.eip = 0x80100020;
   19.41  	/* put stack at top of second page */
   19.42 -	ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
   19.43 +	ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
   19.44  
   19.45  	/* why is this set? */
   19.46 -	ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp;
   19.47 -	ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2);
   19.48 +	ctxt->user_regs.esi = ctxt->user_regs.esp;
   19.49 +	ctxt->user_regs.eflags = (1 << 9) | (1 << 2);
   19.50  
   19.51  	/* FPU is set up to default initial state. */
   19.52 -	memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
   19.53 +	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   19.54  
   19.55  	/* Virtual IDT is empty at start-of-day. */
   19.56  	for (i = 0; i < 256; i++) {
   19.57 @@ -519,7 +519,7 @@ xc_plan9_build(int xc_handle,
   19.58  	/* Ring 1 stack is the initial stack. */
   19.59  	/* put stack at top of second page */
   19.60  	ctxt->kernel_ss = FLAT_KERNEL_DS;
   19.61 -	ctxt->kernel_esp = ctxt->cpu_ctxt.esp;
   19.62 +	ctxt->kernel_sp = ctxt->user_regs.esp;
   19.63  
   19.64  	/* No debugging. */
   19.65  	memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
    20.1 --- a/tools/libxc/xc_ptrace.c	Thu Apr 28 13:54:01 2005 +0000
    20.2 +++ b/tools/libxc/xc_ptrace.c	Fri Apr 29 07:34:47 2005 +0000
    20.3 @@ -132,7 +132,7 @@ static long			nr_pages = 0;
    20.4  unsigned long			*page_array = NULL;
    20.5  static int                      regs_valid[MAX_VIRT_CPUS];
    20.6  static unsigned long            cr3[MAX_VIRT_CPUS];
    20.7 -static full_execution_context_t ctxt[MAX_VIRT_CPUS];
    20.8 +static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
    20.9  
   20.10  /* --------------------- */
   20.11  
   20.12 @@ -220,7 +220,7 @@ waitdomain(int domain, int *status, int 
   20.13  {
   20.14      dom0_op_t op;
   20.15      int retval;
   20.16 -    full_execution_context_t ctxt;
   20.17 +    vcpu_guest_context_t ctxt;
   20.18      struct timespec ts;
   20.19      ts.tv_sec = 0;
   20.20      ts.tv_nsec = 10*1000*1000;
   20.21 @@ -300,7 +300,7 @@ xc_ptrace(enum __ptrace_request request,
   20.22  	FETCH_REGS(cpu);
   20.23  
   20.24  	if (request == PTRACE_GETREGS) {
   20.25 -		SET_PT_REGS(pt, ctxt[cpu].cpu_ctxt); 
   20.26 +		SET_PT_REGS(pt, ctxt[cpu].user_regs); 
   20.27  		memcpy(data, &pt, sizeof(elf_gregset_t));
   20.28  	} else if (request == PTRACE_GETFPREGS)
   20.29  	    memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
   20.30 @@ -309,7 +309,7 @@ xc_ptrace(enum __ptrace_request request,
   20.31  	break;
   20.32      case PTRACE_SETREGS:
   20.33  	op.cmd = DOM0_SETDOMAININFO;
   20.34 -	SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].cpu_ctxt);
   20.35 +	SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].user_regs);
   20.36  	op.u.setdomaininfo.domain = domid;
   20.37  	/* XXX need to understand multiple exec_domains */
   20.38  	op.u.setdomaininfo.exec_domain = cpu;
   20.39 @@ -339,7 +339,7 @@ xc_ptrace(enum __ptrace_request request,
   20.40  	retval = do_dom0_op(xc_handle, &op);
   20.41  	break;
   20.42      case PTRACE_SINGLESTEP:
   20.43 -	ctxt[VCPU].cpu_ctxt.eflags |= PSL_T;
   20.44 +	ctxt[VCPU].user_regs.eflags |= PSL_T;
   20.45  	op.cmd = DOM0_SETDOMAININFO;
   20.46  	op.u.setdomaininfo.domain = domid;
   20.47  	op.u.setdomaininfo.exec_domain = 0;
   20.48 @@ -355,8 +355,8 @@ xc_ptrace(enum __ptrace_request request,
   20.49  	if (request != PTRACE_SINGLESTEP) {
   20.50  	    FETCH_REGS(cpu);
   20.51  	    /* Clear trace flag */
   20.52 -	    if (ctxt[cpu].cpu_ctxt.eflags & PSL_T) {
   20.53 -		ctxt[cpu].cpu_ctxt.eflags &= ~PSL_T;
   20.54 +	    if (ctxt[cpu].user_regs.eflags & PSL_T) {
   20.55 +		ctxt[cpu].user_regs.eflags &= ~PSL_T;
   20.56  		op.cmd = DOM0_SETDOMAININFO;
   20.57  		op.u.setdomaininfo.domain = domid;
   20.58  		op.u.setdomaininfo.exec_domain = cpu;
    21.1 --- a/tools/libxc/xc_vmx_build.c	Thu Apr 28 13:54:01 2005 +0000
    21.2 +++ b/tools/libxc/xc_vmx_build.c	Fri Apr 29 07:34:47 2005 +0000
    21.3 @@ -149,7 +149,7 @@ static int setup_guest(int xc_handle,
    21.4                           char *image, unsigned long image_size,
    21.5                           gzFile initrd_gfd, unsigned long initrd_len,
    21.6                           unsigned long nr_pages,
    21.7 -                         full_execution_context_t *ctxt,
    21.8 +                         vcpu_guest_context_t *ctxt,
    21.9                           const char *cmdline,
   21.10                           unsigned long shared_info_frame,
   21.11                           unsigned int control_evtchn,
   21.12 @@ -422,22 +422,22 @@ static int setup_guest(int xc_handle,
   21.13      /*
   21.14       * Initial register values:
   21.15       */
   21.16 -    ctxt->cpu_ctxt.ds = 0x68;
   21.17 -    ctxt->cpu_ctxt.es = 0x0;
   21.18 -    ctxt->cpu_ctxt.fs = 0x0;
   21.19 -    ctxt->cpu_ctxt.gs = 0x0;
   21.20 -    ctxt->cpu_ctxt.ss = 0x68;
   21.21 -    ctxt->cpu_ctxt.cs = 0x60;
   21.22 -    ctxt->cpu_ctxt.eip = dsi.v_kernentry;
   21.23 -    ctxt->cpu_ctxt.edx = vboot_gdt_start;
   21.24 -    ctxt->cpu_ctxt.eax = 0x800;
   21.25 -    ctxt->cpu_ctxt.esp = vboot_gdt_end;
   21.26 -    ctxt->cpu_ctxt.ebx = 0;	/* startup_32 expects this to be 0 to signal boot cpu */
   21.27 -    ctxt->cpu_ctxt.ecx = mem_mapp->nr_map;
   21.28 -    ctxt->cpu_ctxt.esi = vboot_params_start;
   21.29 -    ctxt->cpu_ctxt.edi = vboot_params_start + 0x2d0;
   21.30 +    ctxt->user_regs.ds = 0x68;
   21.31 +    ctxt->user_regs.es = 0x0;
   21.32 +    ctxt->user_regs.fs = 0x0;
   21.33 +    ctxt->user_regs.gs = 0x0;
   21.34 +    ctxt->user_regs.ss = 0x68;
   21.35 +    ctxt->user_regs.cs = 0x60;
   21.36 +    ctxt->user_regs.eip = dsi.v_kernentry;
   21.37 +    ctxt->user_regs.edx = vboot_gdt_start;
   21.38 +    ctxt->user_regs.eax = 0x800;
   21.39 +    ctxt->user_regs.esp = vboot_gdt_end;
   21.40 +    ctxt->user_regs.ebx = 0;	/* startup_32 expects this to be 0 to signal boot cpu */
   21.41 +    ctxt->user_regs.ecx = mem_mapp->nr_map;
   21.42 +    ctxt->user_regs.esi = vboot_params_start;
   21.43 +    ctxt->user_regs.edi = vboot_params_start + 0x2d0;
   21.44  
   21.45 -    ctxt->cpu_ctxt.eflags = (1<<2);
   21.46 +    ctxt->user_regs.eflags = (1<<2);
   21.47  
   21.48      return 0;
   21.49  
   21.50 @@ -488,7 +488,7 @@ int xc_vmx_build(int xc_handle,
   21.51      int initrd_fd = -1;
   21.52      gzFile initrd_gfd = NULL;
   21.53      int rc, i;
   21.54 -    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
   21.55 +    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   21.56      unsigned long nr_pages;
   21.57      char         *image = NULL;
   21.58      unsigned long image_size, initrd_size=0;
   21.59 @@ -565,9 +565,9 @@ int xc_vmx_build(int xc_handle,
   21.60      if ( image != NULL )
   21.61          free(image);
   21.62  
   21.63 -    ctxt->flags = ECF_VMX_GUEST;
   21.64 +    ctxt->flags = VGCF_VMX_GUEST;
   21.65      /* FPU is set up to default initial state. */
   21.66 -    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   21.67 +    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
   21.68  
   21.69      /* Virtual IDT is empty at start-of-day. */
   21.70      for ( i = 0; i < 256; i++ )
   21.71 @@ -588,8 +588,8 @@ int xc_vmx_build(int xc_handle,
   21.72  
   21.73      /* Ring 1 stack is the initial stack. */
   21.74  /*
   21.75 -    ctxt->kernel_ss  = FLAT_KERNEL_DS;
   21.76 -    ctxt->kernel_esp = vstartinfo_start;
   21.77 +    ctxt->kernel_ss = FLAT_KERNEL_DS;
   21.78 +    ctxt->kernel_sp = vstartinfo_start;
   21.79  */
   21.80      /* No debugging. */
   21.81      memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
    22.1 --- a/tools/xentrace/xenctx.c	Thu Apr 28 13:54:01 2005 +0000
    22.2 +++ b/tools/xentrace/xenctx.c	Fri Apr 29 07:34:47 2005 +0000
    22.3 @@ -24,27 +24,26 @@
    22.4  #include "xc.h"
    22.5  
    22.6  #ifdef __i386__
    22.7 -void
    22.8 -print_ctx(full_execution_context_t *ctx1)
    22.9 +void print_ctx(vcpu_guest_context_t *ctx1)
   22.10  {
   22.11 -    execution_context_t *ctx = &ctx1->cpu_ctxt;
   22.12 -
   22.13 -    printf("eip: %08lx\t", ctx->eip);
   22.14 -    printf("esp: %08lx\n", ctx->esp);
   22.15 +    struct cpu_user_regs *regs = &ctx1->user_regs;
   22.16  
   22.17 -    printf("eax: %08lx\t", ctx->eax);
   22.18 -    printf("ebx: %08lx\t", ctx->ebx);
   22.19 -    printf("ecx: %08lx\t", ctx->ecx);
   22.20 -    printf("edx: %08lx\n", ctx->edx);
   22.21 +    printf("eip: %08lx\t", regs->eip);
   22.22 +    printf("esp: %08lx\n", regs->esp);
   22.23  
   22.24 -    printf("esi: %08lx\t", ctx->esi);
   22.25 -    printf("edi: %08lx\t", ctx->edi);
   22.26 -    printf("ebp: %08lx\n", ctx->ebp);
   22.27 +    printf("eax: %08lx\t", regs->eax);
   22.28 +    printf("ebx: %08lx\t", regs->ebx);
   22.29 +    printf("ecx: %08lx\t", regs->ecx);
   22.30 +    printf("edx: %08lx\n", regs->edx);
   22.31  
   22.32 -    printf(" cs: %08lx\t", ctx->cs);
   22.33 -    printf(" ds: %08lx\t", ctx->ds);
   22.34 -    printf(" fs: %08lx\t", ctx->fs);
   22.35 -    printf(" gs: %08lx\n", ctx->gs);
   22.36 +    printf("esi: %08lx\t", regs->esi);
   22.37 +    printf("edi: %08lx\t", regs->edi);
   22.38 +    printf("ebp: %08lx\n", regs->ebp);
   22.39 +
   22.40 +    printf(" cs: %08lx\t", regs->cs);
   22.41 +    printf(" ds: %08lx\t", regs->ds);
   22.42 +    printf(" fs: %08lx\t", regs->fs);
   22.43 +    printf(" gs: %08lx\n", regs->gs);
   22.44  
   22.45  }
   22.46  #endif
   22.47 @@ -53,7 +52,7 @@ void dump_ctx(u32 domid, u32 vcpu)
   22.48  {
   22.49      int ret;
   22.50      xc_domaininfo_t info;
   22.51 -    full_execution_context_t ctx;
   22.52 +    vcpu_guest_context_t ctx;
   22.53  
   22.54      int xc_handle = xc_interface_open(); /* for accessing control interface */
   22.55  
    23.1 --- a/xen/arch/ia64/dom0_ops.c	Thu Apr 28 13:54:01 2005 +0000
    23.2 +++ b/xen/arch/ia64/dom0_ops.c	Fri Apr 29 07:34:47 2005 +0000
    23.3 @@ -47,7 +47,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    23.4      return ret;
    23.5  }
    23.6  
    23.7 -void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
    23.8 +void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
    23.9  { 
   23.10      int i;
   23.11  
    24.1 --- a/xen/arch/ia64/domain.c	Thu Apr 28 13:54:01 2005 +0000
    24.2 +++ b/xen/arch/ia64/domain.c	Fri Apr 29 07:34:47 2005 +0000
    24.3 @@ -199,13 +199,13 @@ void arch_do_boot_vcpu(struct exec_domai
    24.4  	return;
    24.5  }
    24.6  
    24.7 -int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c)
    24.8 +int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
    24.9  {
   24.10  	dummy();
   24.11  	return 1;
   24.12  }
   24.13  
   24.14 -int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
   24.15 +int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
   24.16  {
   24.17  	dummy();
   24.18  	return 1;
    25.1 --- a/xen/arch/ia64/xenmisc.c	Thu Apr 28 13:54:01 2005 +0000
    25.2 +++ b/xen/arch/ia64/xenmisc.c	Fri Apr 29 07:34:47 2005 +0000
    25.3 @@ -66,7 +66,7 @@ void grant_table_destroy(struct domain *
    25.4  	return;
    25.5  }
    25.6  
    25.7 -struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
    25.8 +struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); }
    25.9  
   25.10  void raise_actimer_softirq(void)
   25.11  {
   25.12 @@ -278,6 +278,11 @@ if (!i--) { printk("+",id); cnt[id] = 10
   25.13  	if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
   25.14  }
   25.15  
   25.16 +void continue_running(struct exec_domain *same)
   25.17 +{
   25.18 +    /* nothing to do */
   25.19 +}
   25.20 +
   25.21  void panic_domain(struct pt_regs *regs, const char *fmt, ...)
   25.22  {
   25.23  	va_list args;
    26.1 --- a/xen/arch/x86/apic.c	Thu Apr 28 13:54:01 2005 +0000
    26.2 +++ b/xen/arch/x86/apic.c	Fri Apr 29 07:34:47 2005 +0000
    26.3 @@ -825,7 +825,7 @@ int reprogram_ac_timer(s_time_t timeout)
    26.4      return 1;
    26.5  }
    26.6  
    26.7 -void smp_apic_timer_interrupt(struct xen_regs * regs)
    26.8 +void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
    26.9  {
   26.10      ack_APIC_irq();
   26.11      perfc_incrc(apic_timer);
   26.12 @@ -835,7 +835,7 @@ void smp_apic_timer_interrupt(struct xen
   26.13  /*
   26.14   * This interrupt should _never_ happen with our APIC/SMP architecture
   26.15   */
   26.16 -asmlinkage void smp_spurious_interrupt(struct xen_regs *regs)
   26.17 +asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs)
   26.18  {
   26.19      unsigned long v;
   26.20  
   26.21 @@ -857,7 +857,7 @@ asmlinkage void smp_spurious_interrupt(s
   26.22   * This interrupt should never happen with our APIC/SMP architecture
   26.23   */
   26.24  
   26.25 -asmlinkage void smp_error_interrupt(struct xen_regs *regs)
   26.26 +asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs)
   26.27  {
   26.28      unsigned long v, v1;
   26.29  
    27.1 --- a/xen/arch/x86/boot/x86_32.S	Thu Apr 28 13:54:01 2005 +0000
    27.2 +++ b/xen/arch/x86/boot/x86_32.S	Fri Apr 29 07:34:47 2005 +0000
    27.3 @@ -24,10 +24,10 @@ bad_cpu_msg:
    27.4  not_multiboot_msg:
    27.5          .asciz "ERR: Not a Multiboot bootloader!"
    27.6  bad_cpu:
    27.7 -        mov     $SYMBOL_NAME(bad_cpu_msg)-__PAGE_OFFSET,%esi
    27.8 +        mov     $bad_cpu_msg-__PAGE_OFFSET,%esi
    27.9          jmp     print_err
   27.10  not_multiboot:
   27.11 -        mov     $SYMBOL_NAME(not_multiboot_msg)-__PAGE_OFFSET,%esi
   27.12 +        mov     $not_multiboot_msg-__PAGE_OFFSET,%esi
   27.13  print_err:
   27.14          mov     $0xB8000,%edi  # VGA framebuffer
   27.15  1:      mov     (%esi),%bl
   27.16 @@ -118,7 +118,7 @@ 1:      stosl   /* low mappings cover as
   27.17          mov     $(__HYPERVISOR_CS << 16),%eax
   27.18          mov     %dx,%ax            /* selector = 0x0010 = cs */
   27.19          mov     $0x8E00,%dx        /* interrupt gate - dpl=0, present */
   27.20 -        lea     SYMBOL_NAME(idt_table)-__PAGE_OFFSET,%edi
   27.21 +        lea     idt_table-__PAGE_OFFSET,%edi
   27.22          mov     $256,%ecx
   27.23  1:      mov     %eax,(%edi)
   27.24          mov     %edx,4(%edi)
   27.25 @@ -163,38 +163,38 @@ ignore_int:
   27.26          mov     %eax,%ds
   27.27          mov     %eax,%es
   27.28          pushl   $int_msg
   27.29 -        call    SYMBOL_NAME(printf)
   27.30 +        call    printf
   27.31  1:      jmp     1b
   27.32  
   27.33  /*** STACK LOCATION ***/
   27.34          
   27.35  ENTRY(stack_start)
   27.36 -        .long SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200 - __PAGE_OFFSET
   27.37 +        .long cpu0_stack + STACK_SIZE - 200 - __PAGE_OFFSET
   27.38          .long __HYPERVISOR_DS
   27.39          
   27.40  /*** DESCRIPTOR TABLES ***/
   27.41  
   27.42 -.globl SYMBOL_NAME(idt)
   27.43 -.globl SYMBOL_NAME(gdt)        
   27.44 +.globl idt
   27.45 +.globl gdt        
   27.46  
   27.47          ALIGN
   27.48          
   27.49          .word   0    
   27.50  idt_descr:
   27.51  	.word	256*8-1
   27.52 -SYMBOL_NAME(idt):
   27.53 -        .long	SYMBOL_NAME(idt_table)
   27.54 +idt:
   27.55 +        .long	idt_table
   27.56  
   27.57          .word   0
   27.58  gdt_descr:
   27.59  	.word	(LAST_RESERVED_GDT_ENTRY*8)+7
   27.60 -SYMBOL_NAME(gdt):       
   27.61 -        .long   SYMBOL_NAME(gdt_table)	/* gdt base */
   27.62 +gdt:       
   27.63 +        .long   gdt_table	/* gdt base */
   27.64  
   27.65          .word   0
   27.66  nopaging_gdt_descr:
   27.67          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
   27.68 -        .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
   27.69 +        .long   gdt_table-__PAGE_OFFSET
   27.70          
   27.71          ALIGN
   27.72  /* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
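
[annotation] Throughout both boot files the SYMBOL_NAME() wrapper is dropped. The macro existed so the same assembly could build for a.out-style targets, which prefix C symbols with an underscore; on ELF it expands to its argument, so removing it changes nothing in the binaries Xen actually builds. The assumed retired definition, for reference (from the old linkage conventions, not shown in this changeset):

    /* Assumed historical definition being retired: identity on ELF,
     * underscore-prefixed on a.out-style targets. */
    #ifdef __ELF__
    #define SYMBOL_NAME(x) x
    #else
    #define SYMBOL_NAME(x) _##x
    #endif
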
    28.1 --- a/xen/arch/x86/boot/x86_64.S	Thu Apr 28 13:54:01 2005 +0000
    28.2 +++ b/xen/arch/x86/boot/x86_64.S	Fri Apr 29 07:34:47 2005 +0000
    28.3 @@ -7,10 +7,10 @@
    28.4                  
    28.5          .text
    28.6          .code32
    28.7 -        
    28.8 +
    28.9  ENTRY(start)
   28.10          jmp __start
   28.11 -        
   28.12 +
   28.13          .org    0x004
   28.14  /*** MULTIBOOT HEADER ****/
   28.15          /* Magic number indicating a Multiboot header. */
   28.16 @@ -180,8 +180,8 @@ 1:      movq    %rax,(%rdi)
   28.17                          
   28.18  /*** DESCRIPTOR TABLES ***/
   28.19  
   28.20 -.globl SYMBOL_NAME(idt)
   28.21 -.globl SYMBOL_NAME(gdt)        
   28.22 +.globl idt
   28.23 +.globl gdt        
   28.24  
   28.25          .org    0x1f0
   28.26          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
   28.27 @@ -203,17 +203,17 @@ ENTRY(gdt_table)
   28.28          .word   0
   28.29  gdt_descr:
   28.30          .word   (LAST_RESERVED_GDT_ENTRY*8)+7
   28.31 -SYMBOL_NAME(gdt):       
   28.32 -        .quad   SYMBOL_NAME(gdt_table)
   28.33 +gdt:       
   28.34 +        .quad   gdt_table
   28.35  
   28.36          .word   0    
   28.37  idt_descr:
   28.38          .word   256*16-1
   28.39 -SYMBOL_NAME(idt):
   28.40 -        .quad   SYMBOL_NAME(idt_table)
   28.41 +idt:
   28.42 +        .quad   idt_table
   28.43  
   28.44  ENTRY(stack_start)
   28.45 -        .quad   SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200
   28.46 +        .quad   cpu0_stack + STACK_SIZE - 200
   28.47  
   28.48  high_start:
   28.49          .quad   __high_start
   28.50 @@ -258,6 +258,6 @@ int_msg:
   28.51  ignore_int:
   28.52          cld
   28.53          leaq    int_msg(%rip),%rdi
   28.54 -        call    SYMBOL_NAME(printf)
   28.55 +        call    printf
   28.56  1:      jmp     1b
   28.57  
    29.1 --- a/xen/arch/x86/cdb.c	Thu Apr 28 13:54:01 2005 +0000
    29.2 +++ b/xen/arch/x86/cdb.c	Fri Apr 29 07:34:47 2005 +0000
    29.3 @@ -214,7 +214,7 @@ xendbg_send_reply(const char *buf, struc
    29.4  }
    29.5  
    29.6  static int
    29.7 -handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx)
    29.8 +handle_register_read_command(struct cpu_user_regs *regs, struct xendbg_context *ctx)
    29.9  {
   29.10  	char buf[121];
   29.11  
   29.12 @@ -240,7 +240,7 @@ handle_register_read_command(struct xen_
   29.13  }
   29.14  
   29.15  static int
   29.16 -process_command(char *received_packet, struct xen_regs *regs,
   29.17 +process_command(char *received_packet, struct cpu_user_regs *regs,
   29.18  		struct xendbg_context *ctx)
   29.19  {
   29.20  	char *ptr;
   29.21 @@ -318,7 +318,7 @@ xdb_ctx = {
   29.22  };
   29.23  
   29.24  int
   29.25 -__trap_to_cdb(struct xen_regs *regs)
   29.26 +__trap_to_cdb(struct cpu_user_regs *regs)
   29.27  {
   29.28  	int resume = 0;
   29.29  	int r;
    30.1 --- a/xen/arch/x86/dom0_ops.c	Thu Apr 28 13:54:01 2005 +0000
    30.2 +++ b/xen/arch/x86/dom0_ops.c	Fri Apr 29 07:34:47 2005 +0000
    30.3 @@ -374,54 +374,44 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    30.4  }
    30.5  
    30.6  void arch_getdomaininfo_ctxt(
    30.7 -    struct exec_domain *ed, full_execution_context_t *c)
    30.8 +    struct exec_domain *ed, struct vcpu_guest_context *c)
    30.9  { 
   30.10      int i;
   30.11  #ifdef __i386__  /* Remove when x86_64 VMX is implemented */
   30.12  #ifdef CONFIG_VMX
   30.13 -    extern void save_vmx_execution_context(execution_context_t *);
   30.14 +    extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
   30.15  #endif
   30.16  #endif
   30.17  
   30.18 -    c->flags = 0;
   30.19 -    memcpy(&c->cpu_ctxt, 
   30.20 -           &ed->arch.user_ctxt,
   30.21 -           sizeof(ed->arch.user_ctxt));
   30.22 +    memcpy(c, &ed->arch.guest_context, sizeof(*c));
   30.23 +
   30.24      /* IOPL privileges are virtualised -- merge back into returned eflags. */
   30.25 -    BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0);
   30.26 -    c->cpu_ctxt.eflags |= ed->arch.iopl << 12;
   30.27 +    BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
   30.28 +    c->user_regs.eflags |= ed->arch.iopl << 12;
   30.29  
   30.30  #ifdef __i386__
   30.31  #ifdef CONFIG_VMX
   30.32      if ( VMX_DOMAIN(ed) )
   30.33 -        save_vmx_execution_context(&c->cpu_ctxt);
   30.34 +        save_vmx_cpu_user_regs(&c->user_regs);
   30.35  #endif
   30.36  #endif
   30.37  
   30.38 +    c->flags = 0;
   30.39      if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
   30.40 -        c->flags |= ECF_I387_VALID;
   30.41 -    if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
   30.42 -        c->flags |= ECF_IN_KERNEL;
   30.43 +        c->flags |= VGCF_I387_VALID;
   30.44 +    if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
   30.45 +        c->flags |= VGCF_IN_KERNEL;
   30.46  #ifdef CONFIG_VMX
   30.47      if (VMX_DOMAIN(ed))
   30.48 -        c->flags |= ECF_VMX_GUEST;
   30.49 +        c->flags |= VGCF_VMX_GUEST;
   30.50  #endif
   30.51 -    memcpy(&c->fpu_ctxt,
   30.52 -           &ed->arch.i387,
   30.53 -           sizeof(ed->arch.i387));
   30.54 -    memcpy(&c->trap_ctxt,
   30.55 -           ed->arch.traps,
   30.56 -           sizeof(ed->arch.traps));
   30.57 +
   30.58  #ifdef ARCH_HAS_FAST_TRAP
   30.59      if ( (ed->arch.fast_trap_desc.a == 0) &&
   30.60           (ed->arch.fast_trap_desc.b == 0) )
   30.61          c->fast_trap_idx = 0;
   30.62 -    else
   30.63 -        c->fast_trap_idx = 
   30.64 -            ed->arch.fast_trap_idx;
   30.65  #endif
   30.66 -    c->ldt_base = ed->arch.ldt_base;
   30.67 -    c->ldt_ents = ed->arch.ldt_ents;
   30.68 +
   30.69      c->gdt_ents = 0;
   30.70      if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
   30.71      {
   30.72 @@ -430,22 +420,8 @@ void arch_getdomaininfo_ctxt(
   30.73                  l1e_get_pfn(ed->arch.perdomain_ptes[i]);
   30.74          c->gdt_ents = GET_GDT_ENTRIES(ed);
   30.75      }
   30.76 -    c->kernel_ss  = ed->arch.kernel_ss;
   30.77 -    c->kernel_esp = ed->arch.kernel_sp;
   30.78 -    c->pt_base   = 
   30.79 -        pagetable_val(ed->arch.guest_table);
   30.80 -    memcpy(c->debugreg, 
   30.81 -           ed->arch.debugreg, 
   30.82 -           sizeof(ed->arch.debugreg));
   30.83 -#if defined(__i386__)
   30.84 -    c->event_callback_cs     = ed->arch.event_selector;
   30.85 -    c->event_callback_eip    = ed->arch.event_address;
   30.86 -    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
   30.87 -    c->failsafe_callback_eip = ed->arch.failsafe_address;
   30.88 -#elif defined(__x86_64__)
   30.89 -    c->event_callback_eip    = ed->arch.event_address;
   30.90 -    c->failsafe_callback_eip = ed->arch.failsafe_address;
   30.91 -    c->syscall_callback_eip  = ed->arch.syscall_address;
   30.92 -#endif
   30.93 +
   30.94 +    c->pt_base = pagetable_val(ed->arch.guest_table);
   30.95 +
   30.96      c->vm_assist = ed->domain->vm_assist;
   30.97  }
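
The long run of field-by-field copies collapses here because the guest-visible context is now embedded whole in the per-vcpu arch state, so export becomes a single memcpy() plus a little post-processing (eflags/IOPL merge, GDT frames, pt_base). A rough sketch of the relationship the new code relies on (abbreviated; the real public header carries more fields and exact types differ):

    /* Sketch only, shape inferred from this patch. */
    struct vcpu_guest_context {
        unsigned long        flags;          /* VGCF_* */
        struct cpu_user_regs user_regs;      /* GPRs + segment selectors */
        char                 fpu_ctxt[512];  /* FXSAVE area; size assumed */
        trap_info_t          trap_ctxt[256]; /* per-vector guest handlers */
        unsigned long        debugreg[8];
        unsigned long        ldt_base, ldt_ents;
        unsigned long        kernel_ss, kernel_sp;
        unsigned long        pt_base;
        unsigned long        vm_assist;
    };

    struct arch_exec_domain {
        struct vcpu_guest_context guest_context;  /* embedded, not scattered */
        /* ... */
    };

With the external and internal layouts unified, arch_getdomaininfo_ctxt() and arch_set_info_guest() become near-inverses of each other.
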
    31.1 --- a/xen/arch/x86/domain.c	Thu Apr 28 13:54:01 2005 +0000
    31.2 +++ b/xen/arch/x86/domain.c	Fri Apr 29 07:34:47 2005 +0000
    31.3 @@ -50,6 +50,16 @@ struct percpu_ctxt {
    31.4  } __cacheline_aligned;
    31.5  static struct percpu_ctxt percpu_ctxt[NR_CPUS];
    31.6  
    31.7 +static void continue_idle_task(struct exec_domain *ed)
    31.8 +{
    31.9 +    reset_stack_and_jump(idle_loop);
   31.10 +}
   31.11 +
   31.12 +static void continue_nonidle_task(struct exec_domain *ed)
   31.13 +{
   31.14 +    reset_stack_and_jump(ret_from_intr);
   31.15 +}
   31.16 +
   31.17  static void default_idle(void)
   31.18  {
   31.19      local_irq_disable();
   31.20 @@ -74,24 +84,32 @@ static __attribute_used__ void idle_loop
   31.21      }
   31.22  }
   31.23  
   31.24 +static void __startup_cpu_idle_loop(struct exec_domain *ed)
   31.25 +{
   31.26 +    /* Signal to boot CPU that we are done. */
   31.27 +    init_idle();
   31.28 +
   31.29 +    /* Start normal idle loop. */
   31.30 +    ed->arch.schedule_tail = continue_idle_task;
   31.31 +    reset_stack_and_jump(idle_loop);
   31.32 +}
   31.33 +
   31.34  void startup_cpu_idle_loop(void)
   31.35  {
   31.36 +    struct exec_domain *ed = current;
   31.37 +
   31.38      /* Just some sanity to ensure that the scheduler is set up okay. */
   31.39 -    ASSERT(current->domain->id == IDLE_DOMAIN_ID);
   31.40 -    percpu_ctxt[smp_processor_id()].curr_ed = current;
   31.41 -    set_bit(smp_processor_id(), &current->domain->cpuset);
   31.42 -    domain_unpause_by_systemcontroller(current->domain);
   31.43 +    ASSERT(ed->domain->id == IDLE_DOMAIN_ID);
   31.44 +    percpu_ctxt[smp_processor_id()].curr_ed = ed;
   31.45 +    set_bit(smp_processor_id(), &ed->domain->cpuset);
   31.46 +    domain_unpause_by_systemcontroller(ed->domain);
   31.47 +
   31.48 +    ed->arch.schedule_tail = __startup_cpu_idle_loop;
   31.49      raise_softirq(SCHEDULE_SOFTIRQ);
   31.50      do_softirq();
   31.51  
   31.52 -    /*
   31.53 -     * Declares CPU setup done to the boot processor.
   31.54 -     * Therefore memory barrier to ensure state is visible.
   31.55 -     */
   31.56 -    smp_mb();
   31.57 -    init_idle();
   31.58 -
   31.59 -    idle_loop();
   31.60 +    /* End up in __startup_cpu_idle_loop, not here. */
   31.61 +    BUG();
   31.62  }
   31.63  
   31.64  static long no_idt[2];
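
Both the old and new idle paths lean on reset_stack_and_jump(), which throws away the current call chain and restarts execution at the named function on a fresh stack; routing the boot path through ed->arch.schedule_tail means every context-switch tail (idle and non-idle alike) now funnels through the same mechanism, which is why falling out of do_softirq() here is a BUG(). The macro is approximately (64-bit flavour; STR() is a stringify helper and the stack-top expression is a placeholder, so this is not the verbatim Xen macro):

    /* Approximate shape only. */
    #define reset_stack_and_jump(fn)                    \
        __asm__ __volatile__ (                          \
            "movq %0,%%rsp; jmp " STR(fn)               \
            : : "r" (cpu_stack_top()) : "memory" )      /* helper name assumed */
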
   31.65 @@ -219,16 +237,6 @@ void free_perdomain_pt(struct domain *d)
   31.66  #endif
   31.67  }
   31.68  
   31.69 -static void continue_idle_task(struct exec_domain *ed)
   31.70 -{
   31.71 -    reset_stack_and_jump(idle_loop);
   31.72 -}
   31.73 -
   31.74 -static void continue_nonidle_task(struct exec_domain *ed)
   31.75 -{
   31.76 -    reset_stack_and_jump(ret_from_intr);
   31.77 -}
   31.78 -
   31.79  void arch_do_createdomain(struct exec_domain *ed)
   31.80  {
   31.81      struct domain *d = ed->domain;
   31.82 @@ -237,11 +245,7 @@ void arch_do_createdomain(struct exec_do
   31.83  
   31.84      ed->arch.flags = TF_kernel_mode;
   31.85  
   31.86 -    if ( d->id == IDLE_DOMAIN_ID )
   31.87 -    {
   31.88 -        ed->arch.schedule_tail = continue_idle_task;
   31.89 -    }
   31.90 -    else
   31.91 +    if ( d->id != IDLE_DOMAIN_ID )
   31.92      {
   31.93          ed->arch.schedule_tail = continue_nonidle_task;
   31.94  
   31.95 @@ -312,14 +316,14 @@ void arch_vmx_do_launch(struct exec_doma
   31.96      reset_stack_and_jump(vmx_asm_do_launch);
   31.97  }
   31.98  
   31.99 -static int vmx_final_setup_guest(struct exec_domain *ed,
  31.100 -                                   full_execution_context_t *full_context)
  31.101 +static int vmx_final_setup_guest(
  31.102 +    struct exec_domain *ed, struct vcpu_guest_context *ctxt)
  31.103  {
  31.104      int error;
  31.105 -    execution_context_t *context;
  31.106 +    struct cpu_user_regs *regs;
  31.107      struct vmcs_struct *vmcs;
  31.108  
  31.109 -    context = &full_context->cpu_ctxt;
  31.110 +    regs = &ctxt->user_regs;
  31.111  
  31.112      /*
  31.113       * Create a new VMCS
  31.114 @@ -333,7 +337,7 @@ static int vmx_final_setup_guest(struct 
  31.115  
  31.116      ed->arch.arch_vmx.vmcs = vmcs;
  31.117      error = construct_vmcs(
  31.118 -        &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
  31.119 +        &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
  31.120      if ( error < 0 )
  31.121      {
  31.122          printk("Failed to construct a new VMCS\n");
  31.123 @@ -345,7 +349,7 @@ static int vmx_final_setup_guest(struct 
  31.124  
  31.125  #if defined (__i386)
  31.126      ed->arch.arch_vmx.vmx_platform.real_mode_data = 
  31.127 -        (unsigned long *) context->esi;
  31.128 +        (unsigned long *) regs->esi;
  31.129  #endif
  31.130  
  31.131      if (ed == ed->domain->exec_domain[0]) {
  31.132 @@ -374,7 +378,7 @@ out:
  31.133  
  31.134  /* This is called by arch_final_setup_guest and do_boot_vcpu */
  31.135  int arch_set_info_guest(
  31.136 -    struct exec_domain *ed, full_execution_context_t *c)
  31.137 +    struct exec_domain *ed, struct vcpu_guest_context *c)
  31.138  {
  31.139      struct domain *d = ed->domain;
  31.140      unsigned long phys_basetab;
  31.141 @@ -385,65 +389,42 @@ int arch_set_info_guest(
  31.142       * #GP. If DS, ES, FS, GS are DPL 0 then they'll be cleared automatically.
  31.143       * If SS RPL or DPL differs from CS RPL then we'll #GP.
  31.144       */
  31.145 -    if (!(c->flags & ECF_VMX_GUEST)) 
  31.146 -        if ( ((c->cpu_ctxt.cs & 3) == 0) ||
  31.147 -             ((c->cpu_ctxt.ss & 3) == 0) )
  31.148 +    if ( !(c->flags & VGCF_VMX_GUEST) )
  31.149 +    {
  31.150 +        if ( ((c->user_regs.cs & 3) == 0) ||
  31.151 +             ((c->user_regs.ss & 3) == 0) )
  31.152                  return -EINVAL;
  31.153 +    }
  31.154  
  31.155      clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
  31.156 -    if ( c->flags & ECF_I387_VALID )
  31.157 +    if ( c->flags & VGCF_I387_VALID )
  31.158          set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
  31.159  
  31.160      ed->arch.flags &= ~TF_kernel_mode;
  31.161 -    if ( c->flags & ECF_IN_KERNEL )
  31.162 +    if ( c->flags & VGCF_IN_KERNEL )
  31.163          ed->arch.flags |= TF_kernel_mode;
  31.164  
  31.165 -    memcpy(&ed->arch.user_ctxt,
  31.166 -           &c->cpu_ctxt,
  31.167 -           sizeof(ed->arch.user_ctxt));
  31.168 -
  31.169 -    memcpy(&ed->arch.i387,
  31.170 -           &c->fpu_ctxt,
  31.171 -           sizeof(ed->arch.i387));
  31.172 +    memcpy(&ed->arch.guest_context, c, sizeof(*c));
  31.173  
  31.174      /* IOPL privileges are virtualised. */
  31.175 -    ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3;
  31.176 -    ed->arch.user_ctxt.eflags &= ~EF_IOPL;
  31.177 +    ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
  31.178 +    ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
  31.179  
  31.180      /* Clear IOPL for unprivileged domains. */
  31.181 -    if (!IS_PRIV(d))
  31.182 -        ed->arch.user_ctxt.eflags &= 0xffffcfff;
  31.183 +    if ( !IS_PRIV(d) )
  31.184 +        ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;
  31.185  
  31.186 -    if (test_bit(EDF_DONEINIT, &ed->ed_flags))
  31.187 +    if ( test_bit(EDF_DONEINIT, &ed->ed_flags) )
  31.188          return 0;
  31.189  
  31.190 -    memcpy(ed->arch.traps,
  31.191 -           &c->trap_ctxt,
  31.192 -           sizeof(ed->arch.traps));
  31.193 -
  31.194      if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
  31.195          return rc;
  31.196  
  31.197 -    ed->arch.ldt_base = c->ldt_base;
  31.198 -    ed->arch.ldt_ents = c->ldt_ents;
  31.199 -
  31.200 -    ed->arch.kernel_ss = c->kernel_ss;
  31.201 -    ed->arch.kernel_sp = c->kernel_esp;
  31.202 -
  31.203 +    memset(ed->arch.guest_context.debugreg, 0,
  31.204 +           sizeof(ed->arch.guest_context.debugreg));
  31.205      for ( i = 0; i < 8; i++ )
  31.206          (void)set_debugreg(ed, i, c->debugreg[i]);
  31.207  
  31.208 -#if defined(__i386__)
  31.209 -    ed->arch.event_selector    = c->event_callback_cs;
  31.210 -    ed->arch.event_address     = c->event_callback_eip;
  31.211 -    ed->arch.failsafe_selector = c->failsafe_callback_cs;
  31.212 -    ed->arch.failsafe_address  = c->failsafe_callback_eip;
  31.213 -#elif defined(__x86_64__)
  31.214 -    ed->arch.event_address     = c->event_callback_eip;
  31.215 -    ed->arch.failsafe_address  = c->failsafe_callback_eip;
  31.216 -    ed->arch.syscall_address   = c->syscall_callback_eip;
  31.217 -#endif
  31.218 -
  31.219      if ( ed->eid == 0 )
  31.220          d->vm_assist = c->vm_assist;
  31.221  
  31.222 @@ -475,7 +456,7 @@ int arch_set_info_guest(
  31.223      }
  31.224  
  31.225  #ifdef CONFIG_VMX
  31.226 -    if ( c->flags & ECF_VMX_GUEST )
  31.227 +    if ( c->flags & VGCF_VMX_GUEST )
  31.228      {
  31.229          int error;
  31.230  
  31.231 @@ -507,7 +488,7 @@ void new_thread(struct exec_domain *d,
  31.232                  unsigned long start_stack,
  31.233                  unsigned long start_info)
  31.234  {
  31.235 -    execution_context_t *ec = &d->arch.user_ctxt;
  31.236 +    struct cpu_user_regs *regs = &d->arch.guest_context.user_regs;
  31.237  
  31.238      /*
  31.239       * Initial register values:
  31.240 @@ -517,15 +498,15 @@ void new_thread(struct exec_domain *d,
  31.241       *          ESI = start_info
  31.242       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
  31.243       */
  31.244 -    ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS;
  31.245 -    ec->ss = FLAT_KERNEL_SS;
  31.246 -    ec->cs = FLAT_KERNEL_CS;
  31.247 -    ec->eip = start_pc;
  31.248 -    ec->esp = start_stack;
  31.249 -    ec->esi = start_info;
  31.250 +    regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
  31.251 +    regs->ss = FLAT_KERNEL_SS;
  31.252 +    regs->cs = FLAT_KERNEL_CS;
  31.253 +    regs->eip = start_pc;
  31.254 +    regs->esp = start_stack;
  31.255 +    regs->esi = start_info;
  31.256  
  31.257 -    __save_flags(ec->eflags);
  31.258 -    ec->eflags |= X86_EFLAGS_IF;
  31.259 +    __save_flags(regs->eflags);
  31.260 +    regs->eflags |= X86_EFLAGS_IF;
  31.261  }
  31.262  
  31.263  
  31.264 @@ -557,63 +538,63 @@ void toggle_guest_mode(struct exec_domai
  31.265  
  31.266  static void load_segments(struct exec_domain *p, struct exec_domain *n)
  31.267  {
  31.268 +    struct vcpu_guest_context *pctxt = &p->arch.guest_context;
  31.269 +    struct vcpu_guest_context *nctxt = &n->arch.guest_context;
  31.270      int all_segs_okay = 1;
  31.271  
  31.272      /* Either selector != 0 ==> reload. */
  31.273 -    if ( unlikely(p->arch.user_ctxt.ds |
  31.274 -                  n->arch.user_ctxt.ds) )
  31.275 -        all_segs_okay &= loadsegment(ds, n->arch.user_ctxt.ds);
  31.276 +    if ( unlikely(pctxt->user_regs.ds | nctxt->user_regs.ds) )
  31.277 +        all_segs_okay &= loadsegment(ds, nctxt->user_regs.ds);
  31.278  
  31.279      /* Either selector != 0 ==> reload. */
  31.280 -    if ( unlikely(p->arch.user_ctxt.es |
  31.281 -                  n->arch.user_ctxt.es) )
  31.282 -        all_segs_okay &= loadsegment(es, n->arch.user_ctxt.es);
  31.283 +    if ( unlikely(pctxt->user_regs.es | nctxt->user_regs.es) )
  31.284 +        all_segs_okay &= loadsegment(es, nctxt->user_regs.es);
  31.285  
  31.286      /*
  31.287       * Either selector != 0 ==> reload.
  31.288       * Also reload to reset FS_BASE if it was non-zero.
  31.289       */
  31.290 -    if ( unlikely(p->arch.user_ctxt.fs |
  31.291 -                  p->arch.user_ctxt.fs_base |
  31.292 -                  n->arch.user_ctxt.fs) )
  31.293 +    if ( unlikely(pctxt->user_regs.fs |
  31.294 +                  pctxt->fs_base |
  31.295 +                  nctxt->user_regs.fs) )
  31.296      {
  31.297 -        all_segs_okay &= loadsegment(fs, n->arch.user_ctxt.fs);
  31.298 -        if ( p->arch.user_ctxt.fs ) /* != 0 selector kills fs_base */
  31.299 -            p->arch.user_ctxt.fs_base = 0;
  31.300 +        all_segs_okay &= loadsegment(fs, nctxt->user_regs.fs);
  31.301 +        if ( pctxt->user_regs.fs ) /* != 0 selector kills fs_base */
  31.302 +            pctxt->fs_base = 0;
  31.303      }
  31.304  
  31.305      /*
  31.306       * Either selector != 0 ==> reload.
  31.307       * Also reload to reset GS_BASE if it was non-zero.
  31.308       */
  31.309 -    if ( unlikely(p->arch.user_ctxt.gs |
  31.310 -                  p->arch.user_ctxt.gs_base_user |
  31.311 -                  n->arch.user_ctxt.gs) )
  31.312 +    if ( unlikely(pctxt->user_regs.gs |
  31.313 +                  pctxt->gs_base_user |
  31.314 +                  nctxt->user_regs.gs) )
  31.315      {
  31.316          /* Reset GS_BASE with user %gs? */
  31.317 -        if ( p->arch.user_ctxt.gs || !n->arch.user_ctxt.gs_base_user )
  31.318 -            all_segs_okay &= loadsegment(gs, n->arch.user_ctxt.gs);
  31.319 -        if ( p->arch.user_ctxt.gs ) /* != 0 selector kills gs_base_user */
  31.320 -            p->arch.user_ctxt.gs_base_user = 0;
  31.321 +        if ( pctxt->user_regs.gs || !nctxt->gs_base_user )
  31.322 +            all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
  31.323 +        if ( pctxt->user_regs.gs ) /* != 0 selector kills gs_base_user */
  31.324 +            pctxt->gs_base_user = 0;
  31.325      }
  31.326  
  31.327      /* This can only be non-zero if selector is NULL. */
  31.328 -    if ( n->arch.user_ctxt.fs_base )
  31.329 +    if ( nctxt->fs_base )
  31.330          wrmsr(MSR_FS_BASE,
  31.331 -              n->arch.user_ctxt.fs_base,
  31.332 -              n->arch.user_ctxt.fs_base>>32);
  31.333 +              nctxt->fs_base,
  31.334 +              nctxt->fs_base>>32);
  31.335  
  31.336      /* Most kernels have non-zero GS base, so don't bother testing. */
  31.337      /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
  31.338      wrmsr(MSR_SHADOW_GS_BASE,
  31.339 -          n->arch.user_ctxt.gs_base_kernel,
  31.340 -          n->arch.user_ctxt.gs_base_kernel>>32);
  31.341 +          nctxt->gs_base_kernel,
  31.342 +          nctxt->gs_base_kernel>>32);
  31.343  
  31.344      /* This can only be non-zero if selector is NULL. */
  31.345 -    if ( n->arch.user_ctxt.gs_base_user )
  31.346 +    if ( nctxt->gs_base_user )
  31.347          wrmsr(MSR_GS_BASE,
  31.348 -              n->arch.user_ctxt.gs_base_user,
  31.349 -              n->arch.user_ctxt.gs_base_user>>32);
  31.350 +              nctxt->gs_base_user,
  31.351 +              nctxt->gs_base_user>>32);
  31.352  
  31.353      /* If in kernel mode then switch the GS bases around. */
  31.354      if ( n->arch.flags & TF_kernel_mode )
  31.355 @@ -621,28 +602,28 @@ static void load_segments(struct exec_do
  31.356  
  31.357      if ( unlikely(!all_segs_okay) )
  31.358      {
  31.359 -        struct xen_regs *regs = get_execution_context();
  31.360 +        struct cpu_user_regs *regs = get_cpu_user_regs();
  31.361          unsigned long   *rsp =
  31.362              (n->arch.flags & TF_kernel_mode) ?
  31.363              (unsigned long *)regs->rsp : 
  31.364 -            (unsigned long *)n->arch.kernel_sp;
  31.365 +            (unsigned long *)nctxt->kernel_sp;
  31.366  
  31.367          if ( !(n->arch.flags & TF_kernel_mode) )
  31.368              toggle_guest_mode(n);
  31.369          else
  31.370              regs->cs &= ~3;
  31.371  
  31.372 -        if ( put_user(regs->ss,             rsp- 1) |
  31.373 -             put_user(regs->rsp,            rsp- 2) |
  31.374 -             put_user(regs->rflags,         rsp- 3) |
  31.375 -             put_user(regs->cs,             rsp- 4) |
  31.376 -             put_user(regs->rip,            rsp- 5) |
  31.377 -             put_user(n->arch.user_ctxt.gs, rsp- 6) |
  31.378 -             put_user(n->arch.user_ctxt.fs, rsp- 7) |
  31.379 -             put_user(n->arch.user_ctxt.es, rsp- 8) |
  31.380 -             put_user(n->arch.user_ctxt.ds, rsp- 9) |
  31.381 -             put_user(regs->r11,            rsp-10) |
  31.382 -             put_user(regs->rcx,            rsp-11) )
  31.383 +        if ( put_user(regs->ss,            rsp- 1) |
  31.384 +             put_user(regs->rsp,           rsp- 2) |
  31.385 +             put_user(regs->rflags,        rsp- 3) |
  31.386 +             put_user(regs->cs,            rsp- 4) |
  31.387 +             put_user(regs->rip,           rsp- 5) |
  31.388 +             put_user(nctxt->user_regs.gs, rsp- 6) |
  31.389 +             put_user(nctxt->user_regs.fs, rsp- 7) |
  31.390 +             put_user(nctxt->user_regs.es, rsp- 8) |
  31.391 +             put_user(nctxt->user_regs.ds, rsp- 9) |
  31.392 +             put_user(regs->r11,           rsp-10) |
  31.393 +             put_user(regs->rcx,           rsp-11) )
  31.394          {
  31.395              DPRINTK("Error while creating failsafe callback frame.\n");
  31.396              domain_crash();
  31.397 @@ -653,16 +634,17 @@ static void load_segments(struct exec_do
  31.398          regs->ss            = __GUEST_SS;
  31.399          regs->rsp           = (unsigned long)(rsp-11);
  31.400          regs->cs            = __GUEST_CS;
  31.401 -        regs->rip           = n->arch.failsafe_address;
  31.402 +        regs->rip           = nctxt->failsafe_callback_eip;
  31.403      }
  31.404  }
  31.405  
  31.406 -static void save_segments(struct exec_domain *p)
  31.407 +static void save_segments(struct exec_domain *ed)
  31.408  {
  31.409 -    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
  31.410 -    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
  31.411 -    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
  31.412 -    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
  31.413 +    struct cpu_user_regs *regs = &ed->arch.guest_context.user_regs;
  31.414 +    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (regs->ds) );
  31.415 +    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (regs->es) );
  31.416 +    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (regs->fs) );
  31.417 +    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (regs->gs) );
  31.418  }
  31.419  
  31.420  static void clear_segments(void)
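
The put_user() ladder above builds the x86-64 failsafe-callback frame directly on the guest kernel stack when one of the lazy segment reloads fails. Reading the layout straight off the rsp-1..rsp-11 stores, the guest's failsafe handler is entered (at nctxt->failsafe_callback_eip, with regs->rsp pointing at rsp-11) on a frame shaped like this (struct name and field grouping are illustrative only):

    /* Illustrative; layout read off the put_user() sequence above. */
    struct failsafe_callback_frame {
        unsigned long rcx;              /* rsp-11 */
        unsigned long r11;              /* rsp-10 */
        unsigned long ds, es, fs, gs;   /* rsp-9 .. rsp-6: the bad selectors */
        unsigned long rip;              /* rsp-5 */
        unsigned long cs;               /* rsp-4 */
        unsigned long rflags;           /* rsp-3 */
        unsigned long rsp;              /* rsp-2 */
        unsigned long ss;               /* rsp-1 */
    };
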
  31.421 @@ -679,7 +661,7 @@ static void clear_segments(void)
  31.422  
  31.423  long do_switch_to_user(void)
  31.424  {
  31.425 -    struct xen_regs       *regs = get_execution_context();
  31.426 +    struct cpu_user_regs  *regs = get_cpu_user_regs();
  31.427      struct switch_to_user  stu;
  31.428      struct exec_domain    *ed = current;
  31.429  
  31.430 @@ -695,7 +677,7 @@ long do_switch_to_user(void)
  31.431      regs->rsp    = stu.rsp;
  31.432      regs->ss     = stu.ss | 3; /* force guest privilege */
  31.433  
  31.434 -    if ( !(stu.flags & ECF_IN_SYSCALL) )
  31.435 +    if ( !(stu.flags & VGCF_IN_SYSCALL) )
  31.436      {
  31.437          regs->entry_vector = 0;
  31.438          regs->r11 = stu.r11;
  31.439 @@ -717,8 +699,8 @@ long do_switch_to_user(void)
  31.440  static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
  31.441  {
  31.442      struct tss_struct *tss = &init_tss[cpu];
  31.443 -    tss->esp1 = n->arch.kernel_sp;
  31.444 -    tss->ss1  = n->arch.kernel_ss;
  31.445 +    tss->esp1 = n->arch.guest_context.kernel_sp;
  31.446 +    tss->ss1  = n->arch.guest_context.kernel_ss;
  31.447  }
  31.448  
  31.449  #endif
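
switch_kernel_stack() exists because 32-bit Xen keeps guest kernels in ring 1: when an interrupt raises privilege into ring 1, the CPU fetches the new stack from the TSS's ss1/esp1 pair, so those fields must track the incoming vcpu's registered kernel stack on every switch. Abbreviated view of the hardware fields involved (illustration, not the full TSS layout):

    struct tss_struct {                          /* abbreviated sketch */
        unsigned short back_link, __blh;
        unsigned long  esp0; unsigned short ss0, __ss0h;  /* ring 0: Xen */
        unsigned long  esp1; unsigned short ss1, __ss1h;  /* ring 1: guest kernel */
        /* ... esp2/ss2, cr3, eip, segment registers, I/O bitmap ... */
    };
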
  31.450 @@ -728,15 +710,15 @@ static inline void switch_kernel_stack(s
  31.451  
  31.452  static void __context_switch(void)
  31.453  {
  31.454 -    execution_context_t *stack_ec = get_execution_context();
  31.455 +    struct cpu_user_regs *stack_regs = get_cpu_user_regs();
  31.456      unsigned int         cpu = smp_processor_id();
  31.457      struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
  31.458      struct exec_domain  *n = current;
  31.459  
  31.460      if ( !is_idle_task(p->domain) )
  31.461      {
  31.462 -        memcpy(&p->arch.user_ctxt,
  31.463 -               stack_ec, 
  31.464 +        memcpy(&p->arch.guest_context.user_regs,
  31.465 +               stack_regs, 
  31.466                 CTXT_SWITCH_STACK_BYTES);
  31.467          unlazy_fpu(p);
  31.468          CLEAR_FAST_TRAP(&p->arch);
  31.469 @@ -745,20 +727,20 @@ static void __context_switch(void)
  31.470  
  31.471      if ( !is_idle_task(n->domain) )
  31.472      {
  31.473 -        memcpy(stack_ec,
  31.474 -               &n->arch.user_ctxt,
  31.475 +        memcpy(stack_regs,
  31.476 +               &n->arch.guest_context.user_regs,
  31.477                 CTXT_SWITCH_STACK_BYTES);
  31.478  
  31.479          /* Maybe switch the debug registers. */
  31.480 -        if ( unlikely(n->arch.debugreg[7]) )
  31.481 +        if ( unlikely(n->arch.guest_context.debugreg[7]) )
  31.482          {
  31.483 -            loaddebug(&n->arch, 0);
  31.484 -            loaddebug(&n->arch, 1);
  31.485 -            loaddebug(&n->arch, 2);
  31.486 -            loaddebug(&n->arch, 3);
  31.487 +            loaddebug(&n->arch.guest_context, 0);
  31.488 +            loaddebug(&n->arch.guest_context, 1);
  31.489 +            loaddebug(&n->arch.guest_context, 2);
  31.490 +            loaddebug(&n->arch.guest_context, 3);
  31.491              /* no 4 and 5 */
  31.492 -            loaddebug(&n->arch, 6);
  31.493 -            loaddebug(&n->arch, 7);
  31.494 +            loaddebug(&n->arch.guest_context, 6);
  31.495 +            loaddebug(&n->arch.guest_context, 7);
  31.496          }
  31.497  
  31.498          if ( !VMX_DOMAIN(n) )
  31.499 @@ -816,7 +798,12 @@ void context_switch(struct exec_domain *
  31.500      clear_bit(EDF_RUNNING, &prev->ed_flags);
  31.501  
  31.502      schedule_tail(next);
  31.503 +    BUG();
  31.504 +}
  31.505  
  31.506 +void continue_running(struct exec_domain *same)
  31.507 +{
  31.508 +    schedule_tail(same);
  31.509      BUG();
  31.510  }
  31.511  
  31.512 @@ -844,7 +831,7 @@ unsigned long __hypercall_create_continu
  31.513      unsigned int op, unsigned int nr_args, ...)
  31.514  {
  31.515      struct mc_state *mcs = &mc_state[smp_processor_id()];
  31.516 -    execution_context_t *ec;
  31.517 +    struct cpu_user_regs *regs;
  31.518      unsigned int i;
  31.519      va_list args;
  31.520  
  31.521 @@ -859,37 +846,37 @@ unsigned long __hypercall_create_continu
  31.522      }
  31.523      else
  31.524      {
  31.525 -        ec       = get_execution_context();
  31.526 +        regs       = get_cpu_user_regs();
  31.527  #if defined(__i386__)
  31.528 -        ec->eax  = op;
  31.529 -        ec->eip -= 2;  /* re-execute 'int 0x82' */
  31.530 +        regs->eax  = op;
  31.531 +        regs->eip -= 2;  /* re-execute 'int 0x82' */
  31.532          
  31.533          for ( i = 0; i < nr_args; i++ )
  31.534          {
  31.535              switch ( i )
  31.536              {
  31.537 -            case 0: ec->ebx = va_arg(args, unsigned long); break;
  31.538 -            case 1: ec->ecx = va_arg(args, unsigned long); break;
  31.539 -            case 2: ec->edx = va_arg(args, unsigned long); break;
  31.540 -            case 3: ec->esi = va_arg(args, unsigned long); break;
  31.541 -            case 4: ec->edi = va_arg(args, unsigned long); break;
  31.542 -            case 5: ec->ebp = va_arg(args, unsigned long); break;
  31.543 +            case 0: regs->ebx = va_arg(args, unsigned long); break;
  31.544 +            case 1: regs->ecx = va_arg(args, unsigned long); break;
  31.545 +            case 2: regs->edx = va_arg(args, unsigned long); break;
  31.546 +            case 3: regs->esi = va_arg(args, unsigned long); break;
  31.547 +            case 4: regs->edi = va_arg(args, unsigned long); break;
  31.548 +            case 5: regs->ebp = va_arg(args, unsigned long); break;
  31.549              }
  31.550          }
  31.551  #elif defined(__x86_64__)
  31.552 -        ec->rax  = op;
  31.553 -        ec->rip -= 2;  /* re-execute 'syscall' */
  31.554 +        regs->rax  = op;
  31.555 +        regs->rip -= 2;  /* re-execute 'syscall' */
  31.556          
  31.557          for ( i = 0; i < nr_args; i++ )
  31.558          {
  31.559              switch ( i )
  31.560              {
  31.561 -            case 0: ec->rdi = va_arg(args, unsigned long); break;
  31.562 -            case 1: ec->rsi = va_arg(args, unsigned long); break;
  31.563 -            case 2: ec->rdx = va_arg(args, unsigned long); break;
  31.564 -            case 3: ec->r10 = va_arg(args, unsigned long); break;
  31.565 -            case 4: ec->r8  = va_arg(args, unsigned long); break;
  31.566 -            case 5: ec->r9  = va_arg(args, unsigned long); break;
  31.567 +            case 0: regs->rdi = va_arg(args, unsigned long); break;
  31.568 +            case 1: regs->rsi = va_arg(args, unsigned long); break;
  31.569 +            case 2: regs->rdx = va_arg(args, unsigned long); break;
  31.570 +            case 3: regs->r10 = va_arg(args, unsigned long); break;
  31.571 +            case 4: regs->r8  = va_arg(args, unsigned long); break;
  31.572 +            case 5: regs->r9  = va_arg(args, unsigned long); break;
  31.573              }
  31.574          }
  31.575  #endif
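
The continuation mechanism reworked above lets a long-running hypercall yield cleanly: it stuffs the (possibly updated) arguments back into the calling convention's registers and rewinds the saved instruction pointer by two bytes, the encoded length of both 'int 0x82' and 'syscall', so the guest transparently re-enters the hypercall the next time it runs. A hypothetical call site, for flavour (helper and argument names are assumptions, not taken from this patch):

    /* Hypothetical preemption point inside a long-running hypercall. */
    if ( hypercall_preempt_check() )    /* any pending-work test */
        return __hypercall_create_continuation(
            __HYPERVISOR_mmuext_op, 3, uops, count, pdone);
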
    32.1 --- a/xen/arch/x86/domain_build.c	Thu Apr 28 13:54:01 2005 +0000
    32.2 +++ b/xen/arch/x86/domain_build.c	Fri Apr 29 07:34:47 2005 +0000
    32.3 @@ -222,14 +222,15 @@ int construct_dom0(struct domain *d,
    32.4       * We're basically forcing default RPLs to 1, so that our "what privilege
    32.5       * level are we returning to?" logic works.
    32.6       */
    32.7 -    ed->arch.failsafe_selector = FLAT_KERNEL_CS;
    32.8 -    ed->arch.event_selector    = FLAT_KERNEL_CS;
    32.9 -    ed->arch.kernel_ss = FLAT_KERNEL_SS;
   32.10 +    ed->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
   32.11      for ( i = 0; i < 256; i++ ) 
   32.12 -        ed->arch.traps[i].cs = FLAT_KERNEL_CS;
   32.13 +        ed->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
   32.14  
   32.15  #if defined(__i386__)
   32.16  
   32.17 +    ed->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
   32.18 +    ed->arch.guest_context.event_callback_cs    = FLAT_KERNEL_CS;
   32.19 +
   32.20      /*
   32.21       * Protect the lowest 1GB of memory. We use a temporary mapping there
   32.22       * from which we copy the kernel and ramdisk images.
    33.1 --- a/xen/arch/x86/extable.c	Thu Apr 28 13:54:01 2005 +0000
    33.2 +++ b/xen/arch/x86/extable.c	Fri Apr 29 07:34:47 2005 +0000
    33.3 @@ -68,7 +68,7 @@ search_exception_table(unsigned long add
    33.4  }
    33.5  
    33.6  unsigned long
    33.7 -search_pre_exception_table(struct xen_regs *regs)
    33.8 +search_pre_exception_table(struct cpu_user_regs *regs)
    33.9  {
   33.10      unsigned long addr = (unsigned long)regs->eip;
   33.11      unsigned long fixup = search_one_table(
    34.1 --- a/xen/arch/x86/i387.c	Thu Apr 28 13:54:01 2005 +0000
    34.2 +++ b/xen/arch/x86/i387.c	Fri Apr 29 07:34:47 2005 +0000
    34.3 @@ -34,11 +34,11 @@ void save_init_fpu(struct exec_domain *t
    34.4      if ( cpu_has_fxsr )
    34.5          __asm__ __volatile__ (
    34.6              "fxsave %0 ; fnclex"
    34.7 -            : "=m" (tsk->arch.i387) );
    34.8 +            : "=m" (tsk->arch.guest_context.fpu_ctxt) );
    34.9      else
   34.10          __asm__ __volatile__ (
   34.11              "fnsave %0 ; fwait"
   34.12 -            : "=m" (tsk->arch.i387) );
   34.13 +            : "=m" (tsk->arch.guest_context.fpu_ctxt) );
   34.14  
   34.15      clear_bit(EDF_USEDFPU, &tsk->ed_flags);
   34.16      stts();
   34.17 @@ -46,14 +46,38 @@ void save_init_fpu(struct exec_domain *t
   34.18  
   34.19  void restore_fpu(struct exec_domain *tsk)
   34.20  {
   34.21 +    /*
   34.22 +     * FXRSTOR can fault if passed a corrupted data block. We handle this
   34.23 +     * possibility, which may occur if the block was passed to us by control
   34.24 +     * tools, by silently clearing the block.
   34.25 +     */
   34.26      if ( cpu_has_fxsr )
   34.27          __asm__ __volatile__ (
   34.28 -            "fxrstor %0"
   34.29 -            : : "m" (tsk->arch.i387) );
   34.30 +            "1: fxrstor %0            \n"
   34.31 +            ".section .fixup,\"ax\"   \n"
   34.32 +            "2: push %%"__OP"ax       \n"
   34.33 +            "   push %%"__OP"cx       \n"
   34.34 +            "   push %%"__OP"di       \n"
   34.35 +            "   lea  %0,%%"__OP"di    \n"
   34.36 +            "   mov  %1,%%ecx         \n"
   34.37 +            "   xor  %%eax,%%eax      \n"
   34.38 +            "   rep ; stosl           \n"
   34.39 +            "   pop  %%"__OP"di       \n"
   34.40 +            "   pop  %%"__OP"cx       \n"
   34.41 +            "   pop  %%"__OP"ax       \n"
   34.42 +            "   jmp  1b               \n"
   34.43 +            ".previous                \n"
   34.44 +            ".section __ex_table,\"a\"\n"
   34.45 +            "   "__FIXUP_ALIGN"       \n"
   34.46 +            "   "__FIXUP_WORD" 1b,2b  \n"
   34.47 +            ".previous                \n"
   34.48 +            : 
   34.49 +            : "m" (tsk->arch.guest_context.fpu_ctxt),
   34.50 +              "i" (sizeof(tsk->arch.guest_context.fpu_ctxt)/4) );
   34.51      else
   34.52          __asm__ __volatile__ (
   34.53              "frstor %0"
   34.54 -            : : "m" (tsk->arch.i387) );
   34.55 +            : : "m" (tsk->arch.guest_context.fpu_ctxt) );
   34.56  }
   34.57  
   34.58  /*
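The new FXRSTOR wrapper above is an instance of the standard exception-table pattern: the possibly-faulting instruction gets a numeric label, recovery code goes in .fixup, and a (fault address, fixup address) pair in __ex_table lets the fault handler redirect execution instead of panicking. Here the fixup zeroes the whole fpu_ctxt block with rep stosl and retries, so a corrupt FPU image handed in by the control tools restores as all-zeroes rather than killing the hypervisor. A concrete miniature of the same pattern (64-bit directives assumed; it only works inside a kernel that searches __ex_table on faults):

    /* Sketch: a load that yields 0 instead of faulting on a bad pointer. */
    static unsigned long probe_read(unsigned long *addr)
    {
        unsigned long val;
        asm volatile(
            "1: mov (%1),%0           \n"
            ".section .fixup,\"ax\"   \n"
            "2: xor %0,%0             \n"   /* faulted: substitute 0 */
            "   jmp 3f                \n"
            ".previous                \n"
            ".section __ex_table,\"a\"\n"
            "   .align 8              \n"
            "   .quad 1b,2b           \n"   /* fault at 1b => resume at 2b */
            ".previous                \n"
            "3:                       \n"
            : "=r" (val) : "r" (addr) : "memory");
        return val;
    }
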
    35.1 --- a/xen/arch/x86/irq.c	Thu Apr 28 13:54:01 2005 +0000
    35.2 +++ b/xen/arch/x86/irq.c	Fri Apr 29 07:34:47 2005 +0000
    35.3 @@ -17,7 +17,7 @@ irq_desc_t irq_desc[NR_IRQS];
    35.4  
    35.5  static void __do_IRQ_guest(int irq);
    35.6  
    35.7 -void no_action(int cpl, void *dev_id, struct xen_regs *regs) { }
    35.8 +void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
    35.9  
   35.10  static void enable_none(unsigned int irq) { }
   35.11  static unsigned int startup_none(unsigned int irq) { return 0; }
   35.12 @@ -87,7 +87,7 @@ void enable_irq(unsigned int irq)
   35.13      spin_unlock_irqrestore(&desc->lock, flags);
   35.14  }
   35.15  
   35.16 -asmlinkage void do_IRQ(struct xen_regs *regs)
   35.17 +asmlinkage void do_IRQ(struct cpu_user_regs *regs)
   35.18  {       
   35.19      unsigned int      irq = regs->entry_vector;
   35.20      irq_desc_t       *desc = &irq_desc[irq];
    36.1 --- a/xen/arch/x86/mm.c	Thu Apr 28 13:54:01 2005 +0000
    36.2 +++ b/xen/arch/x86/mm.c	Fri Apr 29 07:34:47 2005 +0000
    36.3 @@ -285,7 +285,7 @@ int map_ldt_shadow_page(unsigned int off
    36.4      struct domain *d = ed->domain;
    36.5      unsigned long gpfn, gmfn;
    36.6      l1_pgentry_t l1e, nl1e;
    36.7 -    unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
    36.8 +    unsigned gva = ed->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
    36.9      int res;
   36.10  
   36.11  #if defined(__x86_64__)
   36.12 @@ -1639,12 +1639,12 @@ int do_mmuext_op(
   36.13                  okay = 0;
   36.14                  MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
   36.15              }
   36.16 -            else if ( (ed->arch.ldt_ents != ents) || 
   36.17 -                      (ed->arch.ldt_base != ptr) )
   36.18 +            else if ( (ed->arch.guest_context.ldt_ents != ents) || 
   36.19 +                      (ed->arch.guest_context.ldt_base != ptr) )
   36.20              {
   36.21                  invalidate_shadow_ldt(ed);
   36.22 -                ed->arch.ldt_base = ptr;
   36.23 -                ed->arch.ldt_ents = ents;
   36.24 +                ed->arch.guest_context.ldt_base = ptr;
   36.25 +                ed->arch.guest_context.ldt_ents = ents;
   36.26                  load_LDT(ed);
   36.27                  percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   36.28                  if ( ents != 0 )
   36.29 @@ -2842,7 +2842,7 @@ int ptwr_do_page_fault(struct domain *d,
   36.30      return EXCRET_fault_fixed;
   36.31  
   36.32   emulate:
   36.33 -    if ( x86_emulate_memop(get_execution_context(), addr,
   36.34 +    if ( x86_emulate_memop(get_cpu_user_regs(), addr,
   36.35                             &ptwr_mem_emulator, BITS_PER_LONG/8) )
   36.36          return 0;
   36.37      perfc_incrc(ptwr_emulations);
    37.1 --- a/xen/arch/x86/nmi.c	Thu Apr 28 13:54:01 2005 +0000
    37.2 +++ b/xen/arch/x86/nmi.c	Fri Apr 29 07:34:47 2005 +0000
    37.3 @@ -267,7 +267,7 @@ void touch_nmi_watchdog (void)
    37.4          alert_counter[i] = 0;
    37.5  }
    37.6  
    37.7 -void nmi_watchdog_tick (struct xen_regs * regs)
    37.8 +void nmi_watchdog_tick (struct cpu_user_regs * regs)
    37.9  {
   37.10      int sum, cpu = smp_processor_id();
   37.11  
    38.1 --- a/xen/arch/x86/shadow.c	Thu Apr 28 13:54:01 2005 +0000
    38.2 +++ b/xen/arch/x86/shadow.c	Fri Apr 29 07:34:47 2005 +0000
    38.3 @@ -2421,7 +2421,7 @@ void __shadow_sync_all(struct domain *d)
    38.4      free_out_of_sync_state(d);
    38.5  }
    38.6  
    38.7 -int shadow_fault(unsigned long va, struct xen_regs *regs)
    38.8 +int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
    38.9  {
   38.10      l1_pgentry_t gpte, spte, orig_gpte;
   38.11      struct exec_domain *ed = current;
    39.1 --- a/xen/arch/x86/time.c	Thu Apr 28 13:54:01 2005 +0000
    39.2 +++ b/xen/arch/x86/time.c	Fri Apr 29 07:34:47 2005 +0000
    39.3 @@ -51,7 +51,7 @@ static s_time_t        stime_irq;       
    39.4  static unsigned long   wc_sec, wc_usec; /* UTC time at last 'time update'.   */
    39.5  static rwlock_t        time_lock = RW_LOCK_UNLOCKED;
    39.6  
    39.7 -void timer_interrupt(int irq, void *dev_id, struct xen_regs *regs)
    39.8 +void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
    39.9  {
   39.10      write_lock_irq(&time_lock);
   39.11  
    40.1 --- a/xen/arch/x86/trampoline.S	Thu Apr 28 13:54:01 2005 +0000
    40.2 +++ b/xen/arch/x86/trampoline.S	Fri Apr 29 07:34:47 2005 +0000
    40.3 @@ -60,8 +60,7 @@ gdt_48:
    40.4  #else
    40.5  	.long   0x100200 # gdt_table
    40.6  #endif
    40.7 -        
    40.8 -.globl SYMBOL_NAME(trampoline_end)
    40.9 -SYMBOL_NAME_LABEL(trampoline_end)
   40.10 +
   40.11 +ENTRY(trampoline_end)
   40.12  
   40.13  #endif /* CONFIG_SMP */
    41.1 --- a/xen/arch/x86/traps.c	Thu Apr 28 13:54:01 2005 +0000
    41.2 +++ b/xen/arch/x86/traps.c	Fri Apr 29 07:34:47 2005 +0000
    41.3 @@ -95,7 +95,7 @@ asmlinkage void machine_check(void);
    41.4   * are disabled). In such situations we can't do much that is safe. We try to
    41.5   * print out some tracing and then we just spin.
    41.6   */
    41.7 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
    41.8 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
    41.9  {
   41.10      int cpu = smp_processor_id();
   41.11      unsigned long cr2;
   41.12 @@ -136,7 +136,7 @@ asmlinkage void fatal_trap(int trapnr, s
   41.13  }
   41.14  
   41.15  static inline int do_trap(int trapnr, char *str,
   41.16 -                          struct xen_regs *regs, 
   41.17 +                          struct cpu_user_regs *regs, 
   41.18                            int use_error_code)
   41.19  {
   41.20      struct exec_domain *ed = current;
   41.21 @@ -150,11 +150,12 @@ static inline int do_trap(int trapnr, ch
   41.22          goto xen_fault;
   41.23  
   41.24  #ifndef NDEBUG
   41.25 -    if ( (ed->arch.traps[trapnr].address == 0) && (ed->domain->id == 0) )
   41.26 +    if ( (ed->arch.guest_context.trap_ctxt[trapnr].address == 0) &&
   41.27 +         (ed->domain->id == 0) )
   41.28          goto xen_fault;
   41.29  #endif
   41.30  
   41.31 -    ti = current->arch.traps + trapnr;
   41.32 +    ti = &current->arch.guest_context.trap_ctxt[trapnr];
   41.33      tb->flags = TBF_EXCEPTION;
   41.34      tb->cs    = ti->cs;
   41.35      tb->eip   = ti->address;
   41.36 @@ -186,13 +187,13 @@ static inline int do_trap(int trapnr, ch
   41.37  }
   41.38  
   41.39  #define DO_ERROR_NOCODE(trapnr, str, name) \
   41.40 -asmlinkage int do_##name(struct xen_regs *regs) \
   41.41 +asmlinkage int do_##name(struct cpu_user_regs *regs) \
   41.42  { \
   41.43      return do_trap(trapnr, str, regs, 0); \
   41.44  }
   41.45  
   41.46  #define DO_ERROR(trapnr, str, name) \
   41.47 -asmlinkage int do_##name(struct xen_regs *regs) \
   41.48 +asmlinkage int do_##name(struct cpu_user_regs *regs) \
   41.49  { \
   41.50      return do_trap(trapnr, str, regs, 1); \
   41.51  }
   41.52 @@ -209,7 +210,7 @@ DO_ERROR_NOCODE(16, "fpu error", coproce
   41.53  DO_ERROR(17, "alignment check", alignment_check)
   41.54  DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
   41.55  
   41.56 -asmlinkage int do_int3(struct xen_regs *regs)
   41.57 +asmlinkage int do_int3(struct cpu_user_regs *regs)
   41.58  {
   41.59      struct exec_domain *ed = current;
   41.60      struct trap_bounce *tb = &ed->arch.trap_bounce;
   41.61 @@ -224,7 +225,7 @@ asmlinkage int do_int3(struct xen_regs *
   41.62          panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
   41.63      } 
   41.64  
   41.65 -    ti = current->arch.traps + 3;
   41.66 +    ti = &current->arch.guest_context.trap_ctxt[TRAP_int3];
   41.67      tb->flags = TBF_EXCEPTION;
   41.68      tb->cs    = ti->cs;
   41.69      tb->eip   = ti->address;
   41.70 @@ -234,7 +235,7 @@ asmlinkage int do_int3(struct xen_regs *
   41.71      return 0;
   41.72  }
   41.73  
   41.74 -asmlinkage void do_machine_check(struct xen_regs *regs)
   41.75 +asmlinkage void do_machine_check(struct cpu_user_regs *regs)
   41.76  {
   41.77      fatal_trap(TRAP_machine_check, regs);
   41.78  }
   41.79 @@ -245,7 +246,7 @@ void propagate_page_fault(unsigned long 
   41.80      struct exec_domain *ed = current;
   41.81      struct trap_bounce *tb = &ed->arch.trap_bounce;
   41.82  
   41.83 -    ti = ed->arch.traps + 14;
   41.84 +    ti = &ed->arch.guest_context.trap_ctxt[TRAP_page_fault];
   41.85      tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
   41.86      tb->cr2        = addr;
   41.87      tb->error_code = error_code;
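
All of these handlers hand exceptions back to the guest through the per-vcpu trap_bounce, filled in from the handler the guest registered in trap_ctxt[]; the exit path consumes it when building the guest-visible exception frame. The fields used in this file suggest roughly this shape (sketch; field widths guessed, not the verbatim definition):

    struct trap_bounce {           /* sketch from usage above */
        unsigned long  error_code; /* valid if TBF_EXCEPTION_ERRCODE */
        unsigned long  cr2;        /* valid if TBF_EXCEPTION_CR2 */
        unsigned short flags;      /* TBF_* */
        unsigned short cs;         /* guest handler code selector */
        unsigned long  eip;        /* guest handler entry point */
    };
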
   41.88 @@ -257,7 +258,7 @@ void propagate_page_fault(unsigned long 
   41.89      ed->arch.guest_cr2 = addr;
   41.90  }
   41.91  
   41.92 -asmlinkage int do_page_fault(struct xen_regs *regs)
   41.93 +asmlinkage int do_page_fault(struct cpu_user_regs *regs)
   41.94  {
   41.95      unsigned long off, addr, fixup;
   41.96      struct exec_domain *ed = current;
   41.97 @@ -303,7 +304,8 @@ asmlinkage int do_page_fault(struct xen_
   41.98      }
   41.99  
  41.100      if ( unlikely(addr >= LDT_VIRT_START(ed)) && 
  41.101 -         (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) )
  41.102 +         (addr < (LDT_VIRT_START(ed) + 
  41.103 +                  (ed->arch.guest_context.ldt_ents*LDT_ENTRY_SIZE))) )
  41.104      {
  41.105          /*
  41.106           * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
  41.107 @@ -312,7 +314,7 @@ asmlinkage int do_page_fault(struct xen_
  41.108          extern int map_ldt_shadow_page(unsigned int);
  41.109          LOCK_BIGLOCK(d);
  41.110          off  = addr - LDT_VIRT_START(ed);
  41.111 -        addr = ed->arch.ldt_base + off;
  41.112 +        addr = ed->arch.guest_context.ldt_base + off;
  41.113          ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
  41.114          UNLOCK_BIGLOCK(d);
  41.115          if ( likely(ret) )
  41.116 @@ -323,7 +325,8 @@ asmlinkage int do_page_fault(struct xen_
  41.117          goto xen_fault;
  41.118  
  41.119  #ifndef NDEBUG
  41.120 -    if ( (ed->arch.traps[TRAP_page_fault].address == 0) && (d->id == 0) )
  41.121 +    if ( (ed->arch.guest_context.trap_ctxt[TRAP_page_fault].address == 0) &&
  41.122 +         (d->id == 0) )
  41.123          goto xen_fault;
  41.124  #endif
  41.125  
  41.126 @@ -374,7 +377,7 @@ long do_fpu_taskswitch(int set)
  41.127  /* Has the guest requested sufficient permission for this I/O access? */
  41.128  static inline int guest_io_okay(
  41.129      unsigned int port, unsigned int bytes,
  41.130 -    struct exec_domain *ed, struct xen_regs *regs)
  41.131 +    struct exec_domain *ed, struct cpu_user_regs *regs)
  41.132  {
  41.133      u16 x;
  41.134  #if defined(__x86_64__)
  41.135 @@ -404,7 +407,7 @@ static inline int guest_io_okay(
  41.136  /* Has the administrator granted sufficient permission for this I/O access? */
  41.137  static inline int admin_io_okay(
  41.138      unsigned int port, unsigned int bytes,
  41.139 -    struct exec_domain *ed, struct xen_regs *regs)
  41.140 +    struct exec_domain *ed, struct cpu_user_regs *regs)
  41.141  {
  41.142      struct domain *d = ed->domain;
  41.143      u16 x;
  41.144 @@ -436,7 +439,7 @@ static inline int admin_io_okay(
  41.145          goto read_fault;                        \
  41.146      eip += _size; (_type)_x; })
  41.147  
  41.148 -static int emulate_privileged_op(struct xen_regs *regs)
  41.149 +static int emulate_privileged_op(struct cpu_user_regs *regs)
  41.150  {
  41.151      struct exec_domain *ed = current;
  41.152      unsigned long *reg, eip = regs->eip;
  41.153 @@ -743,7 +746,7 @@ static int emulate_privileged_op(struct 
  41.154      return EXCRET_fault_fixed;
  41.155  }
  41.156  
  41.157 -asmlinkage int do_general_protection(struct xen_regs *regs)
  41.158 +asmlinkage int do_general_protection(struct cpu_user_regs *regs)
  41.159  {
  41.160      struct exec_domain *ed = current;
  41.161      struct trap_bounce *tb = &ed->arch.trap_bounce;
  41.162 @@ -781,7 +784,7 @@ asmlinkage int do_general_protection(str
  41.163      if ( (regs->error_code & 3) == 2 )
  41.164      {
  41.165          /* This fault must be due to <INT n> instruction. */
  41.166 -        ti = current->arch.traps + (regs->error_code>>3);
  41.167 +        ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
  41.168          if ( PERMIT_SOFTINT(TI_GET_DPL(ti), ed, regs) )
  41.169          {
  41.170              tb->flags = TBF_EXCEPTION;
  41.171 @@ -803,13 +806,13 @@ asmlinkage int do_general_protection(str
  41.172  #endif
  41.173  
  41.174  #ifndef NDEBUG
  41.175 -    if ( (ed->arch.traps[TRAP_gp_fault].address == 0) &&
  41.176 +    if ( (ed->arch.guest_context.trap_ctxt[TRAP_gp_fault].address == 0) &&
  41.177           (ed->domain->id == 0) )
  41.178          goto gp_in_kernel;
  41.179  #endif
  41.180  
  41.181      /* Pass on GPF as is. */
  41.182 -    ti = current->arch.traps + 13;
  41.183 +    ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault];
  41.184      tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
  41.185      tb->error_code = regs->error_code;
  41.186   finish_propagation:
  41.187 @@ -851,7 +854,7 @@ static void nmi_softirq(void)
  41.188          send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
  41.189  }
  41.190  
  41.191 -asmlinkage void mem_parity_error(struct xen_regs *regs)
  41.192 +asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
  41.193  {
  41.194      /* Clear and disable the parity-error line. */
  41.195      outb((inb(0x61)&15)|4,0x61);
  41.196 @@ -870,7 +873,7 @@ asmlinkage void mem_parity_error(struct 
  41.197      }
  41.198  }
  41.199  
  41.200 -asmlinkage void io_check_error(struct xen_regs *regs)
  41.201 +asmlinkage void io_check_error(struct cpu_user_regs *regs)
  41.202  {
  41.203      /* Clear and disable the I/O-error line. */
  41.204      outb((inb(0x61)&15)|8,0x61);
  41.205 @@ -896,7 +899,7 @@ static void unknown_nmi_error(unsigned c
  41.206      printk("Do you have a strange power saving mode enabled?\n");
  41.207  }
  41.208  
  41.209 -asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason)
  41.210 +asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason)
  41.211  {
  41.212      ++nmi_count(smp_processor_id());
  41.213  
  41.214 @@ -911,32 +914,31 @@ asmlinkage void do_nmi(struct xen_regs *
  41.215          unknown_nmi_error((unsigned char)(reason&0xff));
  41.216  }
  41.217  
  41.218 -asmlinkage int math_state_restore(struct xen_regs *regs)
  41.219 +asmlinkage int math_state_restore(struct cpu_user_regs *regs)
  41.220  {
  41.221      /* Prevent recursion. */
  41.222      clts();
  41.223  
  41.224 -    if ( !test_bit(EDF_USEDFPU, &current->ed_flags) )
  41.225 +    if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) )
  41.226      {
  41.227          if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
  41.228              restore_fpu(current);
  41.229          else
  41.230              init_fpu();
  41.231 -        set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
  41.232      }
  41.233  
  41.234      if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
  41.235      {
  41.236          struct trap_bounce *tb = &current->arch.trap_bounce;
  41.237 -        tb->flags      = TBF_EXCEPTION;
  41.238 -        tb->cs         = current->arch.traps[7].cs;
  41.239 -        tb->eip        = current->arch.traps[7].address;
  41.240 +        tb->flags = TBF_EXCEPTION;
  41.241 +        tb->cs    = current->arch.guest_context.trap_ctxt[7].cs;
  41.242 +        tb->eip   = current->arch.guest_context.trap_ctxt[7].address;
  41.243      }
  41.244  
  41.245      return EXCRET_fault_fixed;
  41.246  }
  41.247  
  41.248 -asmlinkage int do_debug(struct xen_regs *regs)
  41.249 +asmlinkage int do_debug(struct cpu_user_regs *regs)
  41.250  {
  41.251      unsigned long condition;
  41.252      struct exec_domain *ed = current;
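
The math_state_restore() change above is a small tightening as well as a rename: the old test_bit() followed by a trailing set_bit() left a window in which EDF_USEDFPU was observably clear after the FPU state had already been restored, whereas test_and_set_bit() folds the check and the mark into one atomic read-modify-write. The idiom in isolation:

    /* Atomic check-and-mark: returns the bit's previous value. */
    if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) )
    {
        /* First FPU touch since the last save: restore or init state,
         * knowing the flag is already visible to any observer. */
    }
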
  41.253 @@ -946,7 +948,7 @@ asmlinkage int do_debug(struct xen_regs 
  41.254  
  41.255      /* Mask out spurious debug traps due to lazy DR7 setting */
  41.256      if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
  41.257 -         (ed->arch.debugreg[7] == 0) )
  41.258 +         (ed->arch.guest_context.debugreg[7] == 0) )
  41.259      {
  41.260          __asm__("mov %0,%%db7" : : "r" (0UL));
  41.261          goto out;
  41.262 @@ -968,17 +970,17 @@ asmlinkage int do_debug(struct xen_regs 
  41.263      } 
  41.264  
  41.265      /* Save debug status register where guest OS can peek at it */
  41.266 -    ed->arch.debugreg[6] = condition;
  41.267 +    ed->arch.guest_context.debugreg[6] = condition;
  41.268  
  41.269      tb->flags = TBF_EXCEPTION;
  41.270 -    tb->cs    = ed->arch.traps[1].cs;
  41.271 -    tb->eip   = ed->arch.traps[1].address;
  41.272 +    tb->cs    = ed->arch.guest_context.trap_ctxt[TRAP_debug].cs;
  41.273 +    tb->eip   = ed->arch.guest_context.trap_ctxt[TRAP_debug].address;
  41.274  
  41.275   out:
  41.276      return EXCRET_not_a_fault;
  41.277  }
  41.278  
  41.279 -asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
  41.280 +asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs)
  41.281  {
  41.282      return EXCRET_not_a_fault;
  41.283  }
  41.284 @@ -1059,7 +1061,7 @@ void __init trap_init(void)
  41.285  long do_set_trap_table(trap_info_t *traps)
  41.286  {
  41.287      trap_info_t cur;
  41.288 -    trap_info_t *dst = current->arch.traps;
  41.289 +    trap_info_t *dst = current->arch.guest_context.trap_ctxt;
  41.290      long rc = 0;
  41.291  
  41.292      LOCK_BIGLOCK(current->domain);
  41.293 @@ -1163,7 +1165,7 @@ long set_debugreg(struct exec_domain *p,
  41.294          return -EINVAL;
  41.295      }
  41.296  
  41.297 -    p->arch.debugreg[reg] = value;
  41.298 +    p->arch.guest_context.debugreg[reg] = value;
  41.299      return 0;
  41.300  }
  41.301  
  41.302 @@ -1175,7 +1177,7 @@ long do_set_debugreg(int reg, unsigned l
  41.303  unsigned long do_get_debugreg(int reg)
  41.304  {
  41.305      if ( (reg < 0) || (reg > 7) ) return -EINVAL;
  41.306 -    return current->arch.debugreg[reg];
  41.307 +    return current->arch.guest_context.debugreg[reg];
  41.308  }
  41.309  
  41.310  /*
    42.1 --- a/xen/arch/x86/vmx.c	Thu Apr 28 13:54:01 2005 +0000
    42.2 +++ b/xen/arch/x86/vmx.c	Fri Apr 29 07:34:47 2005 +0000
    42.3 @@ -46,7 +46,7 @@ unsigned int opt_vmx_debug_level = 0;
    42.4  
    42.5  extern long evtchn_send(int lport);
    42.6  extern long do_block(void);
    42.7 -void do_nmi(struct xen_regs *, unsigned long);
    42.8 +void do_nmi(struct cpu_user_regs *, unsigned long);
    42.9  
   42.10  int start_vmx()
   42.11  {
   42.12 @@ -105,7 +105,7 @@ static void inline __update_guest_eip(un
   42.13  
   42.14  #include <asm/domain_page.h>
   42.15  
   42.16 -static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) 
   42.17 +static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
   42.18  {
   42.19      struct exec_domain *ed = current;
   42.20      unsigned long eip;
   42.21 @@ -154,7 +154,7 @@ static int vmx_do_page_fault(unsigned lo
   42.22      return result;
   42.23  }
   42.24  
   42.25 -static void vmx_do_general_protection_fault(struct xen_regs *regs) 
   42.26 +static void vmx_do_general_protection_fault(struct cpu_user_regs *regs) 
   42.27  {
   42.28      unsigned long eip, error_code;
   42.29      unsigned long intr_fields;
   42.30 @@ -181,7 +181,7 @@ static void vmx_do_general_protection_fa
   42.31      __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
   42.32  }
   42.33  
   42.34 -static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs) 
   42.35 +static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) 
   42.36  {
   42.37      unsigned int eax, ebx, ecx, edx;
   42.38      unsigned long eip;
   42.39 @@ -217,7 +217,7 @@ static void vmx_vmexit_do_cpuid(unsigned
   42.40  #define CASE_GET_REG_P(REG, reg)    \
   42.41      case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
   42.42  
   42.43 -static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
   42.44 +static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
   42.45  {
   42.46      unsigned int reg;
   42.47      unsigned long *reg_p = 0;
   42.48 @@ -250,18 +250,18 @@ static void vmx_dr_access (unsigned long
   42.49      case TYPE_MOV_TO_DR: 
   42.50          /* don't need to check the range */
   42.51          if (reg != REG_ESP)
   42.52 -            ed->arch.debugreg[reg] = *reg_p; 
   42.53 +            ed->arch.guest_context.debugreg[reg] = *reg_p; 
   42.54          else {
   42.55              unsigned long value;
   42.56              __vmread(GUEST_ESP, &value);
   42.57 -            ed->arch.debugreg[reg] = value;
   42.58 +            ed->arch.guest_context.debugreg[reg] = value;
   42.59          }
   42.60          break;
   42.61      case TYPE_MOV_FROM_DR:
   42.62          if (reg != REG_ESP)
   42.63 -            *reg_p = ed->arch.debugreg[reg];
   42.64 +            *reg_p = ed->arch.guest_context.debugreg[reg];
   42.65          else {
   42.66 -            __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]);
   42.67 +            __vmwrite(GUEST_ESP, ed->arch.guest_context.debugreg[reg]);
   42.68          }
   42.69          break;
   42.70      }
   42.71 @@ -288,7 +288,7 @@ static void vmx_vmexit_do_invlpg(unsigne
   42.72      shadow_invlpg(ed, va);
   42.73  }
   42.74  
   42.75 -static void vmx_io_instruction(struct xen_regs *regs, 
   42.76 +static void vmx_io_instruction(struct cpu_user_regs *regs, 
   42.77                     unsigned long exit_qualification, unsigned long inst_len) 
   42.78  {
   42.79      struct exec_domain *d = current;
   42.80 @@ -728,7 +728,7 @@ static int vmx_set_cr0(unsigned long val
   42.81  /*
   42.82   * Write to control registers
   42.83   */
   42.84 -static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
   42.85 +static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
   42.86  {
   42.87      unsigned long value;
   42.88      unsigned long old_cr;
   42.89 @@ -847,7 +847,7 @@ static int mov_to_cr(int gp, int cr, str
   42.90  /*
   42.91   * Read from control registers. CR0 and CR4 are read from the shadow.
   42.92   */
   42.93 -static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
   42.94 +static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
   42.95  {
   42.96      unsigned long value;
   42.97      struct exec_domain *d = current;
   42.98 @@ -878,7 +878,7 @@ static void mov_from_cr(int cr, int gp, 
   42.99      VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
  42.100  }
  42.101  
  42.102 -static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
  42.103 +static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
  42.104  {
  42.105      unsigned int gp, cr;
  42.106      unsigned long value;
  42.107 @@ -916,7 +916,7 @@ static int vmx_cr_access(unsigned long e
  42.108      return 1;
  42.109  }
  42.110  
  42.111 -static inline void vmx_do_msr_read(struct xen_regs *regs)
  42.112 +static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
  42.113  {
  42.114      VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
  42.115                  (unsigned long)regs->ecx, (unsigned long)regs->eax, 
  42.116 @@ -973,7 +973,7 @@ static void vmx_print_line(const char c,
  42.117          print_buf[index++] = c;
  42.118  }
  42.119  
  42.120 -void save_vmx_execution_context(execution_context_t *ctxt)
  42.121 +void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
  42.122  {
  42.123      __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
  42.124      __vmread(GUEST_ESP, &ctxt->esp);
  42.125 @@ -988,7 +988,7 @@ void save_vmx_execution_context(executio
  42.126  }
  42.127  
  42.128  #ifdef XEN_DEBUGGER
  42.129 -void save_xen_regs(struct xen_regs *regs)
  42.130 +void save_cpu_user_regs(struct cpu_user_regs *regs)
  42.131  {
  42.132      __vmread(GUEST_SS_SELECTOR, &regs->xss);
  42.133      __vmread(GUEST_ESP, &regs->esp);
  42.134 @@ -1002,7 +1002,7 @@ void save_xen_regs(struct xen_regs *regs
  42.135      __vmread(GUEST_DS_SELECTOR, &regs->xds);
  42.136  }
  42.137  
  42.138 -void restore_xen_regs(struct xen_regs *regs)
  42.139 +void restore_cpu_user_regs(struct cpu_user_regs *regs)
  42.140  {
  42.141      __vmwrite(GUEST_SS_SELECTOR, regs->xss);
  42.142      __vmwrite(GUEST_ESP, regs->esp);
  42.143 @@ -1017,7 +1017,7 @@ void restore_xen_regs(struct xen_regs *r
  42.144  }
  42.145  #endif
  42.146  
  42.147 -asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
  42.148 +asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
  42.149  {
  42.150      unsigned int exit_reason, idtv_info_field;
  42.151      unsigned long exit_qualification, eip, inst_len = 0;
  42.152 @@ -1080,16 +1080,16 @@ asmlinkage void vmx_vmexit_handler(struc
  42.153  #ifdef XEN_DEBUGGER
  42.154          case TRAP_debug:
  42.155          {
  42.156 -            save_xen_regs(&regs);
  42.157 +            save_cpu_user_regs(&regs);
  42.158              pdb_handle_exception(1, &regs, 1);
  42.159 -            restore_xen_regs(&regs);
  42.160 +            restore_cpu_user_regs(&regs);
  42.161              break;
  42.162          }
  42.163          case TRAP_int3:
  42.164          {
  42.165 -            save_xen_regs(&regs);
  42.166 +            save_cpu_user_regs(&regs);
  42.167              pdb_handle_exception(3, &regs, 1);
  42.168 -            restore_xen_regs(&regs);
  42.169 +            restore_cpu_user_regs(&regs);
  42.170              break;
  42.171          }
  42.172  #endif
  42.173 @@ -1139,9 +1139,9 @@ asmlinkage void vmx_vmexit_handler(struc
  42.174      case EXIT_REASON_EXTERNAL_INTERRUPT: 
  42.175      {
  42.176          extern int vector_irq[];
  42.177 -        extern asmlinkage void do_IRQ(struct xen_regs *);
  42.178 -        extern void smp_apic_timer_interrupt(struct xen_regs *);
  42.179 -        extern void timer_interrupt(int, void *, struct xen_regs *);
  42.180 +        extern asmlinkage void do_IRQ(struct cpu_user_regs *);
  42.181 +        extern void smp_apic_timer_interrupt(struct cpu_user_regs *);
  42.182 +        extern void timer_interrupt(int, void *, struct cpu_user_regs *);
  42.183          unsigned int    vector;
  42.184  
  42.185          if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
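
The vmx.c hunks above are mechanical fallout from two renames: struct xen_regs and
execution_context_t collapse into a single struct cpu_user_regs, and per-VCPU state
such as the debug registers moves under arch.guest_context. A minimal sketch of the
resulting layering, assuming illustrative field names rather than the exact Xen headers:

    /* Sketch only: field names and ordering are illustrative, not the
     * authoritative public headers touched by this changeset. */
    struct cpu_user_regs {                /* was struct xen_regs */
        unsigned long ebx, ecx, edx, esi, edi, ebp, eax;
        unsigned long eip, cs, eflags, esp, ss;
        unsigned long ds, es, fs, gs;
    };

    struct vcpu_guest_context {           /* was full_execution_context_t */
        struct cpu_user_regs user_regs;   /* saved register file */
        unsigned long debugreg[8];        /* DR0-DR7, as vmx_dr_access() uses */
        /* ... trap_ctxt[], ldt_ents, kernel_ss/kernel_sp, callbacks ... */
    };
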
    43.1 --- a/xen/arch/x86/vmx_intercept.c	Thu Apr 28 13:54:01 2005 +0000
    43.2 +++ b/xen/arch/x86/vmx_intercept.c	Fri Apr 29 07:34:47 2005 +0000
    43.3 @@ -140,19 +140,19 @@ static int pit_read_io(struct vmx_virpit
    43.4  /* vmx_io_assist light-weight version, specific to PIT DM */ 
    43.5  static void resume_pit_io(ioreq_t *p)
    43.6  {
    43.7 -    execution_context_t *ec = get_execution_context();
    43.8 -    unsigned long old_eax = ec->eax;
    43.9 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   43.10 +    unsigned long old_eax = regs->eax;
   43.11      p->state = STATE_INVALID;
   43.12  
   43.13      switch(p->size) {
   43.14      case 1:
   43.15 -        ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   43.16 +        regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   43.17          break;
   43.18      case 2:
   43.19 -        ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   43.20 +        regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   43.21          break;
   43.22      case 4:
   43.23 -        ec->eax = (p->u.data & 0xffffffff);
   43.24 +        regs->eax = (p->u.data & 0xffffffff);
   43.25          break;
   43.26      default:
   43.27          BUG();
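
resume_pit_io() completes an emulated PIT port read by folding the device model's
result into the guest's saved EAX; narrow accesses must leave the upper bytes
untouched. A hedged sketch of that merge as a stand-alone helper (the helper name
is hypothetical):

    /* Hypothetical helper: merge an I/O result of 'size' bytes into the
     * guest's saved EAX, preserving the bytes the access did not touch. */
    static unsigned long merge_io_result(unsigned long old_eax,
                                         unsigned long data, int size)
    {
        switch (size) {
        case 1:  return (old_eax & 0xffffff00) | (data & 0xff);
        case 2:  return (old_eax & 0xffff0000) | (data & 0xffff);
        case 4:  return data & 0xffffffff;
        default: return old_eax;  /* callers BUG() on any other size */
        }
    }
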
    44.1 --- a/xen/arch/x86/vmx_io.c	Thu Apr 28 13:54:01 2005 +0000
    44.2 +++ b/xen/arch/x86/vmx_io.c	Fri Apr 29 07:34:47 2005 +0000
    44.3 @@ -38,7 +38,7 @@
    44.4  extern long do_block();
    44.5    
    44.6  #if defined (__i386__)
    44.7 -static void load_xen_regs(struct xen_regs *regs)
    44.8 +static void load_cpu_user_regs(struct cpu_user_regs *regs)
    44.9  { 
   44.10      /*
   44.11       * Write the guest register value into VMCS
   44.12 @@ -50,7 +50,7 @@ static void load_xen_regs(struct xen_reg
   44.13      __vmwrite(GUEST_EIP, regs->eip);
   44.14  }
   44.15  
   44.16 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
   44.17 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
   44.18  {
   44.19      switch (size) {
   44.20      case BYTE:
   44.21 @@ -170,12 +170,12 @@ static void set_reg_value (int size, int
   44.22      }
   44.23  }
   44.24  #else
   44.25 -static void load_xen_regs(struct xen_regs *regs)
   44.26 +static void load_cpu_user_regs(struct cpu_user_regs *regs)
   44.27  { 
   44.28  	/* XXX: TBD */
   44.29  	return;
   44.30  }
   44.31 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
   44.32 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
   44.33  {
   44.34  	/* XXX: TBD */
   44.35  	return;
   44.36 @@ -187,11 +187,11 @@ void vmx_io_assist(struct exec_domain *e
   44.37      vcpu_iodata_t *vio;
   44.38      ioreq_t *p;
   44.39      struct domain *d = ed->domain;
   44.40 -    execution_context_t *ec = get_execution_context();
   44.41 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   44.42      unsigned long old_eax;
   44.43      int sign;
   44.44      struct mi_per_cpu_info *mpci_p;
   44.45 -    struct xen_regs *inst_decoder_regs;
   44.46 +    struct cpu_user_regs *inst_decoder_regs;
   44.47  
   44.48      mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
   44.49      inst_decoder_regs = mpci_p->inst_decoder_regs;
   44.50 @@ -230,8 +230,8 @@ void vmx_io_assist(struct exec_domain *e
   44.51      sign = (p->df) ? -1 : 1;
   44.52      if (p->port_mm) {
   44.53          if (p->pdata_valid) {
   44.54 -            ec->esi += sign * p->count * p->size;
   44.55 -            ec->edi += sign * p->count * p->size;
   44.56 +            regs->esi += sign * p->count * p->size;
   44.57 +            regs->edi += sign * p->count * p->size;
   44.58          } else {
   44.59              if (p->dir == IOREQ_WRITE) {
   44.60                  return;
   44.61 @@ -244,38 +244,38 @@ void vmx_io_assist(struct exec_domain *e
   44.62              if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   44.63                  p->u.data = p->u.data & 0xffff;
   44.64              }        
   44.65 -            set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
   44.66 +            set_reg_value(size, index, 0, regs, p->u.data);
   44.67  
   44.68          }
   44.69 -        load_xen_regs((struct xen_regs *)ec);
   44.70 +        load_cpu_user_regs(regs);
   44.71          return;
   44.72      }
   44.73  
   44.74      if (p->dir == IOREQ_WRITE) {
   44.75          if (p->pdata_valid) {
   44.76 -            ec->esi += sign * p->count * p->size;
   44.77 -            ec->ecx -= p->count;
   44.78 +            regs->esi += sign * p->count * p->size;
   44.79 +            regs->ecx -= p->count;
   44.80          }
   44.81          return;
   44.82      } else {
   44.83          if (p->pdata_valid) {
   44.84 -            ec->edi += sign * p->count * p->size;
   44.85 -            ec->ecx -= p->count;
   44.86 +            regs->edi += sign * p->count * p->size;
   44.87 +            regs->ecx -= p->count;
   44.88              return;
   44.89          }
   44.90      }
   44.91  
   44.92 -    old_eax = ec->eax;
   44.93 +    old_eax = regs->eax;
   44.94  
   44.95      switch(p->size) {
   44.96      case 1:
   44.97 -        ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   44.98 +        regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   44.99          break;
  44.100      case 2:
  44.101 -        ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
  44.102 +        regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
  44.103          break;
  44.104      case 4:
  44.105 -        ec->eax = (p->u.data & 0xffffffff);
  44.106 +        regs->eax = (p->u.data & 0xffffffff);
  44.107          break;
  44.108      default:
  44.109          BUG();
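
For repeated string I/O (INS/OUTS under REP), vmx_io_assist() advances the
architectural index registers instead of merging into EAX. A sketch of the fix-up
rule, using the fields visible in the hunk above:

    /* The direction flag selects forward or backward string movement. */
    int sign = p->df ? -1 : 1;
    if (p->dir == IOREQ_WRITE)
        regs->esi += sign * p->count * p->size;  /* OUTS consumed from ESI */
    else
        regs->edi += sign * p->count * p->size;  /* INS stored through EDI */
    regs->ecx -= p->count;                       /* REP count now retired */
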
    45.1 --- a/xen/arch/x86/vmx_platform.c	Thu Apr 28 13:54:01 2005 +0000
    45.2 +++ b/xen/arch/x86/vmx_platform.c	Fri Apr 29 07:34:47 2005 +0000
    45.3 @@ -39,17 +39,17 @@
    45.4  #define DECODE_failure  0
    45.5  
    45.6  #if defined (__x86_64__)
    45.7 -static void store_xen_regs(struct xen_regs *regs)
    45.8 +static void store_cpu_user_regs(struct cpu_user_regs *regs)
    45.9  {
   45.10  
   45.11  }
   45.12  
   45.13 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) 
   45.14 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 
   45.15  {
   45.16      return 0;
   45.17  }
   45.18  #elif defined (__i386__)
   45.19 -static void store_xen_regs(struct xen_regs *regs)
   45.20 +static void store_cpu_user_regs(struct cpu_user_regs *regs)
   45.21  {
   45.22      __vmread(GUEST_SS_SELECTOR, &regs->ss);
   45.23      __vmread(GUEST_ESP, &regs->esp);
   45.24 @@ -60,7 +60,7 @@ static void store_xen_regs(struct xen_re
   45.25      __vmread(GUEST_EIP, &regs->eip);
   45.26  }
   45.27  
   45.28 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
   45.29 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
   45.30  {                    
   45.31      /*               
   45.32       * Reference the db_reg[] table
   45.33 @@ -468,7 +468,7 @@ static void send_mmio_req(unsigned long 
   45.34      ioreq_t *p;
   45.35      int vm86;
   45.36      struct mi_per_cpu_info *mpci_p;
   45.37 -    struct xen_regs *inst_decoder_regs;
   45.38 +    struct cpu_user_regs *inst_decoder_regs;
   45.39      extern long evtchn_send(int lport);
   45.40      extern long do_block(void);
   45.41  
   45.42 @@ -528,7 +528,7 @@ void handle_mmio(unsigned long va, unsig
   45.43      unsigned long eip, eflags, cs;
   45.44      unsigned long inst_len, inst_addr;
   45.45      struct mi_per_cpu_info *mpci_p;
   45.46 -    struct xen_regs *inst_decoder_regs;
   45.47 +    struct cpu_user_regs *inst_decoder_regs;
   45.48      struct instruction mmio_inst;
   45.49      unsigned char inst[MAX_INST_LEN];
   45.50      int vm86, ret;
   45.51 @@ -569,7 +569,7 @@ void handle_mmio(unsigned long va, unsig
   45.52          domain_crash_synchronous();
   45.53  
   45.54      __vmwrite(GUEST_EIP, eip + inst_len);
   45.55 -    store_xen_regs(inst_decoder_regs);
   45.56 +    store_cpu_user_regs(inst_decoder_regs);
   45.57  
   45.58      // Only handle "mov" and "movs" instructions!
   45.59      if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
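
handle_mmio() must retire the faulting instruction before handing the access to the
device model, so it bumps the guest EIP by the decoded length and then snapshots the
registers for the operand decoder. A sketch of that sequencing, reusing the calls
shown above:

    /* Skip past the MMIO instruction: the guest resumes at the next one. */
    __vmread(GUEST_EIP, &eip);
    __vmwrite(GUEST_EIP, eip + inst_len);    /* inst_len from the decoder */
    store_cpu_user_regs(inst_decoder_regs);  /* registers for operand decode */
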
    46.1 --- a/xen/arch/x86/vmx_vmcs.c	Thu Apr 28 13:54:01 2005 +0000
    46.2 +++ b/xen/arch/x86/vmx_vmcs.c	Fri Apr 29 07:34:47 2005 +0000
    46.3 @@ -100,7 +100,7 @@ struct host_execution_env {
    46.4  
     46.5  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */
    46.6  
    46.7 -int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
    46.8 +int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
    46.9  {
   46.10      int i;
   46.11      unsigned int n;
   46.12 @@ -108,15 +108,15 @@ int vmx_setup_platform(struct exec_domai
   46.13      struct e820entry *e820p;
   46.14      unsigned long gpfn = 0;
   46.15  
   46.16 -    context->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */
   46.17 +    regs->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */
   46.18  
   46.19 -    n = context->ecx;
   46.20 +    n = regs->ecx;
   46.21      if (n > 32) {
   46.22          VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
   46.23          return -1;
   46.24      }
   46.25  
   46.26 -    addr = context->edi;
   46.27 +    addr = regs->edi;
   46.28      offset = (addr & ~PAGE_MASK);
   46.29      addr = round_pgdown(addr);
   46.30      mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
   46.31 @@ -162,14 +162,14 @@ void vmx_do_launch(struct exec_domain *e
   46.32      struct Xgt_desc_struct desc;
   46.33      unsigned long pfn = 0;
   46.34      struct pfn_info *page;
   46.35 -    execution_context_t *ec = get_execution_context();
   46.36 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   46.37  
   46.38      cpu = smp_processor_id();
   46.39  
   46.40      page = (struct pfn_info *) alloc_domheap_page(NULL);
   46.41      pfn = (unsigned long) (page - frame_table);
   46.42  
   46.43 -    vmx_setup_platform(ed, ec);
   46.44 +    vmx_setup_platform(ed, regs);
   46.45  
   46.46      __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
   46.47      host_env.gdtr_limit = desc.size;
   46.48 @@ -202,8 +202,8 @@ void vmx_do_launch(struct exec_domain *e
    46.49   * Initially set the same environment as the host.
   46.50   */
   46.51  static inline int 
   46.52 -construct_init_vmcs_guest(execution_context_t *context, 
   46.53 -                          full_execution_context_t *full_context,
   46.54 +construct_init_vmcs_guest(struct cpu_user_regs *regs, 
   46.55 +                          struct vcpu_guest_context *ctxt,
   46.56                            struct host_execution_env *host_env)
   46.57  {
   46.58      int error = 0;
   46.59 @@ -232,12 +232,12 @@ construct_init_vmcs_guest(execution_cont
   46.60      error |= __vmwrite(CR3_TARGET_COUNT, 0);
   46.61  
   46.62      /* Guest Selectors */
   46.63 -    error |= __vmwrite(GUEST_CS_SELECTOR, context->cs);
   46.64 -    error |= __vmwrite(GUEST_ES_SELECTOR, context->es);
   46.65 -    error |= __vmwrite(GUEST_SS_SELECTOR, context->ss);
   46.66 -    error |= __vmwrite(GUEST_DS_SELECTOR, context->ds);
   46.67 -    error |= __vmwrite(GUEST_FS_SELECTOR, context->fs);
   46.68 -    error |= __vmwrite(GUEST_GS_SELECTOR, context->gs);
   46.69 +    error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
   46.70 +    error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
   46.71 +    error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
   46.72 +    error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
   46.73 +    error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
   46.74 +    error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);
   46.75  
   46.76      /* Guest segment Limits */
   46.77      error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
   46.78 @@ -268,10 +268,10 @@ construct_init_vmcs_guest(execution_cont
   46.79      arbytes.fields.seg_type = 0xb;          /* type = 0xb */
   46.80      error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
   46.81  
   46.82 -    error |= __vmwrite(GUEST_GDTR_BASE, context->edx);
   46.83 -    context->edx = 0;
   46.84 -    error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax);
   46.85 -    context->eax = 0;
   46.86 +    error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
   46.87 +    regs->edx = 0;
   46.88 +    error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
   46.89 +    regs->eax = 0;
   46.90  
    46.91      arbytes.fields.s = 0;                   /* not a code or data segment */
    46.92      arbytes.fields.seg_type = 0x2;          /* LDT */
   46.93 @@ -302,10 +302,10 @@ construct_init_vmcs_guest(execution_cont
   46.94      error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
   46.95      error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
   46.96  
   46.97 -    error |= __vmwrite(GUEST_ESP, context->esp);
   46.98 -    error |= __vmwrite(GUEST_EIP, context->eip);
   46.99 +    error |= __vmwrite(GUEST_ESP, regs->esp);
  46.100 +    error |= __vmwrite(GUEST_EIP, regs->eip);
  46.101  
  46.102 -    eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
  46.103 +    eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
  46.104      eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
  46.105  
  46.106      error |= __vmwrite(GUEST_EFLAGS, eflags);
  46.107 @@ -380,8 +380,8 @@ static inline int construct_vmcs_host(st
  46.108   */
  46.109  
  46.110  int construct_vmcs(struct arch_vmx_struct *arch_vmx,
  46.111 -                   execution_context_t *context,
  46.112 -                   full_execution_context_t *full_context,
  46.113 +                   struct cpu_user_regs *regs,
  46.114 +                   struct vcpu_guest_context *ctxt,
  46.115                     int use_host_env)
  46.116  {
  46.117      int error;
  46.118 @@ -415,7 +415,7 @@ int construct_vmcs(struct arch_vmx_struc
  46.119          return -EINVAL;         
  46.120      }
  46.121      /* guest selectors */
  46.122 -    if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) {
  46.123 +    if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
  46.124          printk("construct_vmcs: construct_vmcs_guest failed\n");
  46.125          return -EINVAL;         
  46.126      }       
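
construct_vmcs() and its helpers accumulate failures by OR-ing every __vmwrite()
result, so a single failed field write aborts VMCS construction. One subtle step is
EFLAGS: the VMX guest-state checks require the architecturally reserved bits to hold
fixed values. A sketch of the idiom, assuming the two mask constants come from Xen's
VMX headers as in the hunk above:

    int error = 0;
    unsigned long eflags;

    eflags  = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* must-be-zero bits */
    eflags |= VMCS_EFLAGS_RESERVED_1;                 /* must-be-one bits */
    error  |= __vmwrite(GUEST_EFLAGS, eflags);
    if (error)
        return -EINVAL;   /* any failed write poisons the whole build */
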
    47.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Apr 28 13:54:01 2005 +0000
    47.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Fri Apr 29 07:34:47 2005 +0000
    47.3 @@ -24,38 +24,44 @@
    47.4  
    47.5  void __dummy__(void)
    47.6  {
    47.7 -    OFFSET(XREGS_eax, struct xen_regs, eax);
    47.8 -    OFFSET(XREGS_ebx, struct xen_regs, ebx);
    47.9 -    OFFSET(XREGS_ecx, struct xen_regs, ecx);
   47.10 -    OFFSET(XREGS_edx, struct xen_regs, edx);
   47.11 -    OFFSET(XREGS_esi, struct xen_regs, esi);
   47.12 -    OFFSET(XREGS_edi, struct xen_regs, edi);
   47.13 -    OFFSET(XREGS_esp, struct xen_regs, esp);
   47.14 -    OFFSET(XREGS_ebp, struct xen_regs, ebp);
   47.15 -    OFFSET(XREGS_eip, struct xen_regs, eip);
   47.16 -    OFFSET(XREGS_cs, struct xen_regs, cs);
   47.17 -    OFFSET(XREGS_ds, struct xen_regs, ds);
   47.18 -    OFFSET(XREGS_es, struct xen_regs, es);
   47.19 -    OFFSET(XREGS_fs, struct xen_regs, fs);
   47.20 -    OFFSET(XREGS_gs, struct xen_regs, gs);
   47.21 -    OFFSET(XREGS_ss, struct xen_regs, ss);
   47.22 -    OFFSET(XREGS_eflags, struct xen_regs, eflags);
   47.23 -    OFFSET(XREGS_error_code, struct xen_regs, error_code);
   47.24 -    OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
   47.25 -    OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp);
   47.26 -    DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
   47.27 +    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
   47.28 +    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
   47.29 +    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
   47.30 +    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
   47.31 +    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
   47.32 +    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
   47.33 +    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
   47.34 +    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
   47.35 +    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
   47.36 +    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
   47.37 +    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
   47.38 +    OFFSET(UREGS_es, struct cpu_user_regs, es);
   47.39 +    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
   47.40 +    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
   47.41 +    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
   47.42 +    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
   47.43 +    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
   47.44 +    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
   47.45 +    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
   47.46 +    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
   47.47      BLANK();
   47.48  
   47.49      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
   47.50      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
   47.51 -    OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector);
   47.52 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
   47.53 -    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector);
   47.54 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
   47.55      OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   47.56      OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   47.57 -    OFFSET(EDOMAIN_kernel_ss, struct exec_domain, arch.kernel_ss);
   47.58 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp);
   47.59 +    OFFSET(EDOMAIN_event_sel, struct exec_domain,
   47.60 +           arch.guest_context.event_callback_cs);
   47.61 +    OFFSET(EDOMAIN_event_addr, struct exec_domain, 
   47.62 +           arch.guest_context.event_callback_eip);
   47.63 +    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain,
   47.64 +           arch.guest_context.failsafe_callback_cs);
   47.65 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   47.66 +           arch.guest_context.failsafe_callback_eip);
   47.67 +    OFFSET(EDOMAIN_kernel_ss, struct exec_domain,
   47.68 +           arch.guest_context.kernel_ss);
   47.69 +    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   47.70 +           arch.guest_context.kernel_sp);
   47.71      BLANK();
   47.72  
   47.73      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
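
The UREGS_*/EDOMAIN_* constants consumed by entry.S are generated, not
hand-maintained: asm-offsets.c is compiled to assembly, and each OFFSET() invocation
emits the structure offset where the build can harvest it into asm-offsets.h. A
sketch of the conventional macro machinery, assuming the usual Linux/Xen-style
definitions:

    #include <stddef.h>  /* offsetof */

    /* Emit "->SYM value" into the generated .s file; a later sed pass
     * rewrites each such marker into "#define SYM value". */
    #define DEFINE(sym, val) \
        __asm__ __volatile__ ("\n->" #sym " %0" : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))
    #define BLANK() __asm__ __volatile__ ("\n->" : : )
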
    48.1 --- a/xen/arch/x86/x86_32/call_with_regs.S	Thu Apr 28 13:54:01 2005 +0000
    48.2 +++ b/xen/arch/x86/x86_32/call_with_regs.S	Fri Apr 29 07:34:47 2005 +0000
    48.3 @@ -2,35 +2,35 @@
    48.4  
    48.5  #include <asm/asm-offsets.h>
    48.6  
    48.7 -	// int call_with_registers(void (*f)(struct xen_regs *r)) ->
    48.8 -	// build a xen_regs structure, and then call f with that.
    48.9 +	// int call_with_registers(void (*f)(struct cpu_user_regs *r)) ->
   48.10 +	// build a cpu_user_regs structure, and then call f with that.
   48.11  call_with_registers:
   48.12  	pushf
   48.13 -	subl $XREGS_user_sizeof, %esp
   48.14 -	movl %ebx, XREGS_ebx(%esp)
   48.15 -	movl %ecx, XREGS_ecx(%esp)
   48.16 -	movl %edx, XREGS_edx(%esp)
   48.17 -	movl %esi, XREGS_esi(%esp)
   48.18 -	movl %edi, XREGS_edi(%esp)
   48.19 -	movl %ebp, XREGS_ebp(%esp)
   48.20 -	movl %eax, XREGS_eax(%esp)
   48.21 -	movw $0, XREGS_error_code(%esp)
   48.22 -	movw $0, XREGS_entry_vector(%esp)
   48.23 -	movl XREGS_user_sizeof+4(%esp), %eax
   48.24 -	movl %eax, XREGS_eip(%esp)
   48.25 -	movl %cs, XREGS_cs(%esp)
   48.26 -	movl XREGS_user_sizeof(%esp), %eax
   48.27 -	movl %eax, XREGS_eflags(%esp)
   48.28 -	movl %esp, XREGS_esp(%esp)
   48.29 -	addl $XREGS_user_sizeof+4, XREGS_esp(%esp)
   48.30 -	movl %ss, XREGS_ss(%esp)
   48.31 -	movl %es, XREGS_es(%esp)
   48.32 -	movl %ds, XREGS_ds(%esp)
   48.33 -	movl %fs, XREGS_fs(%esp)
   48.34 -	movl %gs, XREGS_gs(%esp)
   48.35 +	subl $UREGS_user_sizeof, %esp
   48.36 +	movl %ebx, UREGS_ebx(%esp)
   48.37 +	movl %ecx, UREGS_ecx(%esp)
   48.38 +	movl %edx, UREGS_edx(%esp)
   48.39 +	movl %esi, UREGS_esi(%esp)
   48.40 +	movl %edi, UREGS_edi(%esp)
   48.41 +	movl %ebp, UREGS_ebp(%esp)
   48.42 +	movl %eax, UREGS_eax(%esp)
   48.43 +	movw $0, UREGS_error_code(%esp)
   48.44 +	movw $0, UREGS_entry_vector(%esp)
   48.45 +	movl UREGS_user_sizeof+4(%esp), %eax
   48.46 +	movl %eax, UREGS_eip(%esp)
   48.47 +	movl %cs, UREGS_cs(%esp)
   48.48 +	movl UREGS_user_sizeof(%esp), %eax
   48.49 +	movl %eax, UREGS_eflags(%esp)
   48.50 +	movl %esp, UREGS_esp(%esp)
   48.51 +	addl $UREGS_user_sizeof+4, UREGS_esp(%esp)
   48.52 +	movl %ss, UREGS_ss(%esp)
   48.53 +	movl %es, UREGS_es(%esp)
   48.54 +	movl %ds, UREGS_ds(%esp)
   48.55 +	movl %fs, UREGS_fs(%esp)
   48.56 +	movl %gs, UREGS_gs(%esp)
   48.57  
   48.58 -	movl XREGS_user_sizeof+8(%esp), %eax
   48.59 +	movl UREGS_user_sizeof+8(%esp), %eax
   48.60  	pushl %esp
   48.61  	call *%eax
   48.62 -	add $XREGS_user_sizeof + 8, %esp
   48.63 +	add $UREGS_user_sizeof + 8, %esp
   48.64  	ret
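
call_with_registers builds a complete cpu_user_regs frame from the caller's live
registers and then invokes f on it. A rough C-level paraphrase, hedged because only
assembly can truly capture its own register file:

    /* Documentation-only paraphrase of the assembly above. */
    int call_with_registers(void (*f)(struct cpu_user_regs *r))
    {
        struct cpu_user_regs r;   /* subl $UREGS_user_sizeof, %esp */
        /* ... each movl %reg, UREGS_reg(%esp) fills one field;
         * eip/cs/eflags/esp/ss are reconstructed from the call frame ... */
        r.error_code   = 0;
        r.entry_vector = 0;
        f(&r);                    /* pushl %esp; call *%eax */
        return 0;                 /* the real code returns whatever f left in %eax */
    }
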
    49.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Apr 28 13:54:01 2005 +0000
    49.2 +++ b/xen/arch/x86/x86_32/entry.S	Fri Apr 29 07:34:47 2005 +0000
    49.3 @@ -76,7 +76,7 @@
    49.4   * and we set it to the fixed value.
    49.5   *
    49.6   * We also need the room, especially because orig_eax field is used 
    49.7 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
    49.8  + * by do_IRQ(). Compared to cpu_user_regs, we skip pushing the following:
    49.9   *   (10) u32 gs;                 
   49.10   *   (9)  u32 fs;
   49.11   *   (8)  u32 ds;
   49.12 @@ -99,7 +99,7 @@
   49.13          pushl $VMX_MONITOR_EFLAGS; \
   49.14          popf; \
   49.15          subl $(NR_SKIPPED_REGS*4), %esp; \
   49.16 -        movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \
   49.17 +        movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
   49.18          pushl %eax; \
   49.19          pushl %ebp; \
   49.20          pushl %edi; \
   49.21 @@ -111,7 +111,7 @@
   49.22  ENTRY(vmx_asm_vmexit_handler)
   49.23          /* selectors are restored/saved by VMX */
   49.24          VMX_SAVE_ALL_NOSEGREGS
   49.25 -        call SYMBOL_NAME(vmx_vmexit_handler)
   49.26 +        call vmx_vmexit_handler
   49.27          jmp vmx_asm_do_resume
   49.28  
   49.29  ENTRY(vmx_asm_do_launch)
   49.30 @@ -126,7 +126,7 @@ ENTRY(vmx_asm_do_launch)
   49.31          /* VMLAUNCH */
   49.32          .byte 0x0f,0x01,0xc2
   49.33          pushf
   49.34 -        call SYMBOL_NAME(vm_launch_fail)
   49.35 +        call vm_launch_fail
   49.36          hlt
   49.37          
   49.38          ALIGN
   49.39 @@ -141,11 +141,11 @@ vmx_test_all_events:
   49.40  /*test_softirqs:*/  
   49.41          movl EDOMAIN_processor(%ebx),%eax
   49.42          shl  $IRQSTAT_shift,%eax
   49.43 -        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
   49.44 +        test %ecx,irq_stat(%eax,1)
   49.45          jnz  vmx_process_softirqs
   49.46  
   49.47  vmx_restore_all_guest:
   49.48 -        call SYMBOL_NAME(load_cr2)
   49.49 +        call load_cr2
   49.50          /* 
   49.51           * Check if we are going back to VMX-based VM
   49.52           * By this time, all the setups in the VMCS must be complete.
   49.53 @@ -161,25 +161,25 @@ vmx_restore_all_guest:
   49.54          /* VMRESUME */
   49.55          .byte 0x0f,0x01,0xc3
   49.56          pushf
   49.57 -        call SYMBOL_NAME(vm_resume_fail)
   49.58 +        call vm_resume_fail
   49.59          /* Should never reach here */
   49.60          hlt
   49.61  
   49.62          ALIGN
   49.63  vmx_process_softirqs:
   49.64          sti       
   49.65 -        call SYMBOL_NAME(do_softirq)
   49.66 +        call do_softirq
   49.67          jmp  vmx_test_all_events
   49.68  #endif
   49.69  
   49.70          ALIGN
   49.71  restore_all_guest:
   49.72 -        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
   49.73 +        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
   49.74          jnz  restore_all_vm86
   49.75 -FLT1:   movl XREGS_ds(%esp),%ds
   49.76 -FLT2:   movl XREGS_es(%esp),%es
   49.77 -FLT3:   movl XREGS_fs(%esp),%fs
   49.78 -FLT4:   movl XREGS_gs(%esp),%gs
   49.79 +FLT1:   movl UREGS_ds(%esp),%ds
   49.80 +FLT2:   movl UREGS_es(%esp),%es
   49.81 +FLT3:   movl UREGS_fs(%esp),%fs
   49.82 +FLT4:   movl UREGS_gs(%esp),%gs
   49.83  restore_all_vm86:
   49.84          popl %ebx
   49.85          popl %ecx
   49.86 @@ -193,13 +193,13 @@ FLT5:   iret
   49.87  .section .fixup,"ax"
   49.88  FIX5:   subl  $28,%esp
   49.89          pushl 28(%esp)                 # error_code/entry_vector
   49.90 -        movl  %eax,XREGS_eax+4(%esp)
   49.91 -        movl  %ebp,XREGS_ebp+4(%esp)
   49.92 -        movl  %edi,XREGS_edi+4(%esp)
   49.93 -        movl  %esi,XREGS_esi+4(%esp)
   49.94 -        movl  %edx,XREGS_edx+4(%esp)
   49.95 -        movl  %ecx,XREGS_ecx+4(%esp)
   49.96 -        movl  %ebx,XREGS_ebx+4(%esp)
   49.97 +        movl  %eax,UREGS_eax+4(%esp)
   49.98 +        movl  %ebp,UREGS_ebp+4(%esp)
   49.99 +        movl  %edi,UREGS_edi+4(%esp)
  49.100 +        movl  %esi,UREGS_esi+4(%esp)
  49.101 +        movl  %edx,UREGS_edx+4(%esp)
  49.102 +        movl  %ecx,UREGS_ecx+4(%esp)
  49.103 +        movl  %ebx,UREGS_ebx+4(%esp)
  49.104  FIX1:   SET_XEN_SEGMENTS(a)
  49.105          movl  %eax,%fs
  49.106          movl  %eax,%gs
  49.107 @@ -224,10 +224,10 @@ failsafe_callback:
  49.108          movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
  49.109          call  create_bounce_frame
  49.110          xorl  %eax,%eax
  49.111 -        movl  %eax,XREGS_ds(%esp)
  49.112 -        movl  %eax,XREGS_es(%esp)
  49.113 -        movl  %eax,XREGS_fs(%esp)
  49.114 -        movl  %eax,XREGS_gs(%esp)
  49.115 +        movl  %eax,UREGS_ds(%esp)
  49.116 +        movl  %eax,UREGS_es(%esp)
  49.117 +        movl  %eax,UREGS_fs(%esp)
  49.118 +        movl  %eax,UREGS_gs(%esp)
  49.119          jmp   test_all_events
  49.120  .previous
  49.121  .section __pre_ex_table,"a"
  49.122 @@ -261,8 +261,8 @@ ENTRY(hypercall)
  49.123          GET_CURRENT(%ebx)
  49.124          andl $(NR_hypercalls-1),%eax
  49.125          PERFC_INCR(PERFC_hypercalls, %eax)
  49.126 -        call *SYMBOL_NAME(hypercall_table)(,%eax,4)
  49.127 -        movl %eax,XREGS_eax(%esp)       # save the return value
  49.128 +        call *hypercall_table(,%eax,4)
  49.129 +        movl %eax,UREGS_eax(%esp)       # save the return value
  49.130  
  49.131  test_all_events:
  49.132          xorl %ecx,%ecx
  49.133 @@ -271,7 +271,7 @@ test_all_events:
  49.134  /*test_softirqs:*/  
  49.135          movl EDOMAIN_processor(%ebx),%eax
  49.136          shl  $IRQSTAT_shift,%eax
  49.137 -        test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
  49.138 +        test %ecx,irq_stat(%eax,1)
  49.139          jnz  process_softirqs
  49.140  /*test_guest_events:*/
  49.141          movl EDOMAIN_vcpu_info(%ebx),%eax
  49.142 @@ -295,47 +295,47 @@ test_all_events:
  49.143          ALIGN
  49.144  process_softirqs:
  49.145          sti       
  49.146 -        call SYMBOL_NAME(do_softirq)
  49.147 +        call do_softirq
  49.148          jmp  test_all_events
  49.149                  
  49.150  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
  49.151  /*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
  49.152  /* %edx == trap_bounce, %ebx == struct exec_domain                       */
  49.153 -/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
  49.154 +/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
  49.155  create_bounce_frame:
  49.156 -        movl XREGS_eflags+4(%esp),%ecx
  49.157 -        movb XREGS_cs+4(%esp),%cl
  49.158 +        movl UREGS_eflags+4(%esp),%ecx
  49.159 +        movb UREGS_cs+4(%esp),%cl
  49.160          testl $(2|X86_EFLAGS_VM),%ecx
  49.161          jz   ring1 /* jump if returning to an existing ring-1 activation */
  49.162          movl EDOMAIN_kernel_sp(%ebx),%esi
  49.163  FLT6:   movl EDOMAIN_kernel_ss(%ebx),%gs
  49.164 -        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  49.165 +        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  49.166          jz   nvm86_1
  49.167          subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
  49.168 -        movl XREGS_es+4(%esp),%eax
  49.169 +        movl UREGS_es+4(%esp),%eax
  49.170  FLT7:   movl %eax,%gs:(%esi)
  49.171 -        movl XREGS_ds+4(%esp),%eax
  49.172 +        movl UREGS_ds+4(%esp),%eax
  49.173  FLT8:   movl %eax,%gs:4(%esi)
  49.174 -        movl XREGS_fs+4(%esp),%eax
  49.175 +        movl UREGS_fs+4(%esp),%eax
  49.176  FLT9:   movl %eax,%gs:8(%esi)
  49.177 -        movl XREGS_gs+4(%esp),%eax
  49.178 +        movl UREGS_gs+4(%esp),%eax
  49.179  FLT10:  movl %eax,%gs:12(%esi)
  49.180  nvm86_1:subl $8,%esi        /* push SS/ESP (inter-priv iret) */
  49.181 -        movl XREGS_esp+4(%esp),%eax
  49.182 +        movl UREGS_esp+4(%esp),%eax
  49.183  FLT11:  movl %eax,%gs:(%esi) 
  49.184 -        movl XREGS_ss+4(%esp),%eax
  49.185 +        movl UREGS_ss+4(%esp),%eax
  49.186  FLT12:  movl %eax,%gs:4(%esi) 
  49.187          jmp 1f
  49.188  ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
  49.189 -        movl XREGS_esp+4(%esp),%esi
  49.190 -FLT13:  movl XREGS_ss+4(%esp),%gs 
  49.191 +        movl UREGS_esp+4(%esp),%esi
  49.192 +FLT13:  movl UREGS_ss+4(%esp),%gs 
  49.193  1:      /* Construct a stack frame: EFLAGS, CS/EIP */
  49.194          subl $12,%esi
  49.195 -        movl XREGS_eip+4(%esp),%eax
  49.196 +        movl UREGS_eip+4(%esp),%eax
  49.197  FLT14:  movl %eax,%gs:(%esi) 
  49.198 -        movl XREGS_cs+4(%esp),%eax
  49.199 +        movl UREGS_cs+4(%esp),%eax
  49.200  FLT15:  movl %eax,%gs:4(%esi) 
  49.201 -        movl XREGS_eflags+4(%esp),%eax
  49.202 +        movl UREGS_eflags+4(%esp),%eax
  49.203  FLT16:  movl %eax,%gs:8(%esi)
  49.204          movb TRAPBOUNCE_flags(%edx),%cl
  49.205          test $TBF_EXCEPTION_ERRCODE,%cl
  49.206 @@ -351,7 +351,7 @@ FLT18:  movl %eax,%gs:(%esi)
  49.207  1:      testb $TBF_FAILSAFE,%cl
  49.208          jz   2f
  49.209          subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
  49.210 -        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  49.211 +        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  49.212          jz   nvm86_2
  49.213          xorl %eax,%eax               # VM86: we write zero selector values
  49.214  FLT19:  movl %eax,%gs:(%esi) 
  49.215 @@ -359,30 +359,30 @@ FLT20:  movl %eax,%gs:4(%esi)
  49.216  FLT21:  movl %eax,%gs:8(%esi) 
  49.217  FLT22:  movl %eax,%gs:12(%esi)
  49.218          jmp  2f
  49.219 -nvm86_2:movl XREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
  49.220 +nvm86_2:movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
  49.221  FLT23:  movl %eax,%gs:(%esi) 
  49.222 -        movl XREGS_es+4(%esp),%eax
  49.223 +        movl UREGS_es+4(%esp),%eax
  49.224  FLT24:  movl %eax,%gs:4(%esi)
  49.225 -        movl XREGS_fs+4(%esp),%eax
  49.226 +        movl UREGS_fs+4(%esp),%eax
  49.227  FLT25:  movl %eax,%gs:8(%esi) 
  49.228 -        movl XREGS_gs+4(%esp),%eax
  49.229 +        movl UREGS_gs+4(%esp),%eax
  49.230  FLT26:  movl %eax,%gs:12(%esi)
  49.231 -2:      testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  49.232 +2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  49.233          jz   nvm86_3
  49.234          xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
  49.235 -        movl %eax,XREGS_ds+4(%esp)
  49.236 -        movl %eax,XREGS_es+4(%esp)
  49.237 -        movl %eax,XREGS_fs+4(%esp)
  49.238 -        movl %eax,XREGS_gs+4(%esp)
  49.239 +        movl %eax,UREGS_ds+4(%esp)
  49.240 +        movl %eax,UREGS_es+4(%esp)
  49.241 +        movl %eax,UREGS_fs+4(%esp)
  49.242 +        movl %eax,UREGS_gs+4(%esp)
  49.243  nvm86_3:/* Rewrite our stack frame and return to ring 1. */
  49.244          /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
  49.245 -        andl $0xfffcbeff,XREGS_eflags+4(%esp)
  49.246 -        movl %gs,XREGS_ss+4(%esp)
  49.247 -        movl %esi,XREGS_esp+4(%esp)
  49.248 +        andl $0xfffcbeff,UREGS_eflags+4(%esp)
  49.249 +        movl %gs,UREGS_ss+4(%esp)
  49.250 +        movl %esi,UREGS_esp+4(%esp)
  49.251          movzwl TRAPBOUNCE_cs(%edx),%eax
  49.252 -        movl %eax,XREGS_cs+4(%esp)
  49.253 +        movl %eax,UREGS_cs+4(%esp)
  49.254          movl TRAPBOUNCE_eip(%edx),%eax
  49.255 -        movl %eax,XREGS_eip+4(%esp)
  49.256 +        movl %eax,UREGS_eip+4(%esp)
  49.257          movb $0,TRAPBOUNCE_flags(%edx)
  49.258          ret
  49.259  .section __ex_table,"a"
  49.260 @@ -410,8 +410,8 @@ process_guest_exception_and_events:
  49.261          ALIGN
  49.262  ENTRY(ret_from_intr)
  49.263          GET_CURRENT(%ebx)
  49.264 -        movl  XREGS_eflags(%esp),%eax
  49.265 -        movb  XREGS_cs(%esp),%al
  49.266 +        movl  UREGS_eflags(%esp),%eax
  49.267 +        movb  UREGS_cs(%esp),%al
  49.268          testl $(3|X86_EFLAGS_VM),%eax
  49.269          jnz   test_all_events
  49.270          jmp   restore_all_xen
  49.271 @@ -422,26 +422,26 @@ ENTRY(divide_error)
  49.272  error_code:
  49.273          SAVE_ALL_NOSEGREGS(a)
  49.274          SET_XEN_SEGMENTS(a)
  49.275 -        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
  49.276 +        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
  49.277          jz    exception_with_ints_disabled
  49.278          sti                             # re-enable interrupts
  49.279          xorl  %eax,%eax
  49.280 -        movw  XREGS_entry_vector(%esp),%ax
  49.281 +        movw  UREGS_entry_vector(%esp),%ax
  49.282          movl  %esp,%edx
  49.283 -	pushl %edx			# push the xen_regs pointer
  49.284 +	pushl %edx			# push the cpu_user_regs pointer
  49.285  	GET_CURRENT(%ebx)
  49.286          PERFC_INCR(PERFC_exceptions, %eax)
  49.287 -	call  *SYMBOL_NAME(exception_table)(,%eax,4)
  49.288 +	call  *exception_table(,%eax,4)
  49.289          addl  $4,%esp
  49.290 -        movl  XREGS_eflags(%esp),%eax
  49.291 -        movb  XREGS_cs(%esp),%al
  49.292 +        movl  UREGS_eflags(%esp),%eax
  49.293 +        movb  UREGS_cs(%esp),%al
  49.294          testl $(3|X86_EFLAGS_VM),%eax
  49.295  	jz    restore_all_xen
  49.296          jmp   process_guest_exception_and_events
  49.297  
  49.298  exception_with_ints_disabled:
  49.299 -        movl  XREGS_eflags(%esp),%eax
  49.300 -        movb  XREGS_cs(%esp),%al
  49.301 +        movl  UREGS_eflags(%esp),%eax
  49.302 +        movb  UREGS_cs(%esp),%al
  49.303          testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
  49.304          jnz   FATAL_exception_with_ints_disabled
  49.305          pushl %esp
  49.306 @@ -449,23 +449,23 @@ exception_with_ints_disabled:
  49.307          addl  $4,%esp
  49.308          testl %eax,%eax                 # no fixup code for faulting EIP?
  49.309          jz    FATAL_exception_with_ints_disabled
  49.310 -        movl  %eax,XREGS_eip(%esp)
  49.311 +        movl  %eax,UREGS_eip(%esp)
  49.312          movl  %esp,%esi
  49.313          subl  $4,%esp
  49.314          movl  %esp,%edi
  49.315 -        movl  $XREGS_kernel_sizeof/4,%ecx
  49.316 +        movl  $UREGS_kernel_sizeof/4,%ecx
  49.317          rep;  movsl                     # make room for error_code/entry_vector
  49.318 -        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
  49.319 -        movl  %eax,XREGS_kernel_sizeof(%esp)
  49.320 +        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
  49.321 +        movl  %eax,UREGS_kernel_sizeof(%esp)
  49.322          jmp   restore_all_xen           # return to fixup code
  49.323  
  49.324  FATAL_exception_with_ints_disabled:
  49.325          xorl  %esi,%esi
  49.326 -        movw  XREGS_entry_vector(%esp),%si
  49.327 +        movw  UREGS_entry_vector(%esp),%si
  49.328          movl  %esp,%edx
  49.329 -	pushl %edx			# push the xen_regs pointer
  49.330 +	pushl %edx			# push the cpu_user_regs pointer
  49.331          pushl %esi                      # push the trapnr (entry vector)
  49.332 -        call  SYMBOL_NAME(fatal_trap)
  49.333 +        call  fatal_trap
  49.334          ud2
  49.335                                          
  49.336  ENTRY(coprocessor_error)
  49.337 @@ -557,8 +557,8 @@ ENTRY(nmi)
  49.338          # In all other cases we bail without touching DS-GS, as we have
  49.339          # interrupted an enclosing Xen activation in tricky prologue or
  49.340          # epilogue code.
  49.341 -        movl  XREGS_eflags(%esp),%eax
  49.342 -        movb  XREGS_cs(%esp),%al
  49.343 +        movl  UREGS_eflags(%esp),%eax
  49.344 +        movb  UREGS_cs(%esp),%al
  49.345          testl $(3|X86_EFLAGS_VM),%eax
  49.346          jnz   do_watchdog_tick
  49.347          movl  %ds,%eax
  49.348 @@ -575,7 +575,7 @@ do_watchdog_tick:
  49.349          movl  %esp,%edx
  49.350          pushl %ebx   # reason
  49.351          pushl %edx   # regs
  49.352 -        call  SYMBOL_NAME(do_nmi)
  49.353 +        call  do_nmi
  49.354          addl  $8,%esp
  49.355          jmp   ret_from_intr
  49.356  
  49.357 @@ -595,21 +595,21 @@ nmi_parity_err:
  49.358          andb $0xf,%al
  49.359          orb  $0x4,%al
  49.360          outb %al,$0x61
  49.361 -        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
  49.362 +        cmpb $'i',%ss:opt_nmi # nmi=ignore
  49.363          je   nmi_out
  49.364 -        bts  $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
  49.365 -        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
  49.366 -        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
  49.367 +        bts  $0,%ss:nmi_softirq_reason
  49.368 +        bts  $NMI_SOFTIRQ,%ss:irq_stat
  49.369 +        cmpb $'d',%ss:opt_nmi # nmi=dom0
  49.370          je   nmi_out
  49.371          movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
  49.372          movl %edx,%ds
  49.373          movl %edx,%es
  49.374          movl %esp,%edx
  49.375          push %edx
  49.376 -        call SYMBOL_NAME(mem_parity_error)
  49.377 +        call mem_parity_error
  49.378          addl $4,%esp
  49.379 -nmi_out:movl  %ss:XREGS_eflags(%esp),%eax
  49.380 -        movb  %ss:XREGS_cs(%esp),%al
  49.381 +nmi_out:movl  %ss:UREGS_eflags(%esp),%eax
  49.382 +        movb  %ss:UREGS_cs(%esp),%al
  49.383          testl $(3|X86_EFLAGS_VM),%eax
  49.384          jz    restore_all_xen
  49.385          movl  $(__HYPERVISOR_DS),%edx
  49.386 @@ -623,18 +623,18 @@ nmi_io_err:
  49.387          andb $0xf,%al
  49.388          orb  $0x8,%al
  49.389          outb %al,$0x61
  49.390 -        cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
  49.391 +        cmpb $'i',%ss:opt_nmi # nmi=ignore
  49.392          je   nmi_out
  49.393 -        bts  $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
  49.394 -        bts  $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
  49.395 -        cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
  49.396 +        bts  $1,%ss:nmi_softirq_reason
  49.397 +        bts  $NMI_SOFTIRQ,%ss:irq_stat
  49.398 +        cmpb $'d',%ss:opt_nmi # nmi=dom0
  49.399          je   nmi_out
  49.400          movl $(__HYPERVISOR_DS),%edx       # nmi=fatal
  49.401          movl %edx,%ds
  49.402          movl %edx,%es
  49.403          movl %esp,%edx
  49.404          push %edx
  49.405 -        call SYMBOL_NAME(io_check_error)                        
  49.406 +        call io_check_error                        
  49.407          addl $4,%esp
  49.408          jmp  nmi_out
  49.409  
  49.410 @@ -652,32 +652,38 @@ ENTRY(setup_vm86_frame)
  49.411          addl $16,%esp
  49.412          ret
  49.413  
  49.414 +do_arch_sched_op:
  49.415 +        # Ensure we return success even if we return via schedule_tail()
  49.416 +        xorl %eax,%eax
  49.417 +        movl %eax,UREGS_eax+4(%esp)
  49.418 +        jmp  do_sched_op
  49.419 +
  49.420  do_switch_vm86:
  49.421          # Discard the return address
  49.422          addl $4,%esp
  49.423  
  49.424          # GS:ESI == Ring-1 stack activation
  49.425 -        movl XREGS_esp(%esp),%esi
  49.426 -VFLT1:  movl XREGS_ss(%esp),%gs
  49.427 +        movl UREGS_esp(%esp),%esi
  49.428 +VFLT1:  movl UREGS_ss(%esp),%gs
  49.429  
  49.430          # ES:EDI == Ring-0 stack activation
  49.431 -        leal XREGS_eip(%esp),%edi
  49.432 +        leal UREGS_eip(%esp),%edi
  49.433  
  49.434          # Restore the hypercall-number-clobbered EAX on our stack frame
  49.435  VFLT2:  movl %gs:(%esi),%eax
  49.436 -        movl %eax,XREGS_eax(%esp)
  49.437 +        movl %eax,UREGS_eax(%esp)
  49.438          addl $4,%esi
  49.439          	
  49.440        	# Copy the VM86 activation from the ring-1 stack to the ring-0 stack
  49.441 -        movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
  49.442 +        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
  49.443  VFLT3:  movl %gs:(%esi),%eax
  49.444          stosl
  49.445          addl $4,%esi
  49.446          loop VFLT3
  49.447  
  49.448          # Fix up EFLAGS: IOPL=0, IF=1, VM=1
  49.449 -        andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
  49.450 -        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,XREGS_eflags(%esp)
  49.451 +        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
  49.452 +        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)
  49.453          
  49.454          jmp test_all_events
  49.455  
  49.456 @@ -690,55 +696,55 @@ VFLT3:  movl %gs:(%esi),%eax
  49.457  .data
  49.458  
  49.459  ENTRY(exception_table)
  49.460 -        .long SYMBOL_NAME(do_divide_error)
  49.461 -        .long SYMBOL_NAME(do_debug)
  49.462 +        .long do_divide_error
  49.463 +        .long do_debug
  49.464          .long 0 # nmi
  49.465 -        .long SYMBOL_NAME(do_int3)
  49.466 -        .long SYMBOL_NAME(do_overflow)
  49.467 -        .long SYMBOL_NAME(do_bounds)
  49.468 -        .long SYMBOL_NAME(do_invalid_op)
  49.469 -        .long SYMBOL_NAME(math_state_restore)
  49.470 +        .long do_int3
  49.471 +        .long do_overflow
  49.472 +        .long do_bounds
  49.473 +        .long do_invalid_op
  49.474 +        .long math_state_restore
  49.475          .long 0 # double fault
  49.476 -        .long SYMBOL_NAME(do_coprocessor_segment_overrun)
  49.477 -        .long SYMBOL_NAME(do_invalid_TSS)
  49.478 -        .long SYMBOL_NAME(do_segment_not_present)
  49.479 -        .long SYMBOL_NAME(do_stack_segment)
  49.480 -        .long SYMBOL_NAME(do_general_protection)
  49.481 -        .long SYMBOL_NAME(do_page_fault)
  49.482 -        .long SYMBOL_NAME(do_spurious_interrupt_bug)
  49.483 -        .long SYMBOL_NAME(do_coprocessor_error)
  49.484 -        .long SYMBOL_NAME(do_alignment_check)
  49.485 -        .long SYMBOL_NAME(do_machine_check)
  49.486 -        .long SYMBOL_NAME(do_simd_coprocessor_error)
  49.487 +        .long do_coprocessor_segment_overrun
  49.488 +        .long do_invalid_TSS
  49.489 +        .long do_segment_not_present
  49.490 +        .long do_stack_segment
  49.491 +        .long do_general_protection
  49.492 +        .long do_page_fault
  49.493 +        .long do_spurious_interrupt_bug
  49.494 +        .long do_coprocessor_error
  49.495 +        .long do_alignment_check
  49.496 +        .long do_machine_check
  49.497 +        .long do_simd_coprocessor_error
  49.498  
  49.499  ENTRY(hypercall_table)
  49.500 -        .long SYMBOL_NAME(do_set_trap_table)     /*  0 */
  49.501 -        .long SYMBOL_NAME(do_mmu_update)
  49.502 -        .long SYMBOL_NAME(do_set_gdt)
  49.503 -        .long SYMBOL_NAME(do_stack_switch)
  49.504 -        .long SYMBOL_NAME(do_set_callbacks)
  49.505 -        .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
  49.506 -        .long SYMBOL_NAME(do_sched_op)
  49.507 -        .long SYMBOL_NAME(do_dom0_op)
  49.508 -        .long SYMBOL_NAME(do_set_debugreg)
  49.509 -        .long SYMBOL_NAME(do_get_debugreg)
  49.510 -        .long SYMBOL_NAME(do_update_descriptor)  /* 10 */
  49.511 -        .long SYMBOL_NAME(do_set_fast_trap)
  49.512 -        .long SYMBOL_NAME(do_dom_mem_op)
  49.513 -        .long SYMBOL_NAME(do_multicall)
  49.514 -        .long SYMBOL_NAME(do_update_va_mapping)
  49.515 -        .long SYMBOL_NAME(do_set_timer_op)       /* 15 */
  49.516 -        .long SYMBOL_NAME(do_event_channel_op)
  49.517 -        .long SYMBOL_NAME(do_xen_version)
  49.518 -        .long SYMBOL_NAME(do_console_io)
  49.519 -        .long SYMBOL_NAME(do_physdev_op)
  49.520 -        .long SYMBOL_NAME(do_grant_table_op)     /* 20 */
  49.521 -        .long SYMBOL_NAME(do_vm_assist)
  49.522 -        .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
  49.523 -        .long SYMBOL_NAME(do_switch_vm86)
  49.524 -        .long SYMBOL_NAME(do_boot_vcpu)
  49.525 -        .long SYMBOL_NAME(do_ni_hypercall)       /* 25 */
  49.526 -        .long SYMBOL_NAME(do_mmuext_op)
  49.527 +        .long do_set_trap_table     /*  0 */
  49.528 +        .long do_mmu_update
  49.529 +        .long do_set_gdt
  49.530 +        .long do_stack_switch
  49.531 +        .long do_set_callbacks
  49.532 +        .long do_fpu_taskswitch     /*  5 */
  49.533 +        .long do_arch_sched_op
  49.534 +        .long do_dom0_op
  49.535 +        .long do_set_debugreg
  49.536 +        .long do_get_debugreg
  49.537 +        .long do_update_descriptor  /* 10 */
  49.538 +        .long do_set_fast_trap
  49.539 +        .long do_dom_mem_op
  49.540 +        .long do_multicall
  49.541 +        .long do_update_va_mapping
  49.542 +        .long do_set_timer_op       /* 15 */
  49.543 +        .long do_event_channel_op
  49.544 +        .long do_xen_version
  49.545 +        .long do_console_io
  49.546 +        .long do_physdev_op
  49.547 +        .long do_grant_table_op     /* 20 */
  49.548 +        .long do_vm_assist
  49.549 +        .long do_update_va_mapping_otherdomain
  49.550 +        .long do_switch_vm86
  49.551 +        .long do_boot_vcpu
  49.552 +        .long do_ni_hypercall       /* 25 */
  49.553 +        .long do_mmuext_op
  49.554          .rept NR_hypercalls-((.-hypercall_table)/4)
  49.555 -        .long SYMBOL_NAME(do_ni_hypercall)
  49.556 +        .long do_ni_hypercall
  49.557          .endr
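
Two things change in this file beyond the UREGS_ rename: the SYMBOL_NAME() wrappers
(a relic of a.out-style underscore prefixing) are dropped in favour of plain symbol
names, and hypercall 6 is routed through the new do_arch_sched_op stub. The stub
pre-stores 0 into the saved EAX because a blocking sched_op may resume the VCPU via
schedule_tail(), which restores the frame directly and skips the normal
"movl %eax,UREGS_eax(%esp)" store. A C-level paraphrase of the stub, hedged since
the real one is assembly:

    /* Sketch: make sched_op report success on either return path. */
    asmlinkage long do_arch_sched_op(unsigned long op)
    {
        get_cpu_user_regs()->eax = 0;  /* seen if we resume via schedule_tail() */
        return do_sched_op(op);        /* the normal path overwrites eax anyway */
    }
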
    50.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Apr 28 13:54:01 2005 +0000
    50.2 +++ b/xen/arch/x86/x86_32/mm.c	Fri Apr 29 07:34:47 2005 +0000
    50.3 @@ -188,8 +188,8 @@ long do_stack_switch(unsigned long ss, u
    50.4      if ( (ss & 3) != 1 )
    50.5          return -EPERM;
    50.6  
    50.7 -    current->arch.kernel_ss = ss;
    50.8 -    current->arch.kernel_sp = esp;
    50.9 +    current->arch.guest_context.kernel_ss = ss;
   50.10 +    current->arch.guest_context.kernel_sp = esp;
   50.11      t->ss1  = ss;
   50.12      t->esp1 = esp;
   50.13  
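
do_stack_switch() now records the guest kernel stack in two places on purpose: the
guest_context copy is what save/restore and the control tools see, while the TSS
ring-1 fields are what the CPU consults on a privilege transition. The pairing,
annotated:

    current->arch.guest_context.kernel_ss = ss;   /* software-visible copy */
    current->arch.guest_context.kernel_sp = esp;
    t->ss1  = ss;                                 /* live TSS: ring-1 stack */
    t->esp1 = esp;
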
    51.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Thu Apr 28 13:54:01 2005 +0000
    51.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Fri Apr 29 07:34:47 2005 +0000
    51.3 @@ -115,7 +115,7 @@ int get_baselimit(u16 seg, unsigned long
    51.4      if ( ldt )
    51.5      {
    51.6          table = (unsigned long *)LDT_VIRT_START(d);
    51.7 -        if ( idx >= d->arch.ldt_ents )
    51.8 +        if ( idx >= d->arch.guest_context.ldt_ents )
    51.9              goto fail;
   51.10      }
   51.11      else /* gdt */
   51.12 @@ -181,7 +181,7 @@ int fixup_seg(u16 seg, unsigned long off
   51.13      if ( ldt )
   51.14      {
   51.15          table = (unsigned long *)LDT_VIRT_START(d);
   51.16 -        if ( idx >= d->arch.ldt_ents )
   51.17 +        if ( idx >= d->arch.guest_context.ldt_ents )
   51.18          {
   51.19              DPRINTK("Segment %04x out of LDT range (%ld)\n",
   51.20                      seg, d->arch.guest_context.ldt_ents);
   51.21 @@ -263,7 +263,7 @@ int fixup_seg(u16 seg, unsigned long off
   51.22   * Called from the general-protection fault handler to attempt to decode
   51.23   * and emulate an instruction that depends on 4GB segments.
   51.24   */
   51.25 -int gpf_emulate_4gb(struct xen_regs *regs)
   51.26 +int gpf_emulate_4gb(struct cpu_user_regs *regs)
   51.27  {
   51.28      struct exec_domain *d = current;
   51.29      trap_info_t   *ti;
   51.30 @@ -449,7 +449,7 @@ int gpf_emulate_4gb(struct xen_regs *reg
   51.31      /* If requested, give a callback on otherwise unused vector 15. */
   51.32      if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
   51.33      {
   51.34 -        ti  = &d->arch.traps[15];
   51.35 +        ti  = &d->arch.guest_context.trap_ctxt[15];
   51.36          tb  = &d->arch.trap_bounce;
   51.37          tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
   51.38          tb->error_code = pb - eip;
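
Both bound checks above validate a selector against the LDT entry count the guest
registered, now read from guest_context. A sketch of the selector decode that feeds
them, following the standard x86 selector format:

    unsigned int idx = seg >> 3;        /* descriptor table index */
    int ldt = (seg >> 2) & 1;           /* TI bit: 1 = LDT, 0 = GDT */
    if (ldt && idx >= d->arch.guest_context.ldt_ents)
        goto fail;                      /* beyond the guest's declared LDT */
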
    52.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Apr 28 13:54:01 2005 +0000
    52.2 +++ b/xen/arch/x86/x86_32/traps.c	Fri Apr 29 07:34:47 2005 +0000
    52.3 @@ -29,9 +29,10 @@ static inline int kernel_text_address(un
    52.4  void show_guest_stack(void)
    52.5  {
    52.6      int i;
    52.7 -    execution_context_t *ec = get_execution_context();
    52.8 -    unsigned long *stack = (unsigned long *)ec->esp;
    52.9 -    printk("Guest EIP is %08x\n   ", ec->eip);
   52.10 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   52.11 +    unsigned long *stack = (unsigned long *)regs->esp;
   52.12 +
   52.13 +    printk("Guest EIP is %08x\n   ", regs->eip);
   52.14  
   52.15      for ( i = 0; i < kstack_depth_to_print; i++ )
   52.16      {
   52.17 @@ -89,7 +90,7 @@ void show_stack(unsigned long *esp)
   52.18      show_trace( esp );
   52.19  }
   52.20  
   52.21 -void show_registers(struct xen_regs *regs)
   52.22 +void show_registers(struct cpu_user_regs *regs)
   52.23  {
   52.24      unsigned long ss, ds, es, fs, gs, cs;
   52.25      unsigned long eip, esp, eflags;
   52.26 @@ -215,9 +216,9 @@ asmlinkage void do_double_fault(void)
   52.27  }
   52.28  
   52.29  BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
   52.30 -asmlinkage void smp_deferred_nmi(struct xen_regs regs)
   52.31 +asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
   52.32  {
   52.33 -    asmlinkage void do_nmi(struct xen_regs *, unsigned long);
   52.34 +    asmlinkage void do_nmi(struct cpu_user_regs *, unsigned long);
   52.35      ack_APIC_irq();
   52.36      do_nmi(&regs, 0);
   52.37  }
   52.38 @@ -280,7 +281,7 @@ long set_fast_trap(struct exec_domain *p
   52.39      if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 
   52.40          return -1;
   52.41  
   52.42 -    ti = p->arch.traps + idx;
   52.43 +    ti = &p->arch.guest_context.trap_ctxt[idx];
   52.44  
   52.45      /*
   52.46       * We can't virtualise interrupt gates, as there's no way to get
   52.47 @@ -292,7 +293,7 @@ long set_fast_trap(struct exec_domain *p
   52.48      if ( p == current )
   52.49          CLEAR_FAST_TRAP(&p->arch);
   52.50  
   52.51 -    p->arch.fast_trap_idx    = idx;
   52.52 +    p->arch.guest_context.fast_trap_idx = idx;
   52.53      p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   52.54      p->arch.fast_trap_desc.b = 
   52.55          (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;
   52.56 @@ -319,10 +320,10 @@ long do_set_callbacks(unsigned long even
   52.57      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
   52.58          return -EPERM;
   52.59  
   52.60 -    d->arch.event_selector    = event_selector;
   52.61 -    d->arch.event_address     = event_address;
   52.62 -    d->arch.failsafe_selector = failsafe_selector;
   52.63 -    d->arch.failsafe_address  = failsafe_address;
   52.64 +    d->arch.guest_context.event_callback_cs     = event_selector;
   52.65 +    d->arch.guest_context.event_callback_eip    = event_address;
   52.66 +    d->arch.guest_context.failsafe_callback_cs  = failsafe_selector;
   52.67 +    d->arch.guest_context.failsafe_callback_eip = failsafe_address;
   52.68  
   52.69      return 0;
   52.70  }
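
After this change the guest-visible callback entry points and the fast-trap index
live in vcpu_guest_context, so do_set_callbacks()/set_fast_trap() and the dom0
context-setting path update the same storage. An illustrative slice of the
consolidated structure (field names taken from the hunks above, layout hypothetical):

    struct vcpu_guest_context_slice {
        unsigned long event_callback_cs,    event_callback_eip;
        unsigned long failsafe_callback_cs, failsafe_callback_eip;
        unsigned long fast_trap_idx;        /* i386 fast-trap vector */
        unsigned long kernel_ss, kernel_sp; /* ring-1 kernel stack */
    };
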
    53.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Apr 28 13:54:01 2005 +0000
    53.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Fri Apr 29 07:34:47 2005 +0000
    53.3 @@ -24,40 +24,44 @@
    53.4  
    53.5  void __dummy__(void)
    53.6  {
    53.7 -    OFFSET(XREGS_r15, struct xen_regs, r15);
    53.8 -    OFFSET(XREGS_r14, struct xen_regs, r14);
    53.9 -    OFFSET(XREGS_r13, struct xen_regs, r13);
   53.10 -    OFFSET(XREGS_r12, struct xen_regs, r12);
   53.11 -    OFFSET(XREGS_rbp, struct xen_regs, rbp);
   53.12 -    OFFSET(XREGS_rbx, struct xen_regs, rbx);
   53.13 -    OFFSET(XREGS_r11, struct xen_regs, r11);
   53.14 -    OFFSET(XREGS_r10, struct xen_regs, r10);
   53.15 -    OFFSET(XREGS_r9, struct xen_regs, r9);
   53.16 -    OFFSET(XREGS_r8, struct xen_regs, r8);
   53.17 -    OFFSET(XREGS_rax, struct xen_regs, rax);
   53.18 -    OFFSET(XREGS_rcx, struct xen_regs, rcx);
   53.19 -    OFFSET(XREGS_rdx, struct xen_regs, rdx);
   53.20 -    OFFSET(XREGS_rsi, struct xen_regs, rsi);
   53.21 -    OFFSET(XREGS_rdi, struct xen_regs, rdi);
   53.22 -    OFFSET(XREGS_error_code, struct xen_regs, error_code);
   53.23 -    OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
   53.24 -    OFFSET(XREGS_rip, struct xen_regs, rip);
   53.25 -    OFFSET(XREGS_cs, struct xen_regs, cs);
   53.26 -    OFFSET(XREGS_eflags, struct xen_regs, eflags);
   53.27 -    OFFSET(XREGS_rsp, struct xen_regs, rsp);
   53.28 -    OFFSET(XREGS_ss, struct xen_regs, ss);
   53.29 -    OFFSET(XREGS_kernel_sizeof, struct xen_regs, es);
   53.30 -    DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
   53.31 +    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
   53.32 +    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
   53.33 +    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
   53.34 +    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
   53.35 +    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
   53.36 +    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
   53.37 +    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
   53.38 +    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
   53.39 +    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
   53.40 +    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
   53.41 +    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
   53.42 +    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
   53.43 +    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
   53.44 +    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
   53.45 +    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
   53.46 +    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
   53.47 +    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
   53.48 +    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
   53.49 +    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
   53.50 +    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
   53.51 +    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
   53.52 +    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
   53.53 +    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
   53.54 +    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
   53.55      BLANK();
   53.56  
   53.57      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
   53.58      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
   53.59 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
   53.60 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
   53.61 -    OFFSET(EDOMAIN_syscall_addr, struct exec_domain, arch.syscall_address);
   53.62      OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   53.63      OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   53.64 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp);
   53.65 +    OFFSET(EDOMAIN_event_addr, struct exec_domain,
   53.66 +           arch.guest_context.event_callback_eip);
   53.67 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   53.68 +           arch.guest_context.failsafe_callback_eip);
   53.69 +    OFFSET(EDOMAIN_syscall_addr, struct exec_domain,
   53.70 +           arch.guest_context.syscall_callback_eip);
   53.71 +    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   53.72 +           arch.guest_context.kernel_sp);
   53.73      BLANK();
   53.74  
   53.75      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
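
asm-offsets.c is never linked into Xen; it is compiled to assembly so a
build step can scrape the OFFSET()/DEFINE() markers into constants usable
from entry.S. The macros are typically defined along these lines (a sketch;
their real definitions are outside this changeset):

    #define DEFINE(_sym, _val) \
        __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
    #define BLANK() \
        __asm__ __volatile__ ( "\n->" : : )
    #define OFFSET(_sym, _str, _mem) \
        DEFINE(_sym, offsetof(_str, _mem))
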
    54.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Apr 28 13:54:01 2005 +0000
    54.2 +++ b/xen/arch/x86/x86_64/entry.S	Fri Apr 29 07:34:47 2005 +0000
    54.3 @@ -120,10 +120,10 @@ ENTRY(syscall_enter)
    54.4          sti
    54.5          movq  %r10,%rcx
    54.6          andq  $(NR_hypercalls-1),%rax
    54.7 -        leaq  SYMBOL_NAME(hypercall_table)(%rip),%r10
    54.8 +        leaq  hypercall_table(%rip),%r10
    54.9          PERFC_INCR(PERFC_hypercalls, %rax)
   54.10          callq *(%r10,%rax,8)
   54.11 -        movq %rax,XREGS_rax(%rsp)       # save the return value
   54.12 +        movq %rax,UREGS_rax(%rsp)       # save the return value
   54.13  
   54.14  /* %rbx: struct exec_domain */
   54.15  test_all_events:
   54.16 @@ -131,7 +131,7 @@ test_all_events:
   54.17  /*test_softirqs:*/  
   54.18          movl  EDOMAIN_processor(%rbx),%eax
   54.19          shl   $IRQSTAT_shift,%rax
   54.20 -        leaq  SYMBOL_NAME(irq_stat)(%rip),%rcx
   54.21 +        leaq  irq_stat(%rip),%rcx
   54.22          testl $~0,(%rcx,%rax,1)
   54.23          jnz   process_softirqs
   54.24  /*test_guest_events:*/
   54.25 @@ -160,10 +160,7 @@ test_all_events:
   54.26   * and we set it to the fixed value.
   54.27   *
   54.28   * We also need the room, especially because the orig_eax field is used 
   54.29 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
   54.30 - *   (13) u64 gs_base_user;                 
   54.31 - *   (12) u64 gs_base_kernel;                 
   54.32 - *   (11) u64 fs_base;                 
   54.33 + * by do_IRQ(). Compared to cpu_user_regs, we skip pushing the following:
   54.34   *   (10) u64 gs;                 
   54.35   *   (9)  u64 fs;
   54.36   *   (8)  u64 ds;
   54.37 @@ -176,9 +173,6 @@ test_all_events:
   54.38   *   (2)  u64 rip;
   54.39   * (2/1)  u32 entry_vector;
   54.40   * (1/1)  u32 error_code;
   54.41 - * However, get_stack_bottom() actually returns 64 bytes before the real
   54.42 - * bottom of the stack to allow space for:
   54.43 - * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
   54.44   */
   54.45  #define VMX_MONITOR_RFLAGS	0x202 /* IF on */
   54.46  #define NR_SKIPPED_REGS	6	/* See the above explanation */
   54.47 @@ -205,7 +199,7 @@ test_all_events:
   54.48  ENTRY(vmx_asm_vmexit_handler)
   54.49          /* selectors are restored/saved by VMX */
   54.50          VMX_SAVE_ALL_NOSEGREGS
   54.51 -        call SYMBOL_NAME(vmx_vmexit_handler)
   54.52 +        call vmx_vmexit_handler
   54.53          jmp vmx_asm_do_resume
   54.54  
   54.55  ENTRY(vmx_asm_do_launch)
   54.56 @@ -228,7 +222,7 @@ ENTRY(vmx_asm_do_launch)
   54.57          /* VMLAUNCH */
   54.58          .byte 0x0f,0x01,0xc2
   54.59          pushfq
   54.60 -        call SYMBOL_NAME(vm_launch_fail)
   54.61 +        call vm_launch_fail
   54.62          hlt
   54.63          
   54.64          ALIGN
   54.65 @@ -241,12 +235,12 @@ vmx_test_all_events:
   54.66  /*test_softirqs:*/  
   54.67          movl  EDOMAIN_processor(%rbx),%eax
   54.68          shl   $IRQSTAT_shift,%rax
   54.69 -        leaq  SYMBOL_NAME(irq_stat)(%rip), %rdx
   54.70 +        leaq  irq_stat(%rip), %rdx
   54.71          testl $~0,(%rdx,%rax,1)
   54.72          jnz   vmx_process_softirqs
   54.73  
   54.74  vmx_restore_all_guest:
   54.75 -        call SYMBOL_NAME(load_cr2)
   54.76 +        call load_cr2
   54.77          /* 
   54.78           * Check if we are going back to VMX-based VM
   54.79           * By this time, all the setups in the VMCS must be complete.
   54.80 @@ -270,14 +264,14 @@ vmx_restore_all_guest:
   54.81          /* VMRESUME */
   54.82          .byte 0x0f,0x01,0xc3
   54.83          pushfq
   54.84 -        call SYMBOL_NAME(vm_resume_fail)
   54.85 +        call vm_resume_fail
   54.86          /* Should never reach here */
   54.87          hlt
   54.88  
   54.89          ALIGN
   54.90  vmx_process_softirqs:
   54.91          sti       
   54.92 -        call SYMBOL_NAME(do_softirq)
   54.93 +        call do_softirq
   54.94          jmp  vmx_test_all_events
   54.95  #endif
   54.96  
   54.97 @@ -285,7 +279,7 @@ vmx_process_softirqs:
   54.98  /* %rbx: struct exec_domain */
   54.99  process_softirqs:
  54.100          sti       
  54.101 -        call SYMBOL_NAME(do_softirq)
  54.102 +        call do_softirq
  54.103          jmp  test_all_events
  54.104  
  54.105  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
  54.106 @@ -298,13 +292,13 @@ create_bounce_frame:
  54.107          /* Push new frame at registered guest-OS stack base. */
  54.108          pushq %rdx
  54.109          movq  %rbx,%rdi
  54.110 -        call  SYMBOL_NAME(toggle_guest_mode)
  54.111 +        call  toggle_guest_mode
  54.112          popq  %rdx
  54.113          movq  EDOMAIN_kernel_sp(%rbx),%rsi
  54.114          jmp   2f
  54.115  1:      /* In kernel context already: push new frame at existing %rsp. */
  54.116 -        movq  XREGS_rsp+8(%rsp),%rsi
  54.117 -        andb  $0xfc,XREGS_cs+8(%rsp)    # Indicate kernel context to guest.
  54.118 +        movq  UREGS_rsp+8(%rsp),%rsi
  54.119 +        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
  54.120  2:      movq  $HYPERVISOR_VIRT_START,%rax
  54.121          cmpq  %rax,%rsi
  54.122          jb    1f                        # In +ve address space? Then okay.
  54.123 @@ -312,15 +306,15 @@ 2:      movq  $HYPERVISOR_VIRT_START,%ra
  54.124          cmpq  %rax,%rsi
  54.125          jb    domain_crash_synchronous  # Above Xen private area? Then okay.
  54.126  1:      subq  $40,%rsi
  54.127 -        movq  XREGS_ss+8(%rsp),%rax
  54.128 +        movq  UREGS_ss+8(%rsp),%rax
  54.129  FLT2:   movq  %rax,32(%rsi)             # SS
  54.130 -        movq  XREGS_rsp+8(%rsp),%rax
  54.131 +        movq  UREGS_rsp+8(%rsp),%rax
  54.132  FLT3:   movq  %rax,24(%rsi)             # RSP
  54.133 -        movq  XREGS_eflags+8(%rsp),%rax
  54.134 +        movq  UREGS_eflags+8(%rsp),%rax
  54.135  FLT4:   movq  %rax,16(%rsi)             # RFLAGS
  54.136 -        movq  XREGS_cs+8(%rsp),%rax
  54.137 +        movq  UREGS_cs+8(%rsp),%rax
  54.138  FLT5:   movq  %rax,8(%rsi)              # CS
  54.139 -        movq  XREGS_rip+8(%rsp),%rax
  54.140 +        movq  UREGS_rip+8(%rsp),%rax
  54.141  FLT6:   movq  %rax,(%rsi)               # RIP
  54.142          movb  TRAPBOUNCE_flags(%rdx),%cl
  54.143          testb $TBF_EXCEPTION_ERRCODE,%cl
  54.144 @@ -345,19 +339,19 @@ FLT11:  movq  %rax,8(%rsi)              
  54.145          movl  %ds,%eax
  54.146  FLT12:  movq  %rax,(%rsi)               # DS
  54.147  2:      subq  $16,%rsi
  54.148 -        movq  XREGS_r11+8(%rsp),%rax
  54.149 +        movq  UREGS_r11+8(%rsp),%rax
  54.150  FLT13:  movq  %rax,8(%rsi)              # R11
  54.151 -        movq  XREGS_rcx+8(%rsp),%rax
  54.152 +        movq  UREGS_rcx+8(%rsp),%rax
  54.153  FLT14:  movq  %rax,(%rsi)               # RCX
  54.154          /* Rewrite our stack frame and return to guest-OS mode. */
  54.155          /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
  54.156 -        movq  $TRAP_syscall,XREGS_entry_vector+8(%rsp)
  54.157 -        andl  $0xfffcbeff,XREGS_eflags+8(%rsp)
  54.158 -        movq  $__GUEST_SS,XREGS_ss+8(%rsp)
  54.159 -        movq  %rsi,XREGS_rsp+8(%rsp)
  54.160 -        movq  $__GUEST_CS,XREGS_cs+8(%rsp)
  54.161 +        movq  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
  54.162 +        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
  54.163 +        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
  54.164 +        movq  %rsi,UREGS_rsp+8(%rsp)
  54.165 +        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
  54.166          movq  TRAPBOUNCE_eip(%rdx),%rax
  54.167 -        movq  %rax,XREGS_rip+8(%rsp)
  54.168 +        movq  %rax,UREGS_rip+8(%rsp)
  54.169          movb  $0,TRAPBOUNCE_flags(%rdx)
  54.170          ret
  54.171  .section __ex_table,"a"
  54.172 @@ -383,7 +377,7 @@ process_guest_exception_and_events:
  54.173  /* No special register assumptions. */
  54.174  ENTRY(ret_from_intr)
  54.175          GET_CURRENT(%rbx)
  54.176 -        testb $3,XREGS_cs(%rsp)
  54.177 +        testb $3,UREGS_cs(%rsp)
  54.178          jnz   test_all_events
  54.179          jmp   restore_all_xen
  54.180  
  54.181 @@ -391,45 +385,45 @@ ENTRY(ret_from_intr)
  54.182  /* No special register assumptions. */
  54.183  error_code:
  54.184          SAVE_ALL
  54.185 -        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp)
  54.186 +        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
  54.187          jz    exception_with_ints_disabled
  54.188          sti
  54.189          movq  %rsp,%rdi
  54.190 -        movl  XREGS_entry_vector(%rsp),%eax
  54.191 -        leaq  SYMBOL_NAME(exception_table)(%rip),%rdx
  54.192 +        movl  UREGS_entry_vector(%rsp),%eax
  54.193 +        leaq  exception_table(%rip),%rdx
  54.194          GET_CURRENT(%rbx)
  54.195          PERFC_INCR(PERFC_exceptions, %rax)
  54.196          callq *(%rdx,%rax,8)
  54.197 -        testb $3,XREGS_cs(%rsp)
  54.198 +        testb $3,UREGS_cs(%rsp)
  54.199          jz    restore_all_xen
  54.200          jmp   process_guest_exception_and_events
  54.201  
  54.202  /* No special register assumptions. */
  54.203  exception_with_ints_disabled:
  54.204 -        testb $3,XREGS_cs(%rsp)         # interrupts disabled outside Xen?
  54.205 +        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
  54.206          jnz   FATAL_exception_with_ints_disabled
  54.207          movq  %rsp,%rdi
  54.208          call  search_pre_exception_table
  54.209          testq %rax,%rax                 # no fixup code for faulting EIP?
  54.210          jz    FATAL_exception_with_ints_disabled
  54.211 -        movq  %rax,XREGS_rip(%rsp)
  54.212 -        subq  $8,XREGS_rsp(%rsp)        # add ec/ev to previous stack frame
  54.213 -        testb $15,XREGS_rsp(%rsp)       # return %rsp is now aligned?
  54.214 +        movq  %rax,UREGS_rip(%rsp)
  54.215 +        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
  54.216 +        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
  54.217          jz    1f                        # then there is a pad quadword already
  54.218          movq  %rsp,%rsi
  54.219          subq  $8,%rsp
  54.220          movq  %rsp,%rdi
  54.221 -        movq  $XREGS_kernel_sizeof/8,%rcx
  54.222 +        movq  $UREGS_kernel_sizeof/8,%rcx
  54.223          rep;  movsq                     # make room for ec/ev
  54.224 -1:      movq  XREGS_error_code(%rsp),%rax # ec/ev
  54.225 -        movq  %rax,XREGS_kernel_sizeof(%rsp)
  54.226 +1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
  54.227 +        movq  %rax,UREGS_kernel_sizeof(%rsp)
  54.228          jmp   restore_all_xen           # return to fixup code
  54.229  
  54.230  /* No special register assumptions. */
  54.231  FATAL_exception_with_ints_disabled:
  54.232 -        movl  XREGS_entry_vector(%rsp),%edi
  54.233 +        movl  UREGS_entry_vector(%rsp),%edi
  54.234          movq  %rsp,%rsi
  54.235 -        call  SYMBOL_NAME(fatal_trap)
  54.236 +        call  fatal_trap
  54.237          ud2
  54.238  
  54.239  ENTRY(divide_error)
  54.240 @@ -526,61 +520,67 @@ ENTRY(nmi)
  54.241          inb   $0x61,%al
  54.242          movl  %eax,%esi # reason
  54.243          movq  %rsp,%rdi # regs
  54.244 -        call  SYMBOL_NAME(do_nmi)
  54.245 +        call  do_nmi
  54.246          jmp   restore_all_xen
  54.247  
  54.248 +do_arch_sched_op:
  54.249 +        # Ensure we return success even if we return via schedule_tail()
  54.250 +        xorl  %eax,%eax
  54.251 +        movq  %rax,UREGS_rax+8(%rsp)
  54.252 +        jmp   do_sched_op
  54.253 +
  54.254  .data
  54.255  
  54.256  ENTRY(exception_table)
  54.257 -        .quad SYMBOL_NAME(do_divide_error)
  54.258 -        .quad SYMBOL_NAME(do_debug)
  54.259 +        .quad do_divide_error
  54.260 +        .quad do_debug
  54.261          .quad 0 # nmi
  54.262 -        .quad SYMBOL_NAME(do_int3)
  54.263 -        .quad SYMBOL_NAME(do_overflow)
  54.264 -        .quad SYMBOL_NAME(do_bounds)
  54.265 -        .quad SYMBOL_NAME(do_invalid_op)
  54.266 -        .quad SYMBOL_NAME(math_state_restore)
  54.267 -        .quad SYMBOL_NAME(do_double_fault)
  54.268 -        .quad SYMBOL_NAME(do_coprocessor_segment_overrun)
  54.269 -        .quad SYMBOL_NAME(do_invalid_TSS)
  54.270 -        .quad SYMBOL_NAME(do_segment_not_present)
  54.271 -        .quad SYMBOL_NAME(do_stack_segment)
  54.272 -        .quad SYMBOL_NAME(do_general_protection)
  54.273 -        .quad SYMBOL_NAME(do_page_fault)
  54.274 -        .quad SYMBOL_NAME(do_spurious_interrupt_bug)
  54.275 -        .quad SYMBOL_NAME(do_coprocessor_error)
  54.276 -        .quad SYMBOL_NAME(do_alignment_check)
  54.277 -        .quad SYMBOL_NAME(do_machine_check)
  54.278 -        .quad SYMBOL_NAME(do_simd_coprocessor_error)
  54.279 +        .quad do_int3
  54.280 +        .quad do_overflow
  54.281 +        .quad do_bounds
  54.282 +        .quad do_invalid_op
  54.283 +        .quad math_state_restore
  54.284 +        .quad do_double_fault
  54.285 +        .quad do_coprocessor_segment_overrun
  54.286 +        .quad do_invalid_TSS
  54.287 +        .quad do_segment_not_present
  54.288 +        .quad do_stack_segment
  54.289 +        .quad do_general_protection
  54.290 +        .quad do_page_fault
  54.291 +        .quad do_spurious_interrupt_bug
  54.292 +        .quad do_coprocessor_error
  54.293 +        .quad do_alignment_check
  54.294 +        .quad do_machine_check
  54.295 +        .quad do_simd_coprocessor_error
  54.296  
  54.297  ENTRY(hypercall_table)
  54.298 -        .quad SYMBOL_NAME(do_set_trap_table)     /*  0 */
  54.299 -        .quad SYMBOL_NAME(do_mmu_update)
  54.300 -        .quad SYMBOL_NAME(do_set_gdt)
  54.301 -        .quad SYMBOL_NAME(do_stack_switch)
  54.302 -        .quad SYMBOL_NAME(do_set_callbacks)
  54.303 -        .quad SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
  54.304 -        .quad SYMBOL_NAME(do_sched_op)
  54.305 -        .quad SYMBOL_NAME(do_dom0_op)
  54.306 -        .quad SYMBOL_NAME(do_set_debugreg)
  54.307 -        .quad SYMBOL_NAME(do_get_debugreg)
  54.308 -        .quad SYMBOL_NAME(do_update_descriptor)  /* 10 */
  54.309 -        .quad SYMBOL_NAME(do_ni_hypercall)
  54.310 -        .quad SYMBOL_NAME(do_dom_mem_op)
  54.311 -        .quad SYMBOL_NAME(do_multicall)
  54.312 -        .quad SYMBOL_NAME(do_update_va_mapping)
  54.313 -        .quad SYMBOL_NAME(do_set_timer_op)       /* 15 */
  54.314 -        .quad SYMBOL_NAME(do_event_channel_op)
  54.315 -        .quad SYMBOL_NAME(do_xen_version)
  54.316 -        .quad SYMBOL_NAME(do_console_io)
  54.317 -        .quad SYMBOL_NAME(do_physdev_op)
  54.318 -        .quad SYMBOL_NAME(do_grant_table_op)     /* 20 */
  54.319 -        .quad SYMBOL_NAME(do_vm_assist)
  54.320 -        .quad SYMBOL_NAME(do_update_va_mapping_otherdomain)
  54.321 -        .quad SYMBOL_NAME(do_switch_to_user)
  54.322 -        .quad SYMBOL_NAME(do_boot_vcpu)
  54.323 -        .quad SYMBOL_NAME(do_set_segment_base)   /* 25 */
  54.324 -        .quad SYMBOL_NAME(do_mmuext_op)
  54.325 +        .quad do_set_trap_table     /*  0 */
  54.326 +        .quad do_mmu_update
  54.327 +        .quad do_set_gdt
  54.328 +        .quad do_stack_switch
  54.329 +        .quad do_set_callbacks
  54.330 +        .quad do_fpu_taskswitch     /*  5 */
  54.331 +        .quad do_arch_sched_op
  54.332 +        .quad do_dom0_op
  54.333 +        .quad do_set_debugreg
  54.334 +        .quad do_get_debugreg
  54.335 +        .quad do_update_descriptor  /* 10 */
  54.336 +        .quad do_ni_hypercall
  54.337 +        .quad do_dom_mem_op
  54.338 +        .quad do_multicall
  54.339 +        .quad do_update_va_mapping
  54.340 +        .quad do_set_timer_op       /* 15 */
  54.341 +        .quad do_event_channel_op
  54.342 +        .quad do_xen_version
  54.343 +        .quad do_console_io
  54.344 +        .quad do_physdev_op
  54.345 +        .quad do_grant_table_op     /* 20 */
  54.346 +        .quad do_vm_assist
  54.347 +        .quad do_update_va_mapping_otherdomain
  54.348 +        .quad do_switch_to_user
  54.349 +        .quad do_boot_vcpu
  54.350 +        .quad do_set_segment_base   /* 25 */
  54.351 +        .quad do_mmuext_op
  54.352          .rept NR_hypercalls-((.-hypercall_table)/8)
  54.353 -        .quad SYMBOL_NAME(do_ni_hypercall)
  54.354 +        .quad do_ni_hypercall
  54.355          .endr
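
Because the hypercall number is masked with NR_hypercalls-1 before the
indirect call, out-of-range numbers land on the do_ni_hypercall entries the
.rept directive pads the table with. In C the dispatch above is roughly as
follows (a sketch; the function-pointer type is assumed, not from the tree):

    typedef long (*hypercall_fn_t)(unsigned long, unsigned long,
                                   unsigned long, unsigned long,
                                   unsigned long);
    extern hypercall_fn_t hypercall_table[NR_hypercalls];

    /* %rdi,%rsi,%rdx,%r10(->%rcx),%r8 carry the guest's arguments. */
    regs->rax = hypercall_table[regs->rax & (NR_hypercalls - 1)]
                    (regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
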
    55.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Apr 28 13:54:01 2005 +0000
    55.2 +++ b/xen/arch/x86/x86_64/mm.c	Fri Apr 29 07:34:47 2005 +0000
    55.3 @@ -240,8 +240,8 @@ long do_stack_switch(unsigned long ss, u
    55.4  {
    55.5      if ( (ss & 3) != 3 )
    55.6          return -EPERM;
    55.7 -    current->arch.kernel_ss = ss;
    55.8 -    current->arch.kernel_sp = esp;
    55.9 +    current->arch.guest_context.kernel_ss = ss;
   55.10 +    current->arch.guest_context.kernel_sp = esp;
   55.11      return 0;
   55.12  }
   55.13  
   55.14 @@ -253,21 +253,24 @@ long do_set_segment_base(unsigned int wh
   55.15      switch ( which )
   55.16      {
   55.17      case SEGBASE_FS:
   55.18 -        ed->arch.user_ctxt.fs_base = base;
   55.19          if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
   55.20              ret = -EFAULT;
   55.21 +        else
   55.22 +            ed->arch.guest_context.fs_base = base;
   55.23          break;
   55.24  
   55.25      case SEGBASE_GS_USER:
   55.26 -        ed->arch.user_ctxt.gs_base_user = base;
   55.27          if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
   55.28              ret = -EFAULT;
   55.29 +        else
   55.30 +            ed->arch.guest_context.gs_base_user = base;
   55.31          break;
   55.32  
   55.33      case SEGBASE_GS_KERNEL:
   55.34 -        ed->arch.user_ctxt.gs_base_kernel = base;
   55.35          if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
   55.36              ret = -EFAULT;
   55.37 +        else
   55.38 +            ed->arch.guest_context.gs_base_kernel = base;
   55.39          break;
   55.40  
   55.41      case SEGBASE_GS_USER_SEL:
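
The reordering in do_set_segment_base() means guest_context only records a
base the hardware actually accepted, so the saved context can no longer
diverge from the MSRs. wrmsr_user is presumably the exception-fixup variant
of wrmsr, in the usual shape (a sketch, not the definition from this tree):

    #define wrmsr_user(msr, lo, hi) ({                        \
        int rc_ = 0;                                          \
        __asm__ __volatile__ (                                \
            "1: wrmsr\n2:\n"                                  \
            ".section .fixup,\"ax\"\n"                        \
            "3: movl $1,%0 ; jmp 2b\n"  /* fault -> rc_=1 */  \
            ".previous\n"                                     \
            ".section __ex_table,\"a\"\n"                     \
            "   .quad 1b,3b\n"                                \
            ".previous\n"                                     \
            : "+r" (rc_)                                      \
            : "c" (msr), "a" (lo), "d" (hi) );                \
        rc_; })
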
    56.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Apr 28 13:54:01 2005 +0000
    56.2 +++ b/xen/arch/x86/x86_64/traps.c	Fri Apr 29 07:34:47 2005 +0000
    56.3 @@ -24,9 +24,10 @@ static inline int kernel_text_address(un
    56.4  void show_guest_stack(void)
    56.5  {
    56.6      int i;
    56.7 -    execution_context_t *ec = get_execution_context();
    56.8 -    unsigned long *stack = (unsigned long *)ec->rsp;
    56.9 -    printk("Guest RIP is %016lx\n   ", ec->rip);
   56.10 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   56.11 +    unsigned long *stack = (unsigned long *)regs->rsp;
   56.12 +
   56.13 +    printk("Guest RIP is %016lx\n   ", regs->rip);
   56.14  
   56.15      for ( i = 0; i < kstack_depth_to_print; i++ )
   56.16      {
   56.17 @@ -84,7 +85,7 @@ void show_stack(unsigned long *rsp)
   56.18      show_trace(rsp);
   56.19  }
   56.20  
   56.21 -void show_registers(struct xen_regs *regs)
   56.22 +void show_registers(struct cpu_user_regs *regs)
   56.23  {
   56.24      printk("CPU:    %d\nEIP:    %04lx:[<%016lx>]      \nEFLAGS: %016lx\n",
   56.25             smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
   56.26 @@ -130,7 +131,7 @@ void show_page_walk(unsigned long addr)
   56.27  }
   56.28  
   56.29  asmlinkage void double_fault(void);
   56.30 -asmlinkage void do_double_fault(struct xen_regs *regs)
   56.31 +asmlinkage void do_double_fault(struct cpu_user_regs *regs)
   56.32  {
   56.33      /* Disable the NMI watchdog. It's useless now. */
   56.34      watchdog_on = 0;
   56.35 @@ -254,9 +255,9 @@ long do_set_callbacks(unsigned long even
   56.36  {
   56.37      struct exec_domain *d = current;
   56.38  
   56.39 -    d->arch.event_address    = event_address;
   56.40 -    d->arch.failsafe_address = failsafe_address;
   56.41 -    d->arch.syscall_address  = syscall_address;
   56.42 +    d->arch.guest_context.event_callback_eip    = event_address;
   56.43 +    d->arch.guest_context.failsafe_callback_eip = failsafe_address;
   56.44 +    d->arch.guest_context.syscall_callback_eip  = syscall_address;
   56.45  
   56.46      return 0;
   56.47  }
    57.1 --- a/xen/arch/x86/x86_emulate.c	Thu Apr 28 13:54:01 2005 +0000
    57.2 +++ b/xen/arch/x86/x86_emulate.c	Fri Apr 29 07:34:47 2005 +0000
    57.3 @@ -377,7 +377,7 @@ do{ __asm__ __volatile__ (              
    57.4  
    57.5  void *
    57.6  decode_register(
    57.7 -    u8 modrm_reg, struct xen_regs *regs, int highbyte_regs)
    57.8 +    u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
    57.9  {
   57.10      void *p;
   57.11  
   57.12 @@ -417,7 +417,7 @@ decode_register(
   57.13  
   57.14  int 
   57.15  x86_emulate_memop(
   57.16 -    struct xen_regs *regs,
   57.17 +    struct cpu_user_regs *regs,
   57.18      unsigned long cr2,
   57.19      struct x86_mem_emulator *ops,
   57.20      int mode)
   57.21 @@ -430,7 +430,7 @@ x86_emulate_memop(
   57.22      struct operand src, dst;
   57.23  
   57.24      /* Shadow copy of register state. Committed on successful emulation. */
   57.25 -    struct xen_regs _regs = *regs;
   57.26 +    struct cpu_user_regs _regs = *regs;
   57.27  
   57.28      /* Legacy prefixes. */
   57.29      for ( i = 0; i < 8; i++ )
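
_regs is the emulator's transaction buffer: all side effects of the decoded
instruction are applied to the shadow copy and only written back when
emulation succeeds. A sketch of the overall pattern:

    struct cpu_user_regs _regs = *regs;   /* scratch copy of guest state */
    /* ... decode, fetch operands, execute against _regs ... */
    if ( rc == 0 )
        *regs = _regs;                    /* commit only on success      */
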
    58.1 --- a/xen/common/dom0_ops.c	Thu Apr 28 13:54:01 2005 +0000
    58.2 +++ b/xen/common/dom0_ops.c	Fri Apr 29 07:34:47 2005 +0000
    58.3 @@ -21,7 +21,7 @@
    58.4  
    58.5  extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
    58.6  extern void arch_getdomaininfo_ctxt(
    58.7 -    struct exec_domain *, full_execution_context_t *);
    58.8 +    struct exec_domain *, struct vcpu_guest_context *);
    58.9  
   58.10  static inline int is_free_domid(domid_t dom)
   58.11  {
   58.12 @@ -279,7 +279,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   58.13  
   58.14      case DOM0_GETDOMAININFO:
   58.15      { 
   58.16 -        full_execution_context_t *c;
   58.17 +        struct vcpu_guest_context *c;
   58.18          struct domain            *d;
   58.19          struct exec_domain       *ed;
   58.20  
   58.21 @@ -331,7 +331,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   58.22  
   58.23          if ( op->u.getdomaininfo.ctxt != NULL )
   58.24          {
   58.25 -            if ( (c = xmalloc(full_execution_context_t)) == NULL )
   58.26 +            if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   58.27              {
   58.28                  ret = -ENOMEM;
   58.29                  put_domain(d);
    59.1 --- a/xen/common/domain.c	Thu Apr 28 13:54:01 2005 +0000
    59.2 +++ b/xen/common/domain.c	Fri Apr 29 07:34:47 2005 +0000
    59.3 @@ -231,7 +231,7 @@ void domain_destruct(struct domain *d)
    59.4  int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
    59.5  {
    59.6      int rc = 0;
    59.7 -    full_execution_context_t *c = NULL;
    59.8 +    struct vcpu_guest_context *c = NULL;
    59.9      unsigned long vcpu = setdomaininfo->exec_domain;
   59.10      struct exec_domain *ed; 
   59.11  
   59.12 @@ -242,7 +242,7 @@ int set_info_guest(struct domain *p, dom
   59.13          !test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
   59.14          return -EINVAL;
   59.15  
   59.16 -    if ( (c = xmalloc(full_execution_context_t)) == NULL )
   59.17 +    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   59.18          return -ENOMEM;
   59.19  
   59.20      if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
   59.21 @@ -266,12 +266,12 @@ int set_info_guest(struct domain *p, dom
   59.22   * than domain 0. ie. the domains that are being built by the userspace dom0
   59.23   * domain builder.
   59.24   */
   59.25 -long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) 
   59.26 +long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) 
   59.27  {
   59.28      struct domain *d = current->domain;
   59.29      struct exec_domain *ed;
   59.30      int rc = 0;
   59.31 -    full_execution_context_t *c;
   59.32 +    struct vcpu_guest_context *c;
   59.33  
   59.34      if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
   59.35          return -EINVAL;
   59.36 @@ -279,7 +279,7 @@ long do_boot_vcpu(unsigned long vcpu, fu
   59.37      if ( alloc_exec_domain_struct(d, vcpu) == NULL )
   59.38          return -ENOMEM;
   59.39  
   59.40 -    if ( (c = xmalloc(full_execution_context_t)) == NULL )
   59.41 +    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   59.42      {
   59.43          rc = -ENOMEM;
   59.44          goto out;
    60.1 --- a/xen/common/keyhandler.c	Thu Apr 28 13:54:01 2005 +0000
    60.2 +++ b/xen/common/keyhandler.c	Fri Apr 29 07:34:47 2005 +0000
    60.3 @@ -36,7 +36,7 @@ static void keypress_softirq(void)
    60.4          (*h)(key);
    60.5  }
    60.6  
    60.7 -void handle_keypress(unsigned char key, struct xen_regs *regs)
    60.8 +void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
    60.9  {
   60.10      irq_keyhandler_t *h;
   60.11  
   60.12 @@ -83,13 +83,13 @@ static void show_handlers(unsigned char 
   60.13                     key_table[i].desc);
   60.14  }
   60.15  
   60.16 -static void dump_registers(unsigned char key, struct xen_regs *regs)
   60.17 +static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
   60.18  {
   60.19      printk("'%c' pressed -> dumping registers\n", key); 
   60.20      show_registers(regs); 
   60.21  }
   60.22  
   60.23 -static void halt_machine(unsigned char key, struct xen_regs *regs)
   60.24 +static void halt_machine(unsigned char key, struct cpu_user_regs *regs)
   60.25  {
   60.26      printk("'%c' pressed -> rebooting machine\n", key); 
   60.27      machine_restart(NULL); 
   60.28 @@ -125,9 +125,12 @@ static void do_task_queues(unsigned char
   60.29              printk("Notifying guest... %d/%d\n", d->id, ed->eid); 
   60.30              printk("port %d/%d stat %d %d %d\n",
   60.31                     VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
   60.32 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]),
   60.33 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]),
   60.34 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel));
   60.35 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   60.36 +                            &d->shared_info->evtchn_pending[0]),
   60.37 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   60.38 +                            &d->shared_info->evtchn_mask[0]),
   60.39 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, 
   60.40 +                            &ed->vcpu_info->evtchn_pending_sel));
   60.41              send_guest_virq(ed, VIRQ_DEBUG);
   60.42          }
   60.43      }
   60.44 @@ -147,7 +150,7 @@ extern void perfc_printall(unsigned char
   60.45  extern void perfc_reset(unsigned char key);
   60.46  #endif
   60.47  
   60.48 -void do_debug_key(unsigned char key, struct xen_regs *regs)
   60.49 +void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
   60.50  {
   60.51      (void)debugger_trap_fatal(0xf001, regs);
   60.52      nop(); /* Prevent the compiler doing tail call
    61.1 --- a/xen/common/schedule.c	Thu Apr 28 13:54:01 2005 +0000
    61.2 +++ b/xen/common/schedule.c	Fri Apr 29 07:34:47 2005 +0000
    61.3 @@ -228,7 +228,9 @@ long do_block(void)
    61.4  
    61.5      /* Check for events /after/ blocking: avoids wakeup waiting race. */
    61.6      if ( event_pending(ed) )
    61.7 +    {
    61.8          clear_bit(EDF_BLOCKED, &ed->ed_flags);
    61.9 +    }
   61.10      else
   61.11      {
   61.12          TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
   61.13 @@ -382,7 +384,7 @@ static void __enter_scheduler(void)
   61.14      spin_unlock_irq(&schedule_data[cpu].schedule_lock);
   61.15  
   61.16      if ( unlikely(prev == next) )
   61.17 -        return;
   61.18 +        return continue_running(prev);
   61.19      
   61.20      perfc_incrc(sched_ctx);
   61.21  
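
The brace-only change in do_block() leaves the lost-wakeup-free ordering
intact: the vcpu is marked blocked first and events are re-checked
afterwards, so a wakeup arriving in between is never missed. In outline
(a sketch of the surrounding code, which this hunk only partially shows):

    set_bit(EDF_BLOCKED, &ed->ed_flags);        /* 1. declare the block   */
    if ( event_pending(ed) )                    /* 2. re-check afterwards */
    {
        clear_bit(EDF_BLOCKED, &ed->ed_flags);  /* event won: stay awake  */
    }
    else
    {
        TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
        __enter_scheduler();                    /* 3. really sleep        */
    }
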
    62.1 --- a/xen/drivers/char/console.c	Thu Apr 28 13:54:01 2005 +0000
    62.2 +++ b/xen/drivers/char/console.c	Fri Apr 29 07:34:47 2005 +0000
    62.3 @@ -260,7 +260,7 @@ static void switch_serial_input(void)
    62.4      }
    62.5  }
    62.6  
    62.7 -static void __serial_rx(unsigned char c, struct xen_regs *regs)
    62.8 +static void __serial_rx(unsigned char c, struct cpu_user_regs *regs)
    62.9  {
   62.10      if ( xen_rx )
   62.11      {
   62.12 @@ -274,7 +274,7 @@ static void __serial_rx(unsigned char c,
   62.13      }
   62.14  }
   62.15  
   62.16 -static void serial_rx(unsigned char c, struct xen_regs *regs)
   62.17 +static void serial_rx(unsigned char c, struct cpu_user_regs *regs)
   62.18  {
   62.19      static int switch_code_count = 0;
   62.20  
    63.1 --- a/xen/drivers/char/serial.c	Thu Apr 28 13:54:01 2005 +0000
    63.2 +++ b/xen/drivers/char/serial.c	Fri Apr 29 07:34:47 2005 +0000
    63.3 @@ -105,7 +105,7 @@ static struct uart com[2] = {
    63.4   * PRIVATE FUNCTIONS
    63.5   */
    63.6  
    63.7 -static void uart_rx(struct uart *uart, struct xen_regs *regs)
    63.8 +static void uart_rx(struct uart *uart, struct cpu_user_regs *regs)
    63.9  {
   63.10      unsigned char c;
   63.11  
   63.12 @@ -132,7 +132,7 @@ static void uart_rx(struct uart *uart, s
   63.13  }
   63.14  
   63.15  static void serial_interrupt(
   63.16 -    int irq, void *dev_id, struct xen_regs *regs)
   63.17 +    int irq, void *dev_id, struct cpu_user_regs *regs)
   63.18  {
   63.19      uart_rx((struct uart *)dev_id, regs);
   63.20  }
    64.1 --- a/xen/include/asm-ia64/debugger.h	Thu Apr 28 13:54:01 2005 +0000
    64.2 +++ b/xen/include/asm-ia64/debugger.h	Fri Apr 29 07:34:47 2005 +0000
    64.3 @@ -26,13 +26,13 @@
    64.4  
    64.5  /* The main trap handlers use these helper macros which include early bail. */
    64.6  static inline int debugger_trap_entry(
    64.7 -    unsigned int vector, struct xen_regs *regs)
    64.8 +    unsigned int vector, struct cpu_user_regs *regs)
    64.9  {
   64.10      return 0;
   64.11  }
   64.12  
   64.13  static inline int debugger_trap_fatal(
   64.14 -    unsigned int vector, struct xen_regs *regs)
   64.15 +    unsigned int vector, struct cpu_user_regs *regs)
   64.16  {
   64.17      return 0;
   64.18  }
    65.1 --- a/xen/include/asm-ia64/domain.h	Thu Apr 28 13:54:01 2005 +0000
    65.2 +++ b/xen/include/asm-ia64/domain.h	Fri Apr 29 07:34:47 2005 +0000
    65.3 @@ -6,7 +6,7 @@
    65.4  extern void arch_do_createdomain(struct exec_domain *);
    65.5  
    65.6  extern int arch_final_setup_guestos(
    65.7 -    struct exec_domain *, full_execution_context_t *);
    65.8 +    struct exec_domain *, struct vcpu_guest_context *);
    65.9  
   65.10  extern void domain_relinquish_resources(struct domain *);
   65.11  
    66.1 --- a/xen/include/asm-ia64/regs.h	Thu Apr 28 13:54:01 2005 +0000
    66.2 +++ b/xen/include/asm-ia64/regs.h	Fri Apr 29 07:34:47 2005 +0000
    66.3 @@ -1,2 +1,2 @@
    66.4  #include <asm/ptrace.h>
    66.5 -#define xen_regs pt_regs
    66.6 +#define cpu_user_regs pt_regs
    67.1 --- a/xen/include/asm-x86/apic.h	Thu Apr 28 13:54:01 2005 +0000
    67.2 +++ b/xen/include/asm-x86/apic.h	Fri Apr 29 07:34:47 2005 +0000
    67.3 @@ -74,10 +74,10 @@ extern void sync_Arb_IDs (void);
    67.4  extern void init_bsp_APIC (void);
    67.5  extern void setup_local_APIC (void);
    67.6  extern void init_apic_mappings (void);
    67.7 -extern void smp_local_timer_interrupt (struct xen_regs * regs);
    67.8 +extern void smp_local_timer_interrupt (struct cpu_user_regs * regs);
    67.9  extern void setup_APIC_clocks (void);
   67.10  extern void setup_apic_nmi_watchdog (void);
   67.11 -extern void nmi_watchdog_tick (struct xen_regs * regs);
   67.12 +extern void nmi_watchdog_tick (struct cpu_user_regs * regs);
   67.13  extern void touch_nmi_watchdog(void);
   67.14  extern int APIC_init_uniprocessor (void);
   67.15  extern void disable_APIC_timer(void);
    68.1 --- a/xen/include/asm-x86/config.h	Thu Apr 28 13:54:01 2005 +0000
    68.2 +++ b/xen/include/asm-x86/config.h	Fri Apr 29 07:34:47 2005 +0000
    68.3 @@ -64,16 +64,13 @@
    68.4  /* Linkage for x86 */
    68.5  #define __ALIGN .align 16,0x90
    68.6  #define __ALIGN_STR ".align 16,0x90"
    68.7 -#define SYMBOL_NAME_STR(X) #X
    68.8 -#define SYMBOL_NAME(X) X
    68.9 -#define SYMBOL_NAME_LABEL(X) X##:
   68.10  #ifdef __ASSEMBLY__
   68.11  #define ALIGN __ALIGN
   68.12  #define ALIGN_STR __ALIGN_STR
   68.13 -#define ENTRY(name) \
   68.14 -  .globl SYMBOL_NAME(name); \
   68.15 -  ALIGN; \
   68.16 -  SYMBOL_NAME_LABEL(name)
   68.17 +#define ENTRY(name)                             \
   68.18 +  .globl name;                                  \
   68.19 +  ALIGN;                                        \
   68.20 +  name:
   68.21  #endif
   68.22  
   68.23  #define barrier() __asm__ __volatile__("": : :"memory")
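
With the SYMBOL_NAME family gone (Xen only targets ELF, where C symbols
carry no underscore prefix), ENTRY() expands directly. For illustration,
ENTRY(nmi) from entry.S now becomes:

    .globl nmi
    .align 16,0x90          /* __ALIGN */
    nmi:
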
    69.1 --- a/xen/include/asm-x86/debugger.h	Thu Apr 28 13:54:01 2005 +0000
    69.2 +++ b/xen/include/asm-x86/debugger.h	Fri Apr 29 07:34:47 2005 +0000
    69.3 @@ -38,11 +38,11 @@
    69.4  #define DEBUGGER_trap_fatal(_v, _r) \
    69.5      if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed;
    69.6  
    69.7 -int call_with_registers(int (*f)(struct xen_regs *r));
    69.8 +int call_with_registers(int (*f)(struct cpu_user_regs *r));
    69.9  
   69.10  #if defined(CRASH_DEBUG)
   69.11  
   69.12 -extern int __trap_to_cdb(struct xen_regs *r);
   69.13 +extern int __trap_to_cdb(struct cpu_user_regs *r);
   69.14  #define debugger_trap_entry(_v, _r) (0)
   69.15  #define debugger_trap_fatal(_v, _r) __trap_to_cdb(_r)
   69.16  #define debugger_trap_immediate() call_with_registers(__trap_to_cdb)
   69.17 @@ -52,7 +52,7 @@ extern int __trap_to_cdb(struct xen_regs
   69.18  #include <xen/softirq.h>
   69.19  
   69.20  static inline int debugger_trap_entry(
   69.21 -    unsigned int vector, struct xen_regs *regs)
   69.22 +    unsigned int vector, struct cpu_user_regs *regs)
   69.23  {
   69.24      struct exec_domain *ed = current;
   69.25  
   69.26 @@ -77,16 +77,16 @@ static inline int debugger_trap_entry(
   69.27  
   69.28  #elif 0
   69.29  
   69.30 -extern int kdb_trap(int, int, struct xen_regs *);
   69.31 +extern int kdb_trap(int, int, struct cpu_user_regs *);
   69.32  
   69.33  static inline int debugger_trap_entry(
   69.34 -    unsigned int vector, struct xen_regs *regs)
   69.35 +    unsigned int vector, struct cpu_user_regs *regs)
   69.36  {
   69.37      return 0;
   69.38  }
   69.39  
   69.40  static inline int debugger_trap_fatal(
   69.41 -    unsigned int vector, struct xen_regs *regs)
   69.42 +    unsigned int vector, struct cpu_user_regs *regs)
   69.43  {
   69.44      return kdb_trap(vector, 0, regs);
   69.45  }
    70.1 --- a/xen/include/asm-x86/domain.h	Thu Apr 28 13:54:01 2005 +0000
    70.2 +++ b/xen/include/asm-x86/domain.h	Fri Apr 29 07:34:47 2005 +0000
    70.3 @@ -66,38 +66,12 @@ struct arch_domain
    70.4  
    70.5  struct arch_exec_domain
    70.6  {
    70.7 -    unsigned long      kernel_sp;
    70.8 -    unsigned long      kernel_ss;
    70.9 +    struct vcpu_guest_context guest_context;
   70.10  
   70.11      unsigned long      flags; /* TF_ */
   70.12  
   70.13 -    /* Hardware debugging registers */
   70.14 -    unsigned long      debugreg[8];  /* %%db0-7 debug registers */
   70.15 -
   70.16 -    /* floating point info */
   70.17 -    struct i387_state  i387;
   70.18 -
   70.19 -    /* general user-visible register state */
   70.20 -    execution_context_t user_ctxt;
   70.21 -
   70.22      void (*schedule_tail) (struct exec_domain *);
   70.23  
   70.24 -    /*
   70.25 -     * Return vectors pushed to us by guest OS.
   70.26 -     * The stack frame for events is exactly that of an x86 hardware interrupt.
   70.27 -     * The stack frame for a failsafe callback is augmented with saved values
   70.28 -     * for segment registers %ds, %es, %fs and %gs:
   70.29 -     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
   70.30 -     */
   70.31 -
   70.32 -    unsigned long event_selector;    /* entry CS  (x86/32 only) */
   70.33 -    unsigned long event_address;     /* entry EIP */
   70.34 -
   70.35 -    unsigned long failsafe_selector; /* entry CS  (x86/32 only) */
   70.36 -    unsigned long failsafe_address;  /* entry EIP */
   70.37 -
   70.38 -    unsigned long syscall_address;   /* entry EIP (x86/64 only) */
   70.39 -
   70.40      /* Bounce information for propagating an exception to guest OS. */
   70.41      struct trap_bounce trap_bounce;
   70.42  
   70.43 @@ -108,10 +82,8 @@ struct arch_exec_domain
   70.44  
   70.45      /* Trap info. */
   70.46  #ifdef ARCH_HAS_FAST_TRAP
   70.47 -    int                fast_trap_idx;
   70.48      struct desc_struct fast_trap_desc;
   70.49  #endif
   70.50 -    trap_info_t        traps[256];
   70.51  
   70.52      /* Virtual Machine Extensions */
   70.53      struct arch_vmx_struct arch_vmx;
   70.54 @@ -143,7 +115,7 @@ struct arch_exec_domain
   70.55      unsigned long guest_cr2;
   70.56  
   70.57      /* Current LDT details. */
   70.58 -    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
   70.59 +    unsigned long shadow_ldt_mapcnt;
   70.60      /* Next entry is passed to LGDT on domain switch. */
   70.61      char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
   70.62  } __cacheline_aligned;
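
The uses in this changeset imply at least the following members of the new
guest_context; this is a sketch reconstructed from those uses, not the
authoritative public definition of struct vcpu_guest_context:

    struct vcpu_guest_context {
        /* ...presumably also absorbs the removed i387/user_ctxt state... */
        trap_info_t   trap_ctxt[256];        /* was arch.traps            */
        int           fast_trap_idx;         /* ARCH_HAS_FAST_TRAP        */
        unsigned long ldt_base, ldt_ents;    /* was in arch_exec_domain   */
        unsigned long kernel_ss, kernel_sp;  /* ring-switch stack         */
        unsigned long event_callback_cs,    event_callback_eip;
        unsigned long failsafe_callback_cs, failsafe_callback_eip;
        unsigned long syscall_callback_eip;  /* x86/64 only               */
        unsigned long fs_base;               /* x86/64 only               */
        unsigned long gs_base_kernel, gs_base_user;
    };
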
    71.1 --- a/xen/include/asm-x86/ldt.h	Thu Apr 28 13:54:01 2005 +0000
    71.2 +++ b/xen/include/asm-x86/ldt.h	Fri Apr 29 07:34:47 2005 +0000
    71.3 @@ -10,7 +10,7 @@ static inline void load_LDT(struct exec_
    71.4      struct desc_struct *desc;
    71.5      unsigned long ents;
    71.6  
    71.7 -    if ( (ents = ed->arch.ldt_ents) == 0 )
    71.8 +    if ( (ents = ed->arch.guest_context.ldt_ents) == 0 )
    71.9      {
   71.10          __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
   71.11      }
    72.1 --- a/xen/include/asm-x86/processor.h	Thu Apr 28 13:54:01 2005 +0000
    72.2 +++ b/xen/include/asm-x86/processor.h	Fri Apr 29 07:34:47 2005 +0000
    72.3 @@ -110,7 +110,7 @@
    72.4  #define TRAP_deferred_nmi     31
    72.5  
    72.6  /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
    72.7 -/* NB. Same as ECF_IN_SYSCALL. No bits in common with any other TRAP_* defn. */
    72.8 +/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_* defn. */
    72.9  #define TRAP_syscall         256
   72.10  
   72.11  /*
   72.12 @@ -191,7 +191,9 @@ extern void dodgy_tsc(void);
   72.13  /*
   72.14   * Generic CPUID function
   72.15   */
   72.16 -static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
   72.17 +static inline void cpuid(
   72.18 +    int op, unsigned int *eax, unsigned int *ebx,
   72.19 +    unsigned int *ecx, unsigned int *edx)
   72.20  {
   72.21      __asm__("cpuid"
   72.22              : "=a" (*eax),
   72.23 @@ -330,10 +332,6 @@ static inline void clear_in_cr4 (unsigne
   72.24  #define IOBMP_BYTES             8192
   72.25  #define IOBMP_INVALID_OFFSET    0x8000
   72.26  
   72.27 -struct i387_state {
   72.28 -    u8 state[512]; /* big enough for FXSAVE */
   72.29 -} __attribute__ ((aligned (16)));
   72.30 -
   72.31  struct tss_struct {
   72.32      unsigned short	back_link,__blh;
   72.33  #ifdef __x86_64__
   72.34 @@ -382,16 +380,18 @@ extern struct tss_struct init_tss[NR_CPU
   72.35  #ifdef ARCH_HAS_FAST_TRAP
   72.36  
   72.37  #define SET_DEFAULT_FAST_TRAP(_p) \
   72.38 -    (_p)->fast_trap_idx = 0x20;   \
   72.39 +    (_p)->guest_context.fast_trap_idx = 0x20;   \
   72.40      (_p)->fast_trap_desc.a = 0;   \
   72.41      (_p)->fast_trap_desc.b = 0;
   72.42  
   72.43  #define CLEAR_FAST_TRAP(_p) \
   72.44 -    (memset(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
   72.45 -     0, 8))
   72.46 +    (memset(idt_tables[smp_processor_id()] + \
   72.47 +            (_p)->guest_context.fast_trap_idx, \
   72.48 +            0, 8))
   72.49  
   72.50  #define SET_FAST_TRAP(_p)   \
   72.51 -    (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
   72.52 +    (memcpy(idt_tables[smp_processor_id()] + \
   72.53 +            (_p)->guest_context.fast_trap_idx, \
   72.54              &((_p)->fast_trap_desc), 8))
   72.55  
   72.56  long set_fast_trap(struct exec_domain *p, int idx);
   72.57 @@ -405,7 +405,7 @@ long set_fast_trap(struct exec_domain *p
   72.58  
   72.59  #endif
   72.60  
   72.61 -extern int gpf_emulate_4gb(struct xen_regs *regs);
   72.62 +extern int gpf_emulate_4gb(struct cpu_user_regs *regs);
   72.63  
   72.64  extern void write_ptbase(struct exec_domain *ed);
   72.65  
   72.66 @@ -499,9 +499,9 @@ extern inline void prefetchw(const void 
   72.67  void show_guest_stack();
   72.68  void show_trace(unsigned long *esp);
   72.69  void show_stack(unsigned long *esp);
   72.70 -void show_registers(struct xen_regs *regs);
   72.71 +void show_registers(struct cpu_user_regs *regs);
   72.72  void show_page_walk(unsigned long addr);
   72.73 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs);
   72.74 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
   72.75  
   72.76  #endif /* !__ASSEMBLY__ */
   72.77  
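
The reflowed cpuid() wrapper is unchanged in behaviour; typical use:

    unsigned int eax, ebx, ecx, edx;
    cpuid(1, &eax, &ebx, &ecx, &edx);     /* leaf 1: feature flags */
    if ( edx & (1u << 25) )               /* EDX bit 25 = SSE      */
        printk("SSE supported\n");
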
    73.1 --- a/xen/include/asm-x86/shadow.h	Thu Apr 28 13:54:01 2005 +0000
    73.2 +++ b/xen/include/asm-x86/shadow.h	Fri Apr 29 07:34:47 2005 +0000
    73.3 @@ -63,7 +63,7 @@
    73.4  
    73.5  extern void shadow_mode_init(void);
    73.6  extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
    73.7 -extern int shadow_fault(unsigned long va, struct xen_regs *regs);
    73.8 +extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
    73.9  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
   73.10  extern void shadow_invlpg(struct exec_domain *, unsigned long);
   73.11  extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
    74.1 --- a/xen/include/asm-x86/vmx.h	Thu Apr 28 13:54:01 2005 +0000
    74.2 +++ b/xen/include/asm-x86/vmx.h	Fri Apr 29 07:34:47 2005 +0000
    74.3 @@ -25,7 +25,7 @@
    74.4  #include <asm/processor.h>
    74.5  #include <asm/vmx_vmcs.h>
    74.6  
    74.7 -extern void vmx_asm_vmexit_handler(struct xen_regs);
    74.8 +extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
    74.9  extern void vmx_asm_do_resume(void);
   74.10  extern void vmx_asm_do_launch(void);
   74.11  extern void vmx_intr_assist(struct exec_domain *d);
    75.1 --- a/xen/include/asm-x86/vmx_platform.h	Thu Apr 28 13:54:01 2005 +0000
    75.2 +++ b/xen/include/asm-x86/vmx_platform.h	Fri Apr 29 07:34:47 2005 +0000
    75.3 @@ -73,7 +73,7 @@ struct instruction {
    75.4  struct mi_per_cpu_info
    75.5  {
    75.6      unsigned long          mmio_target;
    75.7 -    struct xen_regs        *inst_decoder_regs;
    75.8 +    struct cpu_user_regs   *inst_decoder_regs;
    75.9  };
   75.10  
   75.11  struct virutal_platform_def {
   75.12 @@ -85,7 +85,7 @@ struct virutal_platform_def {
   75.13  };
   75.14  
   75.15  extern void handle_mmio(unsigned long, unsigned long);
   75.16 -extern int vmx_setup_platform(struct exec_domain *, execution_context_t *);
   75.17 +extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
   75.18  
   75.19  // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
   75.20  #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
    76.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Thu Apr 28 13:54:01 2005 +0000
    76.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Fri Apr 29 07:34:47 2005 +0000
    76.3 @@ -65,8 +65,8 @@ void free_vmcs(struct vmcs_struct *);
    76.4  int  load_vmcs(struct arch_vmx_struct *, u64);
    76.5  int  store_vmcs(struct arch_vmx_struct *, u64);
    76.6  void dump_vmcs(void);
    76.7 -int  construct_vmcs(struct arch_vmx_struct *, execution_context_t *, 
    76.8 -                    full_execution_context_t *, int);
    76.9 +int  construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *, 
   76.10 +                    struct vcpu_guest_context *, int);
   76.11  
   76.12  #define VMCS_USE_HOST_ENV       1
   76.13  #define VMCS_USE_SEPARATE_ENV   0
    77.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Thu Apr 28 13:54:01 2005 +0000
    77.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Fri Apr 29 07:34:47 2005 +0000
    77.3 @@ -4,81 +4,81 @@
    77.4  /* Maybe auto-generate the following two cases (quoted vs. unquoted). */
    77.5  #ifndef __ASSEMBLY__
    77.6  
    77.7 -#define __SAVE_ALL_PRE \
    77.8 -        "cld;" \
    77.9 -        "pushl %eax;" \
   77.10 -        "pushl %ebp;" \
   77.11 -        "pushl %edi;" \
   77.12 -        "pushl %esi;" \
   77.13 -        "pushl %edx;" \
   77.14 -        "pushl %ecx;" \
   77.15 -        "pushl %ebx;" \
   77.16 -        "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \
   77.17 -        "jz 2f;" \
   77.18 -        "call setup_vm86_frame;" \
   77.19 -        "jmp 3f;" \
   77.20 -        "2:testb $3,"STR(XREGS_cs)"(%esp);" \
   77.21 -        "jz 1f;" \
   77.22 -        "movl %ds,"STR(XREGS_ds)"(%esp);" \
   77.23 -        "movl %es,"STR(XREGS_es)"(%esp);" \
   77.24 -        "movl %fs,"STR(XREGS_fs)"(%esp);" \
   77.25 -        "movl %gs,"STR(XREGS_gs)"(%esp);" \
   77.26 +#define __SAVE_ALL_PRE                                                  \
   77.27 +        "cld;"                                                          \
   77.28 +        "pushl %eax;"                                                   \
   77.29 +        "pushl %ebp;"                                                   \
   77.30 +        "pushl %edi;"                                                   \
   77.31 +        "pushl %esi;"                                                   \
   77.32 +        "pushl %edx;"                                                   \
   77.33 +        "pushl %ecx;"                                                   \
   77.34 +        "pushl %ebx;"                                                   \
   77.35 +        "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);"        \
   77.36 +        "jz 2f;"                                                        \
   77.37 +        "call setup_vm86_frame;"                                        \
   77.38 +        "jmp 3f;"                                                       \
   77.39 +        "2:testb $3,"STR(UREGS_cs)"(%esp);"                             \
   77.40 +        "jz 1f;"                                                        \
   77.41 +        "movl %ds,"STR(UREGS_ds)"(%esp);"                               \
   77.42 +        "movl %es,"STR(UREGS_es)"(%esp);"                               \
   77.43 +        "movl %fs,"STR(UREGS_fs)"(%esp);"                               \
   77.44 +        "movl %gs,"STR(UREGS_gs)"(%esp);"                               \
   77.45          "3:"
   77.46  
   77.47 -#define SAVE_ALL_NOSEGREGS(_reg) \
   77.48 -        __SAVE_ALL_PRE \
   77.49 +#define SAVE_ALL_NOSEGREGS(_reg)                \
   77.50 +        __SAVE_ALL_PRE                          \
   77.51          "1:"
   77.52  
   77.53 -#define SET_XEN_SEGMENTS(_reg) \
   77.54 -        "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \
   77.55 -        "movl %e"STR(_reg)"x,%ds;" \
   77.56 +#define SET_XEN_SEGMENTS(_reg)                                  \
   77.57 +        "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;"        \
   77.58 +        "movl %e"STR(_reg)"x,%ds;"                              \
   77.59          "movl %e"STR(_reg)"x,%es;"
   77.60  
   77.61 -#define SAVE_ALL(_reg) \
   77.62 -        __SAVE_ALL_PRE \
   77.63 -        SET_XEN_SEGMENTS(_reg) \
   77.64 +#define SAVE_ALL(_reg)                          \
   77.65 +        __SAVE_ALL_PRE                          \
   77.66 +        SET_XEN_SEGMENTS(_reg)                  \
   77.67          "1:"
   77.68  
   77.69  #else
   77.70  
   77.71 -#define __SAVE_ALL_PRE \
   77.72 -        cld; \
   77.73 -        pushl %eax; \
   77.74 -        pushl %ebp; \
   77.75 -        pushl %edi; \
   77.76 -        pushl %esi; \
   77.77 -        pushl %edx; \
   77.78 -        pushl %ecx; \
   77.79 -        pushl %ebx; \
   77.80 -        testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \
   77.81 -        jz 2f; \
   77.82 -        call setup_vm86_frame; \
   77.83 -        jmp 3f; \
   77.84 -        2:testb $3,XREGS_cs(%esp); \
   77.85 -        jz 1f; \
   77.86 -        movl %ds,XREGS_ds(%esp); \
   77.87 -        movl %es,XREGS_es(%esp); \
   77.88 -        movl %fs,XREGS_fs(%esp); \
   77.89 -        movl %gs,XREGS_gs(%esp); \
   77.90 +#define __SAVE_ALL_PRE                                  \
   77.91 +        cld;                                            \
   77.92 +        pushl %eax;                                     \
   77.93 +        pushl %ebp;                                     \
   77.94 +        pushl %edi;                                     \
   77.95 +        pushl %esi;                                     \
   77.96 +        pushl %edx;                                     \
   77.97 +        pushl %ecx;                                     \
   77.98 +        pushl %ebx;                                     \
   77.99 +        testl $X86_EFLAGS_VM,UREGS_eflags(%esp);        \
  77.100 +        jz 2f;                                          \
  77.101 +        call setup_vm86_frame;                          \
  77.102 +        jmp 3f;                                         \
  77.103 +        2:testb $3,UREGS_cs(%esp);                      \
  77.104 +        jz 1f;                                          \
  77.105 +        movl %ds,UREGS_ds(%esp);                        \
  77.106 +        movl %es,UREGS_es(%esp);                        \
  77.107 +        movl %fs,UREGS_fs(%esp);                        \
  77.108 +        movl %gs,UREGS_gs(%esp);                        \
  77.109          3:
  77.110  
  77.111 -#define SAVE_ALL_NOSEGREGS(_reg) \
  77.112 -        __SAVE_ALL_PRE \
  77.113 +#define SAVE_ALL_NOSEGREGS(_reg)                \
  77.114 +        __SAVE_ALL_PRE                          \
  77.115          1:
  77.116  
  77.117 -#define SET_XEN_SEGMENTS(_reg) \
  77.118 -        movl $(__HYPERVISOR_DS),%e ## _reg ## x; \
  77.119 -        movl %e ## _reg ## x,%ds; \
  77.120 +#define SET_XEN_SEGMENTS(_reg)                          \
  77.121 +        movl $(__HYPERVISOR_DS),%e ## _reg ## x;        \
  77.122 +        movl %e ## _reg ## x,%ds;                       \
  77.123          movl %e ## _reg ## x,%es;
  77.124  
  77.125 -#define SAVE_ALL(_reg) \
  77.126 -        __SAVE_ALL_PRE \
  77.127 -        SET_XEN_SEGMENTS(_reg) \
  77.128 +#define SAVE_ALL(_reg)                          \
  77.129 +        __SAVE_ALL_PRE                          \
  77.130 +        SET_XEN_SEGMENTS(_reg)                  \
  77.131          1:
  77.132  
  77.133  #ifdef PERF_COUNTERS
  77.134 -#define PERFC_INCR(_name,_idx) \
  77.135 -    lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4)
  77.136 +#define PERFC_INCR(_name,_idx)                          \
  77.137 +    lock incl perfcounters+_name(,_idx,4)
  77.138  #else
  77.139  #define PERFC_INCR(_name,_idx)
  77.140  #endif
  77.141 @@ -86,50 +86,50 @@
  77.142  #endif
  77.143  
  77.144  #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
  77.145 -#define XBUILD_SMP_INTERRUPT(x,v)\
  77.146 -asmlinkage void x(void); \
  77.147 -__asm__( \
  77.148 -    "\n"__ALIGN_STR"\n" \
  77.149 -    SYMBOL_NAME_STR(x) ":\n\t" \
  77.150 -    "pushl $"#v"<<16\n\t" \
  77.151 -    SAVE_ALL(a) \
  77.152 -    "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
  77.153 +#define XBUILD_SMP_INTERRUPT(x,v)               \
  77.154 +asmlinkage void x(void);                        \
  77.155 +__asm__(                                        \
  77.156 +    "\n"__ALIGN_STR"\n"                         \
  77.157 +    STR(x) ":\n\t"                              \
  77.158 +    "pushl $"#v"<<16\n\t"                       \
  77.159 +    SAVE_ALL(a)                                 \
  77.160 +    "call "STR(smp_##x)"\n\t"                   \
  77.161      "jmp ret_from_intr\n");
  77.162  
  77.163  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
  77.164 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
  77.165 -asmlinkage void x(struct xen_regs * regs); \
  77.166 -__asm__( \
  77.167 -"\n"__ALIGN_STR"\n" \
  77.168 -SYMBOL_NAME_STR(x) ":\n\t" \
  77.169 -    "pushl $"#v"<<16\n\t" \
  77.170 -    SAVE_ALL(a) \
  77.171 -    "movl %esp,%eax\n\t" \
  77.172 -    "pushl %eax\n\t" \
  77.173 -    "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
  77.174 -    "addl $4,%esp\n\t" \
  77.175 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
  77.176 +asmlinkage void x(struct cpu_user_regs * regs); \
  77.177 +__asm__(                                        \
  77.178 +"\n"__ALIGN_STR"\n"                             \
  77.179 +STR(x) ":\n\t"                                  \
  77.180 +    "pushl $"#v"<<16\n\t"                       \
  77.181 +    SAVE_ALL(a)                                 \
  77.182 +    "movl %esp,%eax\n\t"                        \
  77.183 +    "pushl %eax\n\t"                            \
  77.184 +    "call "STR(smp_##x)"\n\t"                   \
  77.185 +    "addl $4,%esp\n\t"                          \
  77.186      "jmp ret_from_intr\n");
  77.187  
  77.188 -#define BUILD_COMMON_IRQ() \
  77.189 -__asm__( \
  77.190 -    "\n" __ALIGN_STR"\n" \
  77.191 -    "common_interrupt:\n\t" \
  77.192 -    SAVE_ALL(a) \
  77.193 -    "movl %esp,%eax\n\t" \
  77.194 -    "pushl %eax\n\t" \
  77.195 -    "call " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
  77.196 -    "addl $4,%esp\n\t" \
  77.197 +#define BUILD_COMMON_IRQ()                      \
  77.198 +__asm__(                                        \
  77.199 +    "\n" __ALIGN_STR"\n"                        \
  77.200 +    "common_interrupt:\n\t"                     \
  77.201 +    SAVE_ALL(a)                                 \
  77.202 +    "movl %esp,%eax\n\t"                        \
  77.203 +    "pushl %eax\n\t"                            \
  77.204 +    "call " STR(do_IRQ) "\n\t"                  \
  77.205 +    "addl $4,%esp\n\t"                          \
  77.206      "jmp ret_from_intr\n");
  77.207  
  77.208  #define IRQ_NAME2(nr) nr##_interrupt(void)
  77.209  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
  77.210  
  77.211 -#define BUILD_IRQ(nr) \
  77.212 -asmlinkage void IRQ_NAME(nr); \
  77.213 -__asm__( \
  77.214 -"\n"__ALIGN_STR"\n" \
  77.215 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
  77.216 -    "pushl $"#nr"<<16\n\t" \
  77.217 +#define BUILD_IRQ(nr)                           \
  77.218 +asmlinkage void IRQ_NAME(nr);                   \
  77.219 +__asm__(                                        \
  77.220 +"\n"__ALIGN_STR"\n"                             \
  77.221 +STR(IRQ) #nr "_interrupt:\n\t"                  \
  77.222 +    "pushl $"#nr"<<16\n\t"                      \
  77.223      "jmp common_interrupt");
  77.224  
  77.225  #endif /* __X86_32_ASM_DEFNS_H__ */
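
The STR() macro that replaces SYMBOL_NAME_STR() in the stubs above relies on the usual two-level stringification idiom. A minimal sketch, assuming a definition along these lines in a common header (the exact location, e.g. xen/include/xen/config.h, is not shown in this changeset):

    /* Two levels are needed so that macro arguments are expanded
     * before "#" turns them into a string literal. */
    #define __STR(x) #x
    #define STR(x)   __STR(x)

    #define VECTOR 0x20
    /* STR(VECTOR)   expands to "0x20": the argument is expanded first. */
    /* __STR(VECTOR) would expand to "VECTOR": no expansion before "#". */
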
    78.1 --- a/xen/include/asm-x86/x86_32/current.h	Thu Apr 28 13:54:01 2005 +0000
    78.2 +++ b/xen/include/asm-x86/x86_32/current.h	Fri Apr 29 07:34:47 2005 +0000
    78.3 @@ -5,7 +5,7 @@
    78.4  struct domain;
    78.5  
    78.6  #define STACK_RESERVED \
    78.7 -    (sizeof(execution_context_t) + sizeof(struct domain *))
    78.8 +    (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
    78.9  
   78.10  static inline struct exec_domain *get_current(void)
   78.11  {
   78.12 @@ -23,13 +23,13 @@ static inline void set_current(struct ex
   78.13                : : "r" (STACK_SIZE-4), "r" (ed) );    
   78.14  }
   78.15  
   78.16 -static inline execution_context_t *get_execution_context(void)
   78.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void)
   78.18  {
   78.19 -    execution_context_t *execution_context;
   78.20 +    struct cpu_user_regs *cpu_user_regs;
   78.21      __asm__ ( "andl %%esp,%0; addl %2,%0"
   78.22 -              : "=r" (execution_context) 
   78.23 +              : "=r" (cpu_user_regs) 
   78.24                : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
   78.25 -    return execution_context;
   78.26 +    return cpu_user_regs;
   78.27  }
   78.28  
   78.29  /*
   78.30 @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bo
   78.31  #define reset_stack_and_jump(__fn)                                \
   78.32      __asm__ __volatile__ (                                        \
   78.33          "movl %0,%%esp; jmp "STR(__fn)                            \
   78.34 -        : : "r" (get_execution_context()) )
   78.35 +        : : "r" (get_cpu_user_regs()) )
   78.36  
   78.37  #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   78.38  
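
The and/add pair in get_cpu_user_regs() computes the address of the register frame at the top of the current per-CPU stack. A C sketch of the same arithmetic, assuming STACK_SIZE is a power of two; the constants here are illustrative and the real code reads %esp via inline assembly:

    #define SKETCH_STACK_SIZE     8192UL  /* illustrative power of two     */
    #define SKETCH_STACK_RESERVED 76UL    /* illustrative: regs + dom ptr  */

    static unsigned long sketch_regs_address(unsigned long esp)
    {
        unsigned long base = esp & ~(SKETCH_STACK_SIZE - 1);        /* andl */
        return base + (SKETCH_STACK_SIZE - SKETCH_STACK_RESERVED);  /* addl */
    }
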
    79.1 --- a/xen/include/asm-x86/x86_32/regs.h	Thu Apr 28 13:54:01 2005 +0000
    79.2 +++ b/xen/include/asm-x86/x86_32/regs.h	Fri Apr 29 07:34:47 2005 +0000
    79.3 @@ -16,6 +16,6 @@
    79.4      ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3)))
    79.5  
    79.6  /* Number of bytes of on-stack execution state to be context-switched. */
    79.7 -#define CTXT_SWITCH_STACK_BYTES (sizeof(execution_context_t))
    79.8 +#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
    79.9  
   79.10  #endif
    80.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Thu Apr 28 13:54:01 2005 +0000
    80.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Fri Apr 29 07:34:47 2005 +0000
    80.3 @@ -4,87 +4,87 @@
    80.4  /* Maybe auto-generate the following two cases (quoted vs. unquoted). */
    80.5  #ifndef __ASSEMBLY__
    80.6  
    80.7 -#define SAVE_ALL \
    80.8 -        "cld;" \
    80.9 -        "pushq %rdi;" \
   80.10 -        "pushq %rsi;" \
   80.11 -        "pushq %rdx;" \
   80.12 -        "pushq %rcx;" \
   80.13 -        "pushq %rax;" \
   80.14 -        "pushq %r8;" \
   80.15 -        "pushq %r9;" \
   80.16 -        "pushq %r10;" \
   80.17 -        "pushq %r11;" \
   80.18 -        "pushq %rbx;" \
   80.19 -        "pushq %rbp;" \
   80.20 -        "pushq %r12;" \
   80.21 -        "pushq %r13;" \
   80.22 -        "pushq %r14;" \
   80.23 +#define SAVE_ALL                                \
   80.24 +        "cld;"                                  \
   80.25 +        "pushq %rdi;"                           \
   80.26 +        "pushq %rsi;"                           \
   80.27 +        "pushq %rdx;"                           \
   80.28 +        "pushq %rcx;"                           \
   80.29 +        "pushq %rax;"                           \
   80.30 +        "pushq %r8;"                            \
   80.31 +        "pushq %r9;"                            \
   80.32 +        "pushq %r10;"                           \
   80.33 +        "pushq %r11;"                           \
   80.34 +        "pushq %rbx;"                           \
   80.35 +        "pushq %rbp;"                           \
   80.36 +        "pushq %r12;"                           \
   80.37 +        "pushq %r13;"                           \
   80.38 +        "pushq %r14;"                           \
   80.39          "pushq %r15;"
   80.40  
   80.41 -#define RESTORE_ALL \
   80.42 -        "popq  %r15;" \
   80.43 -        "popq  %r14;" \
   80.44 -        "popq  %r13;" \
   80.45 -        "popq  %r12;" \
   80.46 -        "popq  %rbp;" \
   80.47 -        "popq  %rbx;" \
   80.48 -        "popq  %r11;" \
   80.49 -        "popq  %r10;" \
   80.50 -        "popq  %r9;" \
   80.51 -        "popq  %r8;" \
   80.52 -        "popq  %rax;" \
   80.53 -        "popq  %rcx;" \
   80.54 -        "popq  %rdx;" \
   80.55 -        "popq  %rsi;" \
   80.56 +#define RESTORE_ALL                             \
   80.57 +        "popq  %r15;"                           \
   80.58 +        "popq  %r14;"                           \
   80.59 +        "popq  %r13;"                           \
   80.60 +        "popq  %r12;"                           \
   80.61 +        "popq  %rbp;"                           \
   80.62 +        "popq  %rbx;"                           \
   80.63 +        "popq  %r11;"                           \
   80.64 +        "popq  %r10;"                           \
   80.65 +        "popq  %r9;"                            \
   80.66 +        "popq  %r8;"                            \
   80.67 +        "popq  %rax;"                           \
   80.68 +        "popq  %rcx;"                           \
   80.69 +        "popq  %rdx;"                           \
   80.70 +        "popq  %rsi;"                           \
   80.71          "popq  %rdi;"
   80.72  
   80.73  /* Work around AMD erratum #88 */
   80.74 -#define safe_swapgs \
   80.75 +#define safe_swapgs                             \
   80.76          "mfence; swapgs;"
   80.77  
   80.78  #else
   80.79  
   80.80 -#define SAVE_ALL \
   80.81 -        cld; \
   80.82 -        pushq %rdi; \
   80.83 -        pushq %rsi; \
   80.84 -        pushq %rdx; \
   80.85 -        pushq %rcx; \
   80.86 -        pushq %rax; \
   80.87 -        pushq %r8; \
   80.88 -        pushq %r9; \
   80.89 -        pushq %r10; \
   80.90 -        pushq %r11; \
   80.91 -        pushq %rbx; \
   80.92 -        pushq %rbp; \
   80.93 -        pushq %r12; \
   80.94 -        pushq %r13; \
   80.95 -        pushq %r14; \
   80.96 +#define SAVE_ALL                                \
   80.97 +        cld;                                    \
   80.98 +        pushq %rdi;                             \
   80.99 +        pushq %rsi;                             \
  80.100 +        pushq %rdx;                             \
  80.101 +        pushq %rcx;                             \
  80.102 +        pushq %rax;                             \
  80.103 +        pushq %r8;                              \
  80.104 +        pushq %r9;                              \
  80.105 +        pushq %r10;                             \
  80.106 +        pushq %r11;                             \
  80.107 +        pushq %rbx;                             \
  80.108 +        pushq %rbp;                             \
  80.109 +        pushq %r12;                             \
  80.110 +        pushq %r13;                             \
  80.111 +        pushq %r14;                             \
  80.112          pushq %r15;
  80.113  
  80.114 -#define RESTORE_ALL \
  80.115 -        popq  %r15; \
  80.116 -        popq  %r14; \
  80.117 -        popq  %r13; \
  80.118 -        popq  %r12; \
  80.119 -        popq  %rbp; \
  80.120 -        popq  %rbx; \
  80.121 -        popq  %r11; \
  80.122 -        popq  %r10; \
  80.123 -        popq  %r9; \
  80.124 -        popq  %r8; \
  80.125 -        popq  %rax; \
  80.126 -        popq  %rcx; \
  80.127 -        popq  %rdx; \
  80.128 -        popq  %rsi; \
  80.129 +#define RESTORE_ALL                             \
  80.130 +        popq  %r15;                             \
  80.131 +        popq  %r14;                             \
  80.132 +        popq  %r13;                             \
  80.133 +        popq  %r12;                             \
  80.134 +        popq  %rbp;                             \
  80.135 +        popq  %rbx;                             \
  80.136 +        popq  %r11;                             \
  80.137 +        popq  %r10;                             \
  80.138 +        popq  %r9;                              \
  80.139 +        popq  %r8;                              \
  80.140 +        popq  %rax;                             \
  80.141 +        popq  %rcx;                             \
  80.142 +        popq  %rdx;                             \
  80.143 +        popq  %rsi;                             \
  80.144          popq  %rdi;
  80.145  
  80.146  #ifdef PERF_COUNTERS
  80.147 -#define PERFC_INCR(_name,_idx) \
  80.148 -    pushq %rdx; \
  80.149 -    leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \
  80.150 -    lock incl (%rdx,_idx,4); \
  80.151 +#define PERFC_INCR(_name,_idx)                  \
  80.152 +    pushq %rdx;                                 \
  80.153 +    leaq perfcounters+_name(%rip),%rdx;         \
  80.154 +    lock incl (%rdx,_idx,4);                    \
  80.155      popq %rdx;
  80.156  #else
  80.157  #define PERFC_INCR(_name,_idx)
  80.158 @@ -93,49 +93,49 @@
  80.159  #endif
  80.160  
  80.161  #define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
  80.162 -#define XBUILD_SMP_INTERRUPT(x,v)\
  80.163 -asmlinkage void x(void); \
  80.164 -__asm__( \
  80.165 -    "\n"__ALIGN_STR"\n" \
  80.166 -    SYMBOL_NAME_STR(x) ":\n\t" \
  80.167 -    "pushq $0\n\t" \
  80.168 -    "movl $"#v",4(%rsp)\n\t" \
  80.169 -    SAVE_ALL \
  80.170 -    "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
  80.171 +#define XBUILD_SMP_INTERRUPT(x,v)               \
  80.172 +asmlinkage void x(void);                        \
  80.173 +__asm__(                                        \
  80.174 +    "\n"__ALIGN_STR"\n"                         \
  80.175 +    STR(x) ":\n\t"                              \
  80.176 +    "pushq $0\n\t"                              \
  80.177 +    "movl $"#v",4(%rsp)\n\t"                    \
  80.178 +    SAVE_ALL                                    \
  80.179 +    "callq "STR(smp_##x)"\n\t"                  \
  80.180      "jmp ret_from_intr\n");
  80.181  
  80.182  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
  80.183 -#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
  80.184 -asmlinkage void x(struct xen_regs * regs); \
  80.185 -__asm__( \
  80.186 -"\n"__ALIGN_STR"\n" \
  80.187 -SYMBOL_NAME_STR(x) ":\n\t" \
  80.188 -    "pushq $0\n\t" \
  80.189 -    "movl $"#v",4(%rsp)\n\t" \
  80.190 -    SAVE_ALL \
  80.191 -    "movq %rsp,%rdi\n\t" \
  80.192 -    "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
  80.193 +#define XBUILD_SMP_TIMER_INTERRUPT(x,v)         \
  80.194 +asmlinkage void x(struct cpu_user_regs * regs); \
  80.195 +__asm__(                                        \
  80.196 +"\n"__ALIGN_STR"\n"                             \
  80.197 +STR(x) ":\n\t"                                  \
  80.198 +    "pushq $0\n\t"                              \
  80.199 +    "movl $"#v",4(%rsp)\n\t"                    \
  80.200 +    SAVE_ALL                                    \
  80.201 +    "movq %rsp,%rdi\n\t"                        \
  80.202 +    "callq "STR(smp_##x)"\n\t"                  \
  80.203      "jmp ret_from_intr\n");
  80.204  
  80.205 -#define BUILD_COMMON_IRQ() \
  80.206 -__asm__( \
  80.207 -    "\n" __ALIGN_STR"\n" \
  80.208 -    "common_interrupt:\n\t" \
  80.209 -    SAVE_ALL \
  80.210 -    "movq %rsp,%rdi\n\t" \
  80.211 -    "callq " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
  80.212 +#define BUILD_COMMON_IRQ()                      \
  80.213 +__asm__(                                        \
  80.214 +    "\n" __ALIGN_STR"\n"                        \
  80.215 +    "common_interrupt:\n\t"                     \
  80.216 +    SAVE_ALL                                    \
  80.217 +    "movq %rsp,%rdi\n\t"                        \
  80.218 +    "callq " STR(do_IRQ) "\n\t"                 \
  80.219      "jmp ret_from_intr\n");
  80.220  
  80.221  #define IRQ_NAME2(nr) nr##_interrupt(void)
  80.222  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
  80.223  
  80.224 -#define BUILD_IRQ(nr) \
  80.225 -asmlinkage void IRQ_NAME(nr); \
  80.226 -__asm__( \
  80.227 -"\n"__ALIGN_STR"\n" \
  80.228 -SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
  80.229 -    "pushq $0\n\t" \
  80.230 -    "movl $"#nr",4(%rsp)\n\t" \
  80.231 +#define BUILD_IRQ(nr)                           \
  80.232 +asmlinkage void IRQ_NAME(nr);                   \
  80.233 +__asm__(                                        \
  80.234 +"\n"__ALIGN_STR"\n"                             \
  80.235 +STR(IRQ) #nr "_interrupt:\n\t"                  \
  80.236 +    "pushq $0\n\t"                              \
  80.237 +    "movl $"#nr",4(%rsp)\n\t"                   \
  80.238      "jmp common_interrupt");
  80.239  
  80.240  #endif /* __X86_64_ASM_DEFNS_H__ */
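
For reference, the x86_64 PERFC_INCR() above spills %rdx, forms the counter address relative to perfcounters, and performs a locked increment. A hedged C equivalent (names are illustrative; a GCC atomic builtin stands in for the lock prefix):

    /* Atomically increment the 32-bit counter located name_offset bytes
     * into the perfcounters block, indexed by idx. */
    static inline void perfc_incr_sketch(void *perfcounters_base,
                                         unsigned long name_offset,
                                         unsigned long idx)
    {
        unsigned int *ctr =
            (unsigned int *)((char *)perfcounters_base + name_offset) + idx;
        __sync_fetch_and_add(ctr, 1);   /* like "lock incl" */
    }
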
    81.1 --- a/xen/include/asm-x86/x86_64/current.h	Thu Apr 28 13:54:01 2005 +0000
    81.2 +++ b/xen/include/asm-x86/x86_64/current.h	Fri Apr 29 07:34:47 2005 +0000
    81.3 @@ -5,7 +5,7 @@
    81.4  struct domain;
    81.5  
    81.6  #define STACK_RESERVED \
    81.7 -    (sizeof(execution_context_t) + sizeof(struct domain *))
    81.8 +    (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
    81.9  
   81.10  static inline struct exec_domain *get_current(void)
   81.11  {
   81.12 @@ -23,33 +23,33 @@ static inline void set_current(struct ex
   81.13                : : "r" (STACK_SIZE-8), "r" (ed) );    
   81.14  }
   81.15  
   81.16 -static inline execution_context_t *get_execution_context(void)
   81.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void)
   81.18  {
   81.19 -    execution_context_t *execution_context;
   81.20 +    struct cpu_user_regs *cpu_user_regs;
   81.21      __asm__( "andq %%rsp,%0; addq %2,%0"
   81.22 -	    : "=r" (execution_context)
   81.23 +	    : "=r" (cpu_user_regs)
   81.24  	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); 
   81.25 -    return execution_context;
   81.26 +    return cpu_user_regs;
   81.27  }
   81.28  
   81.29  /*
   81.30   * Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually
   81.31 - * 64 bytes before the real bottom of the stack to allow space for:
   81.32 - *  domain pointer, DS, ES, FS, GS, FS_BASE, GS_BASE_OS, GS_BASE_APP
   81.33 + * 40 bytes before the real bottom of the stack to allow space for:
   81.34 + *  domain pointer, DS, ES, FS, GS
   81.35   */
   81.36  static inline unsigned long get_stack_bottom(void)
   81.37  {
   81.38      unsigned long p;
   81.39      __asm__( "andq %%rsp,%0; addq %2,%0"
   81.40  	    : "=r" (p)
   81.41 -	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-64) );
   81.42 +	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-40) );
   81.43      return p;
   81.44  }
   81.45  
   81.46  #define reset_stack_and_jump(__fn)                                \
   81.47      __asm__ __volatile__ (                                        \
   81.48          "movq %0,%%rsp; jmp "STR(__fn)                            \
   81.49 -        : : "r" (get_execution_context()) )
   81.50 +        : : "r" (get_cpu_user_regs()) )
   81.51  
   81.52  #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   81.53  
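
The switch from STACK_SIZE-64 to STACK_SIZE-40 matches the segment bases moving out of the on-stack area: what remains below the stack bottom is the domain pointer plus the four saved selectors, each in an 8-byte slot on x86_64. A small standalone sketch that checks the arithmetic (assumes an LP64 target, so pointers and longs are 8 bytes):

    #include <assert.h>

    int main(void)
    {
        unsigned long reserved = sizeof(void *)             /* domain ptr  */
                               + 4 * sizeof(unsigned long); /* DS,ES,FS,GS */
        assert(reserved == 40);   /* matches STACK_SIZE-40 above */
        return 0;
    }
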
    82.1 --- a/xen/include/asm-x86/x86_64/regs.h	Thu Apr 28 13:54:01 2005 +0000
    82.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Fri Apr 29 07:34:47 2005 +0000
    82.3 @@ -17,6 +17,6 @@
    82.4  
    82.5  /* Number of bytes of on-stack execution state to be context-switched. */
    82.6  /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
    82.7 -#define CTXT_SWITCH_STACK_BYTES (offsetof(execution_context_t, es))
    82.8 +#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
    82.9  
   82.10  #endif
    83.1 --- a/xen/include/asm-x86/x86_emulate.h	Thu Apr 28 13:54:01 2005 +0000
    83.2 +++ b/xen/include/asm-x86/x86_emulate.h	Fri Apr 29 07:34:47 2005 +0000
    83.3 @@ -139,7 +139,7 @@ x86_emulate_write_std(
    83.4      unsigned long val,
    83.5      unsigned int bytes);
    83.6  
    83.7 -struct xen_regs;
    83.8 +struct cpu_user_regs;
    83.9  
   83.10  /*
   83.11   * x86_emulate_memop: Emulate an instruction that faulted attempting to
   83.12 @@ -152,7 +152,7 @@ struct xen_regs;
   83.13   */
   83.14  extern int
   83.15  x86_emulate_memop(
   83.16 -    struct xen_regs *regs,
   83.17 +    struct cpu_user_regs *regs,
   83.18      unsigned long cr2,
   83.19      struct x86_mem_emulator *ops,
   83.20      int mode);
   83.21 @@ -164,6 +164,6 @@ x86_emulate_memop(
   83.22   */
   83.23  extern void *
   83.24  decode_register(
   83.25 -    u8 modrm_reg, struct xen_regs *regs, int highbyte_regs);
   83.26 +    u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);
   83.27  
   83.28  #endif /* __X86_EMULATE_H__ */
    84.1 --- a/xen/include/public/arch-ia64.h	Thu Apr 28 13:54:01 2005 +0000
    84.2 +++ b/xen/include/public/arch-ia64.h	Fri Apr 29 07:34:47 2005 +0000
    84.3 @@ -22,7 +22,7 @@ typedef unsigned long cpureg_t;   /* Ful
    84.4  
    84.5  typedef struct
    84.6  {
    84.7 -} PACKED execution_context_t;
    84.8 +} PACKED cpu_user_regs_t;
    84.9  
   84.10  /*
   84.11   * NB. This may become a 64-bit count with no shift. If this happens then the 
   84.12 @@ -91,9 +91,9 @@ typedef struct {
   84.13   * The following is all CPU context. Note that the i387_ctxt block is filled 
   84.14   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   84.15   */
   84.16 -typedef struct {
   84.17 +typedef struct vcpu_guest_context {
   84.18      //unsigned long flags;
   84.19 -} PACKED full_execution_context_t;
   84.20 +} PACKED vcpu_guest_context_t;
   84.21  
   84.22  #endif /* !__ASSEMBLY__ */
   84.23  
    85.1 --- a/xen/include/public/arch-x86_32.h	Thu Apr 28 13:54:01 2005 +0000
    85.2 +++ b/xen/include/public/arch-x86_32.h	Fri Apr 29 07:34:47 2005 +0000
    85.3 @@ -97,8 +97,7 @@ typedef struct {
    85.4      memory_t address; /* 4: code address                                  */
    85.5  } PACKED trap_info_t; /* 8 bytes */
    85.6  
    85.7 -typedef struct xen_regs
    85.8 -{
    85.9 +typedef struct cpu_user_regs {
   85.10      u32 ebx;
   85.11      u32 ecx;
   85.12      u32 edx;
   85.13 @@ -117,26 +116,27 @@ typedef struct xen_regs
   85.14      u32 ds;
   85.15      u32 fs;
   85.16      u32 gs;
   85.17 -} PACKED execution_context_t;
   85.18 +} cpu_user_regs_t;
   85.19  
   85.20  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   85.21  
   85.22  /*
   85.23 - * The following is all CPU context. Note that the i387_ctxt block is filled 
   85.24 + * The following is all CPU context. Note that the fpu_ctxt block is filled 
   85.25   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   85.26   */
   85.27 -typedef struct {
   85.28 -#define ECF_I387_VALID (1<<0)
   85.29 -#define ECF_VMX_GUEST  (1<<1)
   85.30 -#define ECF_IN_KERNEL (1<<2)
   85.31 -    unsigned long flags;
   85.32 -    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
   85.33 -    char          fpu_ctxt[256];            /* User-level FPU registers     */
   85.34 +typedef struct vcpu_guest_context {
   85.35 +#define VGCF_I387_VALID (1<<0)
   85.36 +#define VGCF_VMX_GUEST  (1<<1)
   85.37 +#define VGCF_IN_KERNEL  (1<<2)
   85.38 +    unsigned long flags;                    /* VGCF_* flags                 */
   85.39 +    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   85.40 +    struct { char x[512]; } fpu_ctxt        /* User-level FPU registers     */
   85.41 +    __attribute__((__aligned__(16)));       /* (needs 16-byte alignment)    */
   85.42      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   85.43      unsigned int  fast_trap_idx;            /* "Fast trap" vector offset    */
   85.44      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
   85.45      unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
   85.46 -    unsigned long kernel_ss, kernel_esp;  /* Virtual TSS (only SS1/ESP1)  */
   85.47 +    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
   85.48      unsigned long pt_base;                  /* CR3 (pagetable base)         */
   85.49      unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
   85.50      unsigned long event_callback_cs;        /* CS:EIP of event callback     */
   85.51 @@ -144,15 +144,15 @@ typedef struct {
   85.52      unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
   85.53      unsigned long failsafe_callback_eip;
   85.54      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   85.55 -} PACKED full_execution_context_t;
   85.56 +} vcpu_guest_context_t;
   85.57  
   85.58  typedef struct {
   85.59      /* MFN of a table of MFNs that make up p2m table */
   85.60      u64 pfn_to_mfn_frame_list;
   85.61 -} PACKED arch_shared_info_t;
   85.62 +} arch_shared_info_t;
   85.63  
   85.64  typedef struct {
   85.65 -} PACKED arch_vcpu_info_t;
   85.66 +} arch_vcpu_info_t;
   85.67  
   85.68  #define ARCH_HAS_FAST_TRAP
   85.69  
    86.1 --- a/xen/include/public/arch-x86_64.h	Thu Apr 28 13:54:01 2005 +0000
    86.2 +++ b/xen/include/public/arch-x86_64.h	Fri Apr 29 07:34:47 2005 +0000
    86.3 @@ -101,7 +101,7 @@
    86.4   * int HYPERVISOR_switch_to_user(void)
    86.5   * All arguments are on the kernel stack, in the following format.
    86.6   * Never returns if successful. Current kernel context is lost.
    86.7 - * If flags contains ECF_IN_SYSCALL:
    86.8 + * If flags contains VGCF_IN_SYSCALL:
    86.9   *   Restore RAX, RIP, RFLAGS, RSP. 
   86.10   *   Discard R11, RCX, CS, SS.
   86.11   * Otherwise:
   86.12 @@ -109,7 +109,7 @@
   86.13   * All other registers are saved on hypercall entry and restored to user.
   86.14   */
   86.15  /* Guest exited in SYSCALL context? Return to guest with SYSRET? */
   86.16 -#define ECF_IN_SYSCALL (1<<8)
   86.17 +#define VGCF_IN_SYSCALL (1<<8)
   86.18  struct switch_to_user {
   86.19      /* Top of stack (%rsp at point of hypercall). */
   86.20      u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
   86.21 @@ -142,8 +142,7 @@ typedef struct {
   86.22      memory_t address; /* 8: code address                                  */
   86.23  } PACKED trap_info_t; /* 16 bytes */
   86.24  
   86.25 -typedef struct xen_regs
   86.26 -{
   86.27 +typedef struct cpu_user_regs {
   86.28      u64 r15;
   86.29      u64 r14;
   86.30      u64 r13;
   86.31 @@ -168,45 +167,47 @@ typedef struct xen_regs
   86.32      u64 ss;
   86.33      u64 es;
   86.34      u64 ds;
   86.35 -    u64 fs;      /* Non-zero => takes precedence over fs_base.     */
   86.36 -    u64 gs;      /* Non-zero => takes precedence over gs_base_app. */
   86.37 -    u64 fs_base;
   86.38 -    u64 gs_base_kernel;
   86.39 -    u64 gs_base_user;
   86.40 -} PACKED execution_context_t;
   86.41 +    u64 fs;      /* Non-zero => takes precedence over fs_base.      */
   86.42 +    u64 gs;      /* Non-zero => takes precedence over gs_base_user. */
   86.43 +} cpu_user_regs_t;
   86.44  
   86.45  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   86.46  
   86.47  /*
   86.48 - * The following is all CPU context. Note that the i387_ctxt block is filled 
   86.49 + * The following is all CPU context. Note that the fpu_ctxt block is filled 
   86.50   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   86.51   */
   86.52 -typedef struct {
   86.53 -#define ECF_I387_VALID (1<<0)
   86.54 -#define ECF_VMX_GUEST  (1<<1)
   86.55 -#define ECF_IN_KERNEL (1<<2)
   86.56 -    unsigned long flags;
   86.57 -    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
   86.58 -    char          fpu_ctxt[512];            /* User-level FPU registers     */
   86.59 +typedef struct vcpu_guest_context {
   86.60 +#define VGCF_I387_VALID (1<<0)
   86.61 +#define VGCF_VMX_GUEST  (1<<1)
   86.62 +#define VGCF_IN_KERNEL  (1<<2)
   86.63 +    unsigned long flags;                    /* VGCF_* flags                 */
   86.64 +    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   86.65 +    struct { char x[512]; } fpu_ctxt        /* User-level FPU registers     */
   86.66 +    __attribute__((__aligned__(16)));       /* (needs 16-byte alignment)    */
   86.67      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   86.68      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
   86.69      unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
   86.70 -    unsigned long kernel_ss, kernel_esp;  /* Virtual TSS (only SS1/ESP1)  */
   86.71 +    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
   86.72      unsigned long pt_base;                  /* CR3 (pagetable base)         */
   86.73      unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
   86.74      unsigned long event_callback_eip;
   86.75      unsigned long failsafe_callback_eip;
   86.76      unsigned long syscall_callback_eip;
   86.77      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   86.78 -} PACKED full_execution_context_t;
   86.79 +    /* Segment base addresses. */
   86.80 +    u64           fs_base;
   86.81 +    u64           gs_base_kernel;
   86.82 +    u64           gs_base_user;
   86.83 +} vcpu_guest_context_t;
   86.84  
   86.85  typedef struct {
   86.86      /* MFN of a table of MFNs that make up p2m table */
   86.87      u64 pfn_to_mfn_frame_list;
   86.88 -} PACKED arch_shared_info_t;
   86.89 +} arch_shared_info_t;
   86.90  
   86.91  typedef struct {
   86.92 -} PACKED arch_vcpu_info_t;
   86.93 +} arch_vcpu_info_t;
   86.94  
   86.95  #endif /* !__ASSEMBLY__ */
   86.96  
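
A sketch of building the switch_to_user frame for the SYSCALL fast path described above. The struct layout is copied from this header; a real guest assembles the frame on its kernel stack in assembly, so the helper is purely illustrative:

    #include <stdint.h>
    typedef uint64_t u64;

    #define VGCF_IN_SYSCALL (1<<8)

    struct switch_to_user {
        /* Top of stack (%rsp at point of hypercall). */
        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    };

    static struct switch_to_user make_sysret_frame(u64 rax, u64 rip,
                                                   u64 rflags, u64 rsp)
    {
        struct switch_to_user stu = { 0 };
        stu.rax    = rax;
        stu.flags  = VGCF_IN_SYSCALL; /* restore RAX/RIP/RFLAGS/RSP only;
                                         R11/RCX/CS/SS are discarded     */
        stu.rip    = rip;
        stu.rflags = rflags;
        stu.rsp    = rsp;
        return stu;
    }
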
    87.1 --- a/xen/include/public/dom0_ops.h	Thu Apr 28 13:54:01 2005 +0000
    87.2 +++ b/xen/include/public/dom0_ops.h	Fri Apr 29 07:34:47 2005 +0000
    87.3 @@ -83,7 +83,7 @@ typedef struct {
    87.4  #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
    87.5  #define DOMFLAGS_SHUTDOWNSHIFT 16
    87.6      u32      flags;
    87.7 -    full_execution_context_t *ctxt;   /* NB. IN/OUT variable. */
    87.8 +    vcpu_guest_context_t *ctxt;   /* NB. IN/OUT variable. */
    87.9      memory_t tot_pages;
   87.10      memory_t max_pages;
   87.11      memory_t shared_info_frame;       /* MFN of shared_info struct */
   87.12 @@ -96,7 +96,7 @@ typedef struct {
   87.13      domid_t                   domain;
   87.14      u16                       exec_domain;
   87.15      /* IN/OUT parameters */
   87.16 -    full_execution_context_t *ctxt;
   87.17 +    vcpu_guest_context_t *ctxt;
   87.18  } dom0_setdomaininfo_t;
   87.19  
   87.20  #define DOM0_MSR              15
    88.1 --- a/xen/include/xen/domain.h	Thu Apr 28 13:54:01 2005 +0000
    88.2 +++ b/xen/include/xen/domain.h	Fri Apr 29 07:34:47 2005 +0000
    88.3 @@ -15,7 +15,7 @@ extern void arch_do_createdomain(struct 
    88.4  extern void arch_do_boot_vcpu(struct exec_domain *ed);
    88.5  
    88.6  extern int  arch_set_info_guest(
    88.7 -    struct exec_domain *d, full_execution_context_t *c);
    88.8 +    struct exec_domain *d, struct vcpu_guest_context *c);
    88.9  
   88.10  extern void free_perdomain_pt(struct domain *d);
   88.11  
    89.1 --- a/xen/include/xen/irq.h	Thu Apr 28 13:54:01 2005 +0000
    89.2 +++ b/xen/include/xen/irq.h	Fri Apr 29 07:34:47 2005 +0000
    89.3 @@ -8,7 +8,7 @@
    89.4  
    89.5  struct irqaction
    89.6  {
    89.7 -    void (*handler)(int, void *, struct xen_regs *);
    89.8 +    void (*handler)(int, void *, struct cpu_user_regs *);
    89.9      const char *name;
   89.10      void *dev_id;
   89.11  };
   89.12 @@ -63,7 +63,7 @@ extern int setup_irq(unsigned int, struc
   89.13  extern void free_irq(unsigned int);
   89.14  
   89.15  extern hw_irq_controller no_irq_type;
   89.16 -extern void no_action(int cpl, void *dev_id, struct xen_regs *regs);
   89.17 +extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
   89.18  
   89.19  struct domain;
   89.20  struct exec_domain;
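
Usage sketch for the updated irqaction handler signature. The handler body and device name are illustrative, and setup_irq() is assumed to take the action pointer as its second argument, per the truncated prototype above:

    static void sketch_handler(int irq, void *dev_id,
                               struct cpu_user_regs *regs)
    {
        /* Service the device here; regs is the interrupted context. */
    }

    static struct irqaction sketch_action = {
        .handler = sketch_handler,
        .name    = "sketch-dev",
        .dev_id  = NULL,
    };

    static void sketch_attach(unsigned int irq)
    {
        setup_irq(irq, &sketch_action);
    }
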
    90.1 --- a/xen/include/xen/keyhandler.h	Thu Apr 28 13:54:01 2005 +0000
    90.2 +++ b/xen/include/xen/keyhandler.h	Fri Apr 29 07:34:47 2005 +0000
    90.3 @@ -23,11 +23,11 @@ extern void register_keyhandler(
    90.4   * synchronously in hard-IRQ context with interrupts disabled. The @regs
    90.5   * callback parameter points at the interrupted register context.
    90.6   */
    90.7 -typedef void irq_keyhandler_t(unsigned char key, struct xen_regs *regs);
    90.8 +typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs);
    90.9  extern void register_irq_keyhandler(
   90.10      unsigned char key, irq_keyhandler_t *handler, char *desc); 
   90.11  
   90.12  /* Inject a keypress into the key-handling subsystem. */
   90.13 -extern void handle_keypress(unsigned char key, struct xen_regs *regs);
   90.14 +extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs);
   90.15  
   90.16  #endif /* __XEN_KEYHANDLER_H__ */
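
Usage sketch for the IRQ-context keyhandler API above. The key choice and handler body are illustrative, and show_registers() is an assumed debugging helper, not part of this header:

    extern void show_registers(struct cpu_user_regs *regs); /* assumed */

    static void sketch_key(unsigned char key, struct cpu_user_regs *regs)
    {
        /* Runs in hard-IRQ context with interrupts disabled: no sleeping. */
        show_registers(regs);
    }

    static void sketch_register(void)
    {
        register_irq_keyhandler('r', sketch_key, "dump interrupted regs");
    }
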
    91.1 --- a/xen/include/xen/sched.h	Thu Apr 28 13:54:01 2005 +0000
    91.2 +++ b/xen/include/xen/sched.h	Fri Apr 29 07:34:47 2005 +0000
    91.3 @@ -210,7 +210,7 @@ static inline void get_knownalive_domain
    91.4      atomic_inc(&d->refcnt);
    91.5      ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
    91.6  }
    91.7 -  
    91.8 +
    91.9  extern struct domain *do_createdomain(
   91.10      domid_t dom_id, unsigned int cpu);
   91.11  extern int construct_dom0(
   91.12 @@ -265,10 +265,15 @@ extern void sync_lazy_execstate_cpuset(u
   91.13  extern void sync_lazy_execstate_all(void);
   91.14  extern int __sync_lazy_execstate(void);
   91.15  
   91.16 +/* Called by the scheduler to switch to another exec_domain. */
   91.17  extern void context_switch(
   91.18      struct exec_domain *prev, 
   91.19      struct exec_domain *next);
   91.20  
   91.21 +/* Called by the scheduler to continue running the current exec_domain. */
   91.22 +extern void continue_running(
   91.23 +    struct exec_domain *same);
   91.24 +
   91.25  void domain_init(void);
   91.26  
   91.27  int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
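
The new continue_running() hook pairs with context_switch() as a fast path for when the scheduler picks the same exec_domain again. A sketch of the implied dispatch (control flow illustrative; not the actual schedule() body):

    static void sketch_dispatch(struct exec_domain *prev,
                                struct exec_domain *next)
    {
        if ( next == prev )
            continue_running(prev);      /* same vcpu: skip the full switch */
        else
            context_switch(prev, next);  /* full register/state switch      */
    }
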
    92.1 --- a/xen/include/xen/serial.h	Thu Apr 28 13:54:01 2005 +0000
    92.2 +++ b/xen/include/xen/serial.h	Fri Apr 29 07:34:47 2005 +0000
    92.3 @@ -28,7 +28,7 @@ void serial_init_stage2(void);
    92.4  int parse_serial_handle(char *conf);
    92.5  
    92.6  /* Register a character-receive hook on the specified COM port. */
    92.7 -typedef void (*serial_rx_fn)(unsigned char, struct xen_regs *);
    92.8 +typedef void (*serial_rx_fn)(unsigned char, struct cpu_user_regs *);
    92.9  void serial_set_rx_handler(int handle, serial_rx_fn fn);
   92.10  
   92.11  /* Transmit a single character via the specified COM port. */
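
Usage sketch for the receive hook above; feeding console input into the keyhandler subsystem (declared earlier in this changeset) is one plausible use, and the handler names are illustrative:

    static void sketch_rx(unsigned char c, struct cpu_user_regs *regs)
    {
        handle_keypress(c, regs);   /* declared in xen/keyhandler.h above */
    }

    static void sketch_attach_rx(int handle)
    {
        serial_set_rx_handler(handle, sketch_rx);
    }
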