ia64/xen-unstable

changeset 4683:38a02ee9a9c8

bitkeeper revision 1.1389.1.2 (4270ed5dZvr_HdIQR0eBM2m4Kj81_A)

Renames:
execution_context/xen_regs -> cpu_user_regs
full_execution_context -> vcpu_guest_context
[defined both 'struct xxx' and 'xxx_t' forms]
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 28 14:04:13 2005 +0000 (2005-04-28)
parents 7f38080250df
children b2ca9de6952a
files linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h tools/libxc/xc.h tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_linux_save.c tools/libxc/xc_plan9_build.c tools/libxc/xc_ptrace.c tools/libxc/xc_vmx_build.c tools/xentrace/xenctx.c xen/arch/ia64/dom0_ops.c xen/arch/ia64/domain.c xen/arch/ia64/xenmisc.c xen/arch/x86/apic.c xen/arch/x86/cdb.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/extable.c xen/arch/x86/irq.c xen/arch/x86/mm.c xen/arch/x86/nmi.c xen/arch/x86/shadow.c xen/arch/x86/time.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/call_with_regs.S xen/arch/x86/x86_32/entry.S xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/entry.S xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/arch/x86/x86_emulate.c xen/common/dom0_ops.c xen/common/domain.c xen/common/keyhandler.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-ia64/debugger.h xen/include/asm-ia64/domain.h xen/include/asm-ia64/regs.h xen/include/asm-x86/apic.h xen/include/asm-x86/debugger.h xen/include/asm-x86/domain.h xen/include/asm-x86/processor.h xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_vmcs.h xen/include/asm-x86/x86_32/asm_defns.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_32/regs.h xen/include/asm-x86/x86_64/asm_defns.h xen/include/asm-x86/x86_64/current.h xen/include/asm-x86/x86_64/regs.h xen/include/asm-x86/x86_emulate.h xen/include/public/arch-ia64.h xen/include/public/arch-x86_32.h xen/include/public/arch-x86_64.h xen/include/public/dom0_ops.h xen/include/xen/domain.h 
xen/include/xen/irq.h xen/include/xen/keyhandler.h xen/include/xen/serial.h
line diff
     1.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 13:52:41 2005 +0000
     1.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 14:04:13 2005 +0000
     1.3 @@ -820,7 +820,7 @@ static int __init do_boot_cpu(int apicid
     1.4  #if 0
     1.5  	unsigned short nmi_high = 0, nmi_low = 0;
     1.6  #endif
     1.7 -	full_execution_context_t ctxt;
     1.8 +	vcpu_guest_context_t ctxt;
     1.9  	extern void startup_32_smp(void);
    1.10  	extern void hypervisor_callback(void);
    1.11  	extern void failsafe_callback(void);
    1.12 @@ -865,15 +865,15 @@ static int __init do_boot_cpu(int apicid
    1.13  
    1.14  	memset(&ctxt, 0, sizeof(ctxt));
    1.15  
    1.16 -	ctxt.cpu_ctxt.ds = __USER_DS;
    1.17 -	ctxt.cpu_ctxt.es = __USER_DS;
    1.18 -	ctxt.cpu_ctxt.fs = 0;
    1.19 -	ctxt.cpu_ctxt.gs = 0;
    1.20 -	ctxt.cpu_ctxt.ss = __KERNEL_DS;
    1.21 -	ctxt.cpu_ctxt.cs = __KERNEL_CS;
    1.22 -	ctxt.cpu_ctxt.eip = start_eip;
    1.23 -	ctxt.cpu_ctxt.esp = idle->thread.esp;
    1.24 -	ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
    1.25 +	ctxt.user_regs.ds = __USER_DS;
    1.26 +	ctxt.user_regs.es = __USER_DS;
    1.27 +	ctxt.user_regs.fs = 0;
    1.28 +	ctxt.user_regs.gs = 0;
    1.29 +	ctxt.user_regs.ss = __KERNEL_DS;
    1.30 +	ctxt.user_regs.cs = __KERNEL_CS;
    1.31 +	ctxt.user_regs.eip = start_eip;
    1.32 +	ctxt.user_regs.esp = idle->thread.esp;
    1.33 +	ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
    1.34  
    1.35  	/* FPU is set up to default initial state. */
    1.36  	memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
     2.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Thu Apr 28 13:52:41 2005 +0000
     2.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h	Thu Apr 28 14:04:13 2005 +0000
     2.3 @@ -519,7 +519,7 @@ HYPERVISOR_vm_assist(
     2.4  
     2.5  static inline int
     2.6  HYPERVISOR_boot_vcpu(
     2.7 -    unsigned long vcpu, full_execution_context_t *ctxt)
     2.8 +    unsigned long vcpu, vcpu_guest_context_t *ctxt)
     2.9  {
    2.10      int ret;
    2.11      unsigned long ign1, ign2;
     3.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Thu Apr 28 13:52:41 2005 +0000
     3.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h	Thu Apr 28 14:04:13 2005 +0000
     3.3 @@ -489,7 +489,7 @@ HYPERVISOR_switch_to_user(void)
     3.4  
     3.5  static inline int
     3.6  HYPERVISOR_boot_vcpu(
     3.7 -    unsigned long vcpu, full_execution_context_t *ctxt)
     3.8 +    unsigned long vcpu, vcpu_guest_context_t *ctxt)
     3.9  {
    3.10      int ret;
    3.11  
     4.1 --- a/tools/libxc/xc.h	Thu Apr 28 13:52:41 2005 +0000
     4.2 +++ b/tools/libxc/xc.h	Thu Apr 28 14:04:13 2005 +0000
     4.3 @@ -160,7 +160,7 @@ int xc_domain_getfullinfo(int xc_handle,
     4.4                            u32 domid,
     4.5                            u32 vcpu,
     4.6                            xc_domaininfo_t *info,
     4.7 -                          full_execution_context_t *ctxt);
     4.8 +                          vcpu_guest_context_t *ctxt);
     4.9  int xc_domain_setcpuweight(int xc_handle,
    4.10                             u32 domid,
    4.11                             float weight);
     5.1 --- a/tools/libxc/xc_domain.c	Thu Apr 28 13:52:41 2005 +0000
     5.2 +++ b/tools/libxc/xc_domain.c	Thu Apr 28 14:04:13 2005 +0000
     5.3 @@ -144,7 +144,7 @@ int xc_domain_getfullinfo(int xc_handle,
     5.4                            u32 domid,
     5.5                            u32 vcpu,
     5.6                            xc_domaininfo_t *info,
     5.7 -                          full_execution_context_t *ctxt)
     5.8 +                          vcpu_guest_context_t *ctxt)
     5.9  {
    5.10      int rc, errno_saved;
    5.11      dom0_op_t op;
     6.1 --- a/tools/libxc/xc_linux_build.c	Thu Apr 28 13:52:41 2005 +0000
     6.2 +++ b/tools/libxc/xc_linux_build.c	Thu Apr 28 14:04:13 2005 +0000
     6.3 @@ -45,7 +45,7 @@ static int setup_guest(int xc_handle,
     6.4                           gzFile initrd_gfd, unsigned long initrd_len,
     6.5                           unsigned long nr_pages,
     6.6                           unsigned long *pvsi, unsigned long *pvke,
     6.7 -                         full_execution_context_t *ctxt,
     6.8 +                         vcpu_guest_context_t *ctxt,
     6.9                           const char *cmdline,
    6.10                           unsigned long shared_info_frame,
    6.11                           unsigned int control_evtchn,
    6.12 @@ -316,7 +316,7 @@ int xc_linux_build(int xc_handle,
    6.13      int initrd_fd = -1;
    6.14      gzFile initrd_gfd = NULL;
    6.15      int rc, i;
    6.16 -    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
    6.17 +    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    6.18      unsigned long nr_pages;
    6.19      char         *image = NULL;
    6.20      unsigned long image_size, initrd_size=0;
    6.21 @@ -400,16 +400,16 @@ int xc_linux_build(int xc_handle,
    6.22       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
    6.23       *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
    6.24       */
    6.25 -    ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
    6.26 -    ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
    6.27 -    ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
    6.28 -    ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
    6.29 -    ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
    6.30 -    ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
    6.31 -    ctxt->cpu_ctxt.eip = vkern_entry;
    6.32 -    ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE;
    6.33 -    ctxt->cpu_ctxt.esi = vstartinfo_start;
    6.34 -    ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2);
    6.35 +    ctxt->user_regs.ds = FLAT_KERNEL_DS;
    6.36 +    ctxt->user_regs.es = FLAT_KERNEL_DS;
    6.37 +    ctxt->user_regs.fs = FLAT_KERNEL_DS;
    6.38 +    ctxt->user_regs.gs = FLAT_KERNEL_DS;
    6.39 +    ctxt->user_regs.ss = FLAT_KERNEL_DS;
    6.40 +    ctxt->user_regs.cs = FLAT_KERNEL_CS;
    6.41 +    ctxt->user_regs.eip = vkern_entry;
    6.42 +    ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
    6.43 +    ctxt->user_regs.esi = vstartinfo_start;
    6.44 +    ctxt->user_regs.eflags = (1<<9) | (1<<2);
    6.45  
    6.46      /* FPU is set up to default initial state. */
    6.47      memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
     7.1 --- a/tools/libxc/xc_linux_restore.c	Thu Apr 28 13:52:41 2005 +0000
     7.2 +++ b/tools/libxc/xc_linux_restore.c	Thu Apr 28 14:04:13 2005 +0000
     7.3 @@ -73,7 +73,7 @@ int xc_linux_restore(int xc_handle, XcIO
     7.4      shared_info_t *shared_info = (shared_info_t *)shared_info_page;
     7.5      
     7.6      /* A copy of the CPU context of the guest. */
     7.7 -    full_execution_context_t ctxt;
     7.8 +    vcpu_guest_context_t ctxt;
     7.9  
    7.10      /* First 16 bytes of the state file must contain 'LinuxGuestRecord'. */
    7.11      char signature[16];
    7.12 @@ -505,13 +505,13 @@ int xc_linux_restore(int xc_handle, XcIO
    7.13      }
    7.14  
    7.15      /* Uncanonicalise the suspend-record frame number and poke resume rec. */
    7.16 -    pfn = ctxt.cpu_ctxt.esi;
    7.17 +    pfn = ctxt.user_regs.esi;
    7.18      if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
    7.19      {
    7.20          xcio_error(ioctxt, "Suspend record frame number is bad");
    7.21          goto out;
    7.22      }
    7.23 -    ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
    7.24 +    ctxt.user_regs.esi = mfn = pfn_to_mfn_table[pfn];
    7.25      p_srec = xc_map_foreign_range(
    7.26          xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
    7.27      p_srec->resume_info.nr_pages    = nr_pfns;
    7.28 @@ -599,7 +599,7 @@ int xc_linux_restore(int xc_handle, XcIO
    7.29  
    7.30      /*
    7.31       * Safety checking of saved context:
    7.32 -     *  1. cpu_ctxt is fine, as Xen checks that on context switch.
    7.33 +     *  1. user_regs is fine, as Xen checks that on context switch.
    7.34       *  2. fpu_ctxt is fine, as it can't hurt Xen.
    7.35       *  3. trap_ctxt needs the code selectors checked.
    7.36       *  4. fast_trap_idx is checked by Xen.
     8.1 --- a/tools/libxc/xc_linux_save.c	Thu Apr 28 13:52:41 2005 +0000
     8.2 +++ b/tools/libxc/xc_linux_save.c	Thu Apr 28 14:04:13 2005 +0000
     8.3 @@ -325,7 +325,7 @@ static int analysis_phase( int xc_handle
     8.4  
     8.5  int suspend_and_state(int xc_handle, XcIOContext *ioctxt,		      
     8.6                        xc_domaininfo_t *info,
     8.7 -                      full_execution_context_t *ctxt)
     8.8 +                      vcpu_guest_context_t *ctxt)
     8.9  {
    8.10      int i=0;
    8.11      
    8.12 @@ -391,7 +391,7 @@ int xc_linux_save(int xc_handle, XcIOCon
    8.13      unsigned long shared_info_frame;
    8.14      
    8.15      /* A copy of the CPU context of the guest. */
    8.16 -    full_execution_context_t ctxt;
    8.17 +    vcpu_guest_context_t ctxt;
    8.18  
    8.19      /* A table containg the type of each PFN (/not/ MFN!). */
    8.20      unsigned long *pfn_type = NULL;
    8.21 @@ -922,7 +922,7 @@ int xc_linux_save(int xc_handle, XcIOCon
    8.22                            "SUSPEND flags %08u shinfo %08lx eip %08u "
    8.23                            "esi %08u\n",info.flags,
    8.24                            info.shared_info_frame,
    8.25 -                          ctxt.cpu_ctxt.eip, ctxt.cpu_ctxt.esi );
    8.26 +                          ctxt.user_regs.eip, ctxt.user_regs.esi );
    8.27              } 
    8.28  
    8.29              if ( xc_shadow_control( xc_handle, domid, 
    8.30 @@ -995,7 +995,7 @@ int xc_linux_save(int xc_handle, XcIOCon
    8.31         domid for this to succeed. */
    8.32      p_srec = xc_map_foreign_range(xc_handle, domid,
    8.33                                     sizeof(*p_srec), PROT_READ, 
    8.34 -                                   ctxt.cpu_ctxt.esi);
    8.35 +                                   ctxt.user_regs.esi);
    8.36      if (!p_srec){
    8.37          xcio_error(ioctxt, "Couldn't map suspend record");
    8.38          goto out;
    8.39 @@ -1009,7 +1009,7 @@ int xc_linux_save(int xc_handle, XcIOCon
    8.40      }
    8.41  
    8.42      /* Canonicalise the suspend-record frame number. */
    8.43 -    if ( !translate_mfn_to_pfn(&ctxt.cpu_ctxt.esi) ){
    8.44 +    if ( !translate_mfn_to_pfn(&ctxt.user_regs.esi) ){
    8.45          xcio_error(ioctxt, "Suspend record is not in range of pseudophys map");
    8.46          goto out;
    8.47      }
     9.1 --- a/tools/libxc/xc_plan9_build.c	Thu Apr 28 13:52:41 2005 +0000
     9.2 +++ b/tools/libxc/xc_plan9_build.c	Thu Apr 28 14:04:13 2005 +0000
     9.3 @@ -113,7 +113,7 @@ setup_guest(int xc_handle,
     9.4  	      unsigned long tot_pages,
     9.5  	      unsigned long *virt_startinfo_addr,
     9.6  	      unsigned long *virt_load_addr,
     9.7 -	      full_execution_context_t * ctxt,
     9.8 +	      vcpu_guest_context_t * ctxt,
     9.9  	      const char *cmdline,
    9.10  	      unsigned long shared_info_frame, 
    9.11  	      unsigned int control_evtchn,
    9.12 @@ -411,7 +411,7 @@ xc_plan9_build(int xc_handle,
    9.13  	int kernel_fd = -1;
    9.14  	gzFile kernel_gfd = NULL;
    9.15  	int rc, i;
    9.16 -	full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
    9.17 +	vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
    9.18  	unsigned long virt_startinfo_addr;
    9.19  
    9.20  	if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) {
    9.21 @@ -482,20 +482,20 @@ xc_plan9_build(int xc_handle,
    9.22  	 *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
    9.23  	 *       EFLAGS = IF | 2 (bit 1 is reserved and should always be 1)
    9.24  	 */
    9.25 -	ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS;
    9.26 -	ctxt->cpu_ctxt.es = FLAT_KERNEL_DS;
    9.27 -	ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS;
    9.28 -	ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS;
    9.29 -	ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS;
    9.30 -	ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS;
    9.31 -	ctxt->cpu_ctxt.eip = load_addr;
    9.32 -	ctxt->cpu_ctxt.eip = 0x80100020;
    9.33 +	ctxt->user_regs.ds = FLAT_KERNEL_DS;
    9.34 +	ctxt->user_regs.es = FLAT_KERNEL_DS;
    9.35 +	ctxt->user_regs.fs = FLAT_KERNEL_DS;
    9.36 +	ctxt->user_regs.gs = FLAT_KERNEL_DS;
    9.37 +	ctxt->user_regs.ss = FLAT_KERNEL_DS;
    9.38 +	ctxt->user_regs.cs = FLAT_KERNEL_CS;
    9.39 +	ctxt->user_regs.eip = load_addr;
    9.40 +	ctxt->user_regs.eip = 0x80100020;
    9.41  	/* put stack at top of second page */
    9.42 -	ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
    9.43 +	ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT);
    9.44  
    9.45  	/* why is this set? */
    9.46 -	ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp;
    9.47 -	ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2);
    9.48 +	ctxt->user_regs.esi = ctxt->user_regs.esp;
    9.49 +	ctxt->user_regs.eflags = (1 << 9) | (1 << 2);
    9.50  
    9.51  	/* FPU is set up to default initial state. */
    9.52  	memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
    9.53 @@ -519,7 +519,7 @@ xc_plan9_build(int xc_handle,
    9.54  	/* Ring 1 stack is the initial stack. */
    9.55  	/* put stack at top of second page */
    9.56  	ctxt->kernel_ss = FLAT_KERNEL_DS;
    9.57 -	ctxt->kernel_esp = ctxt->cpu_ctxt.esp;
    9.58 +	ctxt->kernel_esp = ctxt->user_regs.esp;
    9.59  
    9.60  	/* No debugging. */
    9.61  	memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
    10.1 --- a/tools/libxc/xc_ptrace.c	Thu Apr 28 13:52:41 2005 +0000
    10.2 +++ b/tools/libxc/xc_ptrace.c	Thu Apr 28 14:04:13 2005 +0000
    10.3 @@ -132,7 +132,7 @@ static long			nr_pages = 0;
    10.4  unsigned long			*page_array = NULL;
    10.5  static int                      regs_valid[MAX_VIRT_CPUS];
    10.6  static unsigned long            cr3[MAX_VIRT_CPUS];
    10.7 -static full_execution_context_t ctxt[MAX_VIRT_CPUS];
    10.8 +static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
    10.9  
   10.10  /* --------------------- */
   10.11  
   10.12 @@ -220,7 +220,7 @@ waitdomain(int domain, int *status, int 
   10.13  {
   10.14      dom0_op_t op;
   10.15      int retval;
   10.16 -    full_execution_context_t ctxt;
   10.17 +    vcpu_guest_context_t ctxt;
   10.18      struct timespec ts;
   10.19      ts.tv_sec = 0;
   10.20      ts.tv_nsec = 10*1000*1000;
   10.21 @@ -300,7 +300,7 @@ xc_ptrace(enum __ptrace_request request,
   10.22  	FETCH_REGS(cpu);
   10.23  
   10.24  	if (request == PTRACE_GETREGS) {
   10.25 -		SET_PT_REGS(pt, ctxt[cpu].cpu_ctxt); 
   10.26 +		SET_PT_REGS(pt, ctxt[cpu].user_regs); 
   10.27  		memcpy(data, &pt, sizeof(elf_gregset_t));
   10.28  	} else if (request == PTRACE_GETFPREGS)
   10.29  	    memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt));
   10.30 @@ -309,7 +309,7 @@ xc_ptrace(enum __ptrace_request request,
   10.31  	break;
   10.32      case PTRACE_SETREGS:
   10.33  	op.cmd = DOM0_SETDOMAININFO;
   10.34 -	SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].cpu_ctxt);
   10.35 +	SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].user_regs);
   10.36  	op.u.setdomaininfo.domain = domid;
   10.37  	/* XXX need to understand multiple exec_domains */
   10.38  	op.u.setdomaininfo.exec_domain = cpu;
   10.39 @@ -339,7 +339,7 @@ xc_ptrace(enum __ptrace_request request,
   10.40  	retval = do_dom0_op(xc_handle, &op);
   10.41  	break;
   10.42      case PTRACE_SINGLESTEP:
   10.43 -	ctxt[VCPU].cpu_ctxt.eflags |= PSL_T;
   10.44 +	ctxt[VCPU].user_regs.eflags |= PSL_T;
   10.45  	op.cmd = DOM0_SETDOMAININFO;
   10.46  	op.u.setdomaininfo.domain = domid;
   10.47  	op.u.setdomaininfo.exec_domain = 0;
   10.48 @@ -355,8 +355,8 @@ xc_ptrace(enum __ptrace_request request,
   10.49  	if (request != PTRACE_SINGLESTEP) {
   10.50  	    FETCH_REGS(cpu);
   10.51  	    /* Clear trace flag */
   10.52 -	    if (ctxt[cpu].cpu_ctxt.eflags & PSL_T) {
   10.53 -		ctxt[cpu].cpu_ctxt.eflags &= ~PSL_T;
   10.54 +	    if (ctxt[cpu].user_regs.eflags & PSL_T) {
   10.55 +		ctxt[cpu].user_regs.eflags &= ~PSL_T;
   10.56  		op.cmd = DOM0_SETDOMAININFO;
   10.57  		op.u.setdomaininfo.domain = domid;
   10.58  		op.u.setdomaininfo.exec_domain = cpu;
    11.1 --- a/tools/libxc/xc_vmx_build.c	Thu Apr 28 13:52:41 2005 +0000
    11.2 +++ b/tools/libxc/xc_vmx_build.c	Thu Apr 28 14:04:13 2005 +0000
    11.3 @@ -149,7 +149,7 @@ static int setup_guest(int xc_handle,
    11.4                           char *image, unsigned long image_size,
    11.5                           gzFile initrd_gfd, unsigned long initrd_len,
    11.6                           unsigned long nr_pages,
    11.7 -                         full_execution_context_t *ctxt,
    11.8 +                         vcpu_guest_context_t *ctxt,
    11.9                           const char *cmdline,
   11.10                           unsigned long shared_info_frame,
   11.11                           unsigned int control_evtchn,
   11.12 @@ -422,22 +422,22 @@ static int setup_guest(int xc_handle,
   11.13      /*
   11.14       * Initial register values:
   11.15       */
   11.16 -    ctxt->cpu_ctxt.ds = 0x68;
   11.17 -    ctxt->cpu_ctxt.es = 0x0;
   11.18 -    ctxt->cpu_ctxt.fs = 0x0;
   11.19 -    ctxt->cpu_ctxt.gs = 0x0;
   11.20 -    ctxt->cpu_ctxt.ss = 0x68;
   11.21 -    ctxt->cpu_ctxt.cs = 0x60;
   11.22 -    ctxt->cpu_ctxt.eip = dsi.v_kernentry;
   11.23 -    ctxt->cpu_ctxt.edx = vboot_gdt_start;
   11.24 -    ctxt->cpu_ctxt.eax = 0x800;
   11.25 -    ctxt->cpu_ctxt.esp = vboot_gdt_end;
   11.26 -    ctxt->cpu_ctxt.ebx = 0;	/* startup_32 expects this to be 0 to signal boot cpu */
   11.27 -    ctxt->cpu_ctxt.ecx = mem_mapp->nr_map;
   11.28 -    ctxt->cpu_ctxt.esi = vboot_params_start;
   11.29 -    ctxt->cpu_ctxt.edi = vboot_params_start + 0x2d0;
   11.30 +    ctxt->user_regs.ds = 0x68;
   11.31 +    ctxt->user_regs.es = 0x0;
   11.32 +    ctxt->user_regs.fs = 0x0;
   11.33 +    ctxt->user_regs.gs = 0x0;
   11.34 +    ctxt->user_regs.ss = 0x68;
   11.35 +    ctxt->user_regs.cs = 0x60;
   11.36 +    ctxt->user_regs.eip = dsi.v_kernentry;
   11.37 +    ctxt->user_regs.edx = vboot_gdt_start;
   11.38 +    ctxt->user_regs.eax = 0x800;
   11.39 +    ctxt->user_regs.esp = vboot_gdt_end;
   11.40 +    ctxt->user_regs.ebx = 0;	/* startup_32 expects this to be 0 to signal boot cpu */
   11.41 +    ctxt->user_regs.ecx = mem_mapp->nr_map;
   11.42 +    ctxt->user_regs.esi = vboot_params_start;
   11.43 +    ctxt->user_regs.edi = vboot_params_start + 0x2d0;
   11.44  
   11.45 -    ctxt->cpu_ctxt.eflags = (1<<2);
   11.46 +    ctxt->user_regs.eflags = (1<<2);
   11.47  
   11.48      return 0;
   11.49  
   11.50 @@ -488,7 +488,7 @@ int xc_vmx_build(int xc_handle,
   11.51      int initrd_fd = -1;
   11.52      gzFile initrd_gfd = NULL;
   11.53      int rc, i;
   11.54 -    full_execution_context_t st_ctxt, *ctxt = &st_ctxt;
   11.55 +    vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
   11.56      unsigned long nr_pages;
   11.57      char         *image = NULL;
   11.58      unsigned long image_size, initrd_size=0;
    12.1 --- a/tools/xentrace/xenctx.c	Thu Apr 28 13:52:41 2005 +0000
    12.2 +++ b/tools/xentrace/xenctx.c	Thu Apr 28 14:04:13 2005 +0000
    12.3 @@ -24,27 +24,26 @@
    12.4  #include "xc.h"
    12.5  
    12.6  #ifdef __i386__
    12.7 -void
    12.8 -print_ctx(full_execution_context_t *ctx1)
    12.9 +void print_ctx(vcpu_guest_context_t *ctx1)
   12.10  {
   12.11 -    execution_context_t *ctx = &ctx1->cpu_ctxt;
   12.12 -
   12.13 -    printf("eip: %08lx\t", ctx->eip);
   12.14 -    printf("esp: %08lx\n", ctx->esp);
   12.15 +    struct cpu_user_regs *regs = &ctx1->user_regs;
   12.16  
   12.17 -    printf("eax: %08lx\t", ctx->eax);
   12.18 -    printf("ebx: %08lx\t", ctx->ebx);
   12.19 -    printf("ecx: %08lx\t", ctx->ecx);
   12.20 -    printf("edx: %08lx\n", ctx->edx);
   12.21 +    printf("eip: %08lx\t", regs->eip);
   12.22 +    printf("esp: %08lx\n", regs->esp);
   12.23  
   12.24 -    printf("esi: %08lx\t", ctx->esi);
   12.25 -    printf("edi: %08lx\t", ctx->edi);
   12.26 -    printf("ebp: %08lx\n", ctx->ebp);
   12.27 +    printf("eax: %08lx\t", regs->eax);
   12.28 +    printf("ebx: %08lx\t", regs->ebx);
   12.29 +    printf("ecx: %08lx\t", regs->ecx);
   12.30 +    printf("edx: %08lx\n", regs->edx);
   12.31  
   12.32 -    printf(" cs: %08lx\t", ctx->cs);
   12.33 -    printf(" ds: %08lx\t", ctx->ds);
   12.34 -    printf(" fs: %08lx\t", ctx->fs);
   12.35 -    printf(" gs: %08lx\n", ctx->gs);
   12.36 +    printf("esi: %08lx\t", regs->esi);
   12.37 +    printf("edi: %08lx\t", regs->edi);
   12.38 +    printf("ebp: %08lx\n", regs->ebp);
   12.39 +
   12.40 +    printf(" cs: %08lx\t", regs->cs);
   12.41 +    printf(" ds: %08lx\t", regs->ds);
   12.42 +    printf(" fs: %08lx\t", regs->fs);
   12.43 +    printf(" gs: %08lx\n", regs->gs);
   12.44  
   12.45  }
   12.46  #endif
   12.47 @@ -53,7 +52,7 @@ void dump_ctx(u32 domid, u32 vcpu)
   12.48  {
   12.49      int ret;
   12.50      xc_domaininfo_t info;
   12.51 -    full_execution_context_t ctx;
   12.52 +    vcpu_guest_context_t ctx;
   12.53  
   12.54      int xc_handle = xc_interface_open(); /* for accessing control interface */
   12.55  
    13.1 --- a/xen/arch/ia64/dom0_ops.c	Thu Apr 28 13:52:41 2005 +0000
    13.2 +++ b/xen/arch/ia64/dom0_ops.c	Thu Apr 28 14:04:13 2005 +0000
    13.3 @@ -47,7 +47,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    13.4      return ret;
    13.5  }
    13.6  
    13.7 -void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
    13.8 +void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
    13.9  { 
   13.10      int i;
   13.11  
    14.1 --- a/xen/arch/ia64/domain.c	Thu Apr 28 13:52:41 2005 +0000
    14.2 +++ b/xen/arch/ia64/domain.c	Thu Apr 28 14:04:13 2005 +0000
    14.3 @@ -199,13 +199,13 @@ void arch_do_boot_vcpu(struct exec_domai
    14.4  	return;
    14.5  }
    14.6  
    14.7 -int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c)
    14.8 +int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
    14.9  {
   14.10  	dummy();
   14.11  	return 1;
   14.12  }
   14.13  
   14.14 -int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c)
   14.15 +int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
   14.16  {
   14.17  	dummy();
   14.18  	return 1;
    15.1 --- a/xen/arch/ia64/xenmisc.c	Thu Apr 28 13:52:41 2005 +0000
    15.2 +++ b/xen/arch/ia64/xenmisc.c	Thu Apr 28 14:04:13 2005 +0000
    15.3 @@ -66,7 +66,7 @@ void grant_table_destroy(struct domain *
    15.4  	return;
    15.5  }
    15.6  
    15.7 -struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); }
    15.8 +struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); }
    15.9  
   15.10  void raise_actimer_softirq(void)
   15.11  {
    16.1 --- a/xen/arch/x86/apic.c	Thu Apr 28 13:52:41 2005 +0000
    16.2 +++ b/xen/arch/x86/apic.c	Thu Apr 28 14:04:13 2005 +0000
    16.3 @@ -825,7 +825,7 @@ int reprogram_ac_timer(s_time_t timeout)
    16.4      return 1;
    16.5  }
    16.6  
    16.7 -void smp_apic_timer_interrupt(struct xen_regs * regs)
    16.8 +void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
    16.9  {
   16.10      ack_APIC_irq();
   16.11      perfc_incrc(apic_timer);
   16.12 @@ -835,7 +835,7 @@ void smp_apic_timer_interrupt(struct xen
   16.13  /*
   16.14   * This interrupt should _never_ happen with our APIC/SMP architecture
   16.15   */
   16.16 -asmlinkage void smp_spurious_interrupt(struct xen_regs *regs)
   16.17 +asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs)
   16.18  {
   16.19      unsigned long v;
   16.20  
   16.21 @@ -857,7 +857,7 @@ asmlinkage void smp_spurious_interrupt(s
   16.22   * This interrupt should never happen with our APIC/SMP architecture
   16.23   */
   16.24  
   16.25 -asmlinkage void smp_error_interrupt(struct xen_regs *regs)
   16.26 +asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs)
   16.27  {
   16.28      unsigned long v, v1;
   16.29  
    17.1 --- a/xen/arch/x86/cdb.c	Thu Apr 28 13:52:41 2005 +0000
    17.2 +++ b/xen/arch/x86/cdb.c	Thu Apr 28 14:04:13 2005 +0000
    17.3 @@ -214,7 +214,7 @@ xendbg_send_reply(const char *buf, struc
    17.4  }
    17.5  
    17.6  static int
    17.7 -handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx)
    17.8 +handle_register_read_command(struct cpu_user_regs *regs, struct xendbg_context *ctx)
    17.9  {
   17.10  	char buf[121];
   17.11  
   17.12 @@ -240,7 +240,7 @@ handle_register_read_command(struct xen_
   17.13  }
   17.14  
   17.15  static int
   17.16 -process_command(char *received_packet, struct xen_regs *regs,
   17.17 +process_command(char *received_packet, struct cpu_user_regs *regs,
   17.18  		struct xendbg_context *ctx)
   17.19  {
   17.20  	char *ptr;
   17.21 @@ -318,7 +318,7 @@ xdb_ctx = {
   17.22  };
   17.23  
   17.24  int
   17.25 -__trap_to_cdb(struct xen_regs *regs)
   17.26 +__trap_to_cdb(struct cpu_user_regs *regs)
   17.27  {
   17.28  	int resume = 0;
   17.29  	int r;
    18.1 --- a/xen/arch/x86/dom0_ops.c	Thu Apr 28 13:52:41 2005 +0000
    18.2 +++ b/xen/arch/x86/dom0_ops.c	Thu Apr 28 14:04:13 2005 +0000
    18.3 @@ -374,33 +374,33 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    18.4  }
    18.5  
    18.6  void arch_getdomaininfo_ctxt(
    18.7 -    struct exec_domain *ed, full_execution_context_t *c)
    18.8 +    struct exec_domain *ed, struct vcpu_guest_context *c)
    18.9  { 
   18.10      int i;
   18.11  #ifdef __i386__  /* Remove when x86_64 VMX is implemented */
   18.12  #ifdef CONFIG_VMX
   18.13 -    extern void save_vmx_execution_context(execution_context_t *);
   18.14 +    extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
   18.15  #endif
   18.16  #endif
   18.17  
   18.18      c->flags = 0;
   18.19 -    memcpy(&c->cpu_ctxt, 
   18.20 -           &ed->arch.user_ctxt,
   18.21 -           sizeof(ed->arch.user_ctxt));
   18.22 +    memcpy(&c->user_regs, 
   18.23 +           &ed->arch.user_regs,
   18.24 +           sizeof(ed->arch.user_regs));
   18.25      /* IOPL privileges are virtualised -- merge back into returned eflags. */
   18.26 -    BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0);
   18.27 -    c->cpu_ctxt.eflags |= ed->arch.iopl << 12;
   18.28 +    BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
   18.29 +    c->user_regs.eflags |= ed->arch.iopl << 12;
   18.30  
   18.31  #ifdef __i386__
   18.32  #ifdef CONFIG_VMX
   18.33      if ( VMX_DOMAIN(ed) )
   18.34 -        save_vmx_execution_context(&c->cpu_ctxt);
   18.35 +        save_vmx_cpu_user_regs(&c->user_regs);
   18.36  #endif
   18.37  #endif
   18.38  
   18.39      if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
   18.40          c->flags |= ECF_I387_VALID;
   18.41 -    if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
   18.42 +    if ( KERNEL_MODE(ed, &ed->arch.user_regs) )
   18.43          c->flags |= ECF_IN_KERNEL;
   18.44  #ifdef CONFIG_VMX
   18.45      if (VMX_DOMAIN(ed))
    19.1 --- a/xen/arch/x86/domain.c	Thu Apr 28 13:52:41 2005 +0000
    19.2 +++ b/xen/arch/x86/domain.c	Thu Apr 28 14:04:13 2005 +0000
    19.3 @@ -312,14 +312,14 @@ void arch_vmx_do_launch(struct exec_doma
    19.4      reset_stack_and_jump(vmx_asm_do_launch);
    19.5  }
    19.6  
    19.7 -static int vmx_final_setup_guest(struct exec_domain *ed,
    19.8 -                                   full_execution_context_t *full_context)
    19.9 +static int vmx_final_setup_guest(
   19.10 +    struct exec_domain *ed, struct vcpu_guest_context *ctxt)
   19.11  {
   19.12      int error;
   19.13 -    execution_context_t *context;
   19.14 +    struct cpu_user_regs *regs;
   19.15      struct vmcs_struct *vmcs;
   19.16  
   19.17 -    context = &full_context->cpu_ctxt;
   19.18 +    regs = &ctxt->user_regs;
   19.19  
   19.20      /*
   19.21       * Create a new VMCS
   19.22 @@ -333,7 +333,7 @@ static int vmx_final_setup_guest(struct 
   19.23  
   19.24      ed->arch.arch_vmx.vmcs = vmcs;
   19.25      error = construct_vmcs(
   19.26 -        &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
   19.27 +        &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
   19.28      if ( error < 0 )
   19.29      {
   19.30          printk("Failed to construct a new VMCS\n");
   19.31 @@ -345,7 +345,7 @@ static int vmx_final_setup_guest(struct 
   19.32  
   19.33  #if defined (__i386)
   19.34      ed->arch.arch_vmx.vmx_platform.real_mode_data = 
   19.35 -        (unsigned long *) context->esi;
   19.36 +        (unsigned long *) regs->esi;
   19.37  #endif
   19.38  
   19.39      if (ed == ed->domain->exec_domain[0]) {
   19.40 @@ -374,7 +374,7 @@ out:
   19.41  
   19.42  /* This is called by arch_final_setup_guest and do_boot_vcpu */
   19.43  int arch_set_info_guest(
   19.44 -    struct exec_domain *ed, full_execution_context_t *c)
   19.45 +    struct exec_domain *ed, struct vcpu_guest_context *c)
   19.46  {
   19.47      struct domain *d = ed->domain;
   19.48      unsigned long phys_basetab;
   19.49 @@ -386,8 +386,8 @@ int arch_set_info_guest(
   19.50       * If SS RPL or DPL differs from CS RPL then we'll #GP.
   19.51       */
   19.52      if (!(c->flags & ECF_VMX_GUEST)) 
   19.53 -        if ( ((c->cpu_ctxt.cs & 3) == 0) ||
   19.54 -             ((c->cpu_ctxt.ss & 3) == 0) )
   19.55 +        if ( ((c->user_regs.cs & 3) == 0) ||
   19.56 +             ((c->user_regs.ss & 3) == 0) )
   19.57                  return -EINVAL;
   19.58  
   19.59      clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
   19.60 @@ -398,21 +398,21 @@ int arch_set_info_guest(
   19.61      if ( c->flags & ECF_IN_KERNEL )
   19.62          ed->arch.flags |= TF_kernel_mode;
   19.63  
   19.64 -    memcpy(&ed->arch.user_ctxt,
   19.65 -           &c->cpu_ctxt,
   19.66 -           sizeof(ed->arch.user_ctxt));
   19.67 +    memcpy(&ed->arch.user_regs,
   19.68 +           &c->user_regs,
   19.69 +           sizeof(ed->arch.user_regs));
   19.70  
   19.71      memcpy(&ed->arch.i387,
   19.72             &c->fpu_ctxt,
   19.73             sizeof(ed->arch.i387));
   19.74  
   19.75      /* IOPL privileges are virtualised. */
   19.76 -    ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3;
   19.77 -    ed->arch.user_ctxt.eflags &= ~EF_IOPL;
   19.78 +    ed->arch.iopl = (ed->arch.user_regs.eflags >> 12) & 3;
   19.79 +    ed->arch.user_regs.eflags &= ~EF_IOPL;
   19.80  
   19.81      /* Clear IOPL for unprivileged domains. */
   19.82      if (!IS_PRIV(d))
   19.83 -        ed->arch.user_ctxt.eflags &= 0xffffcfff;
   19.84 +        ed->arch.user_regs.eflags &= 0xffffcfff;
   19.85  
   19.86      if (test_bit(EDF_DONEINIT, &ed->ed_flags))
   19.87          return 0;
   19.88 @@ -507,7 +507,7 @@ void new_thread(struct exec_domain *d,
   19.89                  unsigned long start_stack,
   19.90                  unsigned long start_info)
   19.91  {
   19.92 -    execution_context_t *ec = &d->arch.user_ctxt;
   19.93 +    struct cpu_user_regs *regs = &d->arch.user_regs;
   19.94  
   19.95      /*
   19.96       * Initial register values:
   19.97 @@ -517,15 +517,15 @@ void new_thread(struct exec_domain *d,
   19.98       *          ESI = start_info
   19.99       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
  19.100       */
  19.101 -    ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS;
  19.102 -    ec->ss = FLAT_KERNEL_SS;
  19.103 -    ec->cs = FLAT_KERNEL_CS;
  19.104 -    ec->eip = start_pc;
  19.105 -    ec->esp = start_stack;
  19.106 -    ec->esi = start_info;
  19.107 +    regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS;
  19.108 +    regs->ss = FLAT_KERNEL_SS;
  19.109 +    regs->cs = FLAT_KERNEL_CS;
  19.110 +    regs->eip = start_pc;
  19.111 +    regs->esp = start_stack;
  19.112 +    regs->esi = start_info;
  19.113  
  19.114 -    __save_flags(ec->eflags);
  19.115 -    ec->eflags |= X86_EFLAGS_IF;
  19.116 +    __save_flags(regs->eflags);
  19.117 +    regs->eflags |= X86_EFLAGS_IF;
  19.118  }
  19.119  
  19.120  
  19.121 @@ -560,60 +560,60 @@ static void load_segments(struct exec_do
  19.122      int all_segs_okay = 1;
  19.123  
  19.124      /* Either selector != 0 ==> reload. */
  19.125 -    if ( unlikely(p->arch.user_ctxt.ds |
  19.126 -                  n->arch.user_ctxt.ds) )
  19.127 -        all_segs_okay &= loadsegment(ds, n->arch.user_ctxt.ds);
  19.128 +    if ( unlikely(p->arch.user_regs.ds |
  19.129 +                  n->arch.user_regs.ds) )
  19.130 +        all_segs_okay &= loadsegment(ds, n->arch.user_regs.ds);
  19.131  
  19.132      /* Either selector != 0 ==> reload. */
  19.133 -    if ( unlikely(p->arch.user_ctxt.es |
  19.134 -                  n->arch.user_ctxt.es) )
  19.135 -        all_segs_okay &= loadsegment(es, n->arch.user_ctxt.es);
  19.136 +    if ( unlikely(p->arch.user_regs.es |
  19.137 +                  n->arch.user_regs.es) )
  19.138 +        all_segs_okay &= loadsegment(es, n->arch.user_regs.es);
  19.139  
  19.140      /*
  19.141       * Either selector != 0 ==> reload.
  19.142       * Also reload to reset FS_BASE if it was non-zero.
  19.143       */
  19.144 -    if ( unlikely(p->arch.user_ctxt.fs |
  19.145 -                  p->arch.user_ctxt.fs_base |
  19.146 -                  n->arch.user_ctxt.fs) )
  19.147 +    if ( unlikely(p->arch.user_regs.fs |
  19.148 +                  p->arch.user_regs.fs_base |
  19.149 +                  n->arch.user_regs.fs) )
  19.150      {
  19.151 -        all_segs_okay &= loadsegment(fs, n->arch.user_ctxt.fs);
  19.152 -        if ( p->arch.user_ctxt.fs ) /* != 0 selector kills fs_base */
  19.153 -            p->arch.user_ctxt.fs_base = 0;
  19.154 +        all_segs_okay &= loadsegment(fs, n->arch.user_regs.fs);
  19.155 +        if ( p->arch.user_regs.fs ) /* != 0 selector kills fs_base */
  19.156 +            p->arch.user_regs.fs_base = 0;
  19.157      }
  19.158  
  19.159      /*
  19.160       * Either selector != 0 ==> reload.
  19.161       * Also reload to reset GS_BASE if it was non-zero.
  19.162       */
  19.163 -    if ( unlikely(p->arch.user_ctxt.gs |
  19.164 -                  p->arch.user_ctxt.gs_base_user |
  19.165 -                  n->arch.user_ctxt.gs) )
  19.166 +    if ( unlikely(p->arch.user_regs.gs |
  19.167 +                  p->arch.user_regs.gs_base_user |
  19.168 +                  n->arch.user_regs.gs) )
  19.169      {
  19.170          /* Reset GS_BASE with user %gs? */
  19.171 -        if ( p->arch.user_ctxt.gs || !n->arch.user_ctxt.gs_base_user )
  19.172 -            all_segs_okay &= loadsegment(gs, n->arch.user_ctxt.gs);
  19.173 -        if ( p->arch.user_ctxt.gs ) /* != 0 selector kills gs_base_user */
  19.174 -            p->arch.user_ctxt.gs_base_user = 0;
  19.175 +        if ( p->arch.user_regs.gs || !n->arch.user_regs.gs_base_user )
  19.176 +            all_segs_okay &= loadsegment(gs, n->arch.user_regs.gs);
  19.177 +        if ( p->arch.user_regs.gs ) /* != 0 selector kills gs_base_user */
  19.178 +            p->arch.user_regs.gs_base_user = 0;
  19.179      }
  19.180  
  19.181      /* This can only be non-zero if selector is NULL. */
  19.182 -    if ( n->arch.user_ctxt.fs_base )
  19.183 +    if ( n->arch.user_regs.fs_base )
  19.184          wrmsr(MSR_FS_BASE,
  19.185 -              n->arch.user_ctxt.fs_base,
  19.186 -              n->arch.user_ctxt.fs_base>>32);
  19.187 +              n->arch.user_regs.fs_base,
  19.188 +              n->arch.user_regs.fs_base>>32);
  19.189  
  19.190      /* Most kernels have non-zero GS base, so don't bother testing. */
  19.191      /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
  19.192      wrmsr(MSR_SHADOW_GS_BASE,
  19.193 -          n->arch.user_ctxt.gs_base_kernel,
  19.194 -          n->arch.user_ctxt.gs_base_kernel>>32);
  19.195 +          n->arch.user_regs.gs_base_kernel,
  19.196 +          n->arch.user_regs.gs_base_kernel>>32);
  19.197  
  19.198      /* This can only be non-zero if selector is NULL. */
  19.199 -    if ( n->arch.user_ctxt.gs_base_user )
  19.200 +    if ( n->arch.user_regs.gs_base_user )
  19.201          wrmsr(MSR_GS_BASE,
  19.202 -              n->arch.user_ctxt.gs_base_user,
  19.203 -              n->arch.user_ctxt.gs_base_user>>32);
  19.204 +              n->arch.user_regs.gs_base_user,
  19.205 +              n->arch.user_regs.gs_base_user>>32);
  19.206  
  19.207      /* If in kernel mode then switch the GS bases around. */
  19.208      if ( n->arch.flags & TF_kernel_mode )
  19.209 @@ -621,7 +621,7 @@ static void load_segments(struct exec_do
  19.210  
  19.211      if ( unlikely(!all_segs_okay) )
  19.212      {
  19.213 -        struct xen_regs *regs = get_execution_context();
  19.214 +        struct cpu_user_regs *regs = get_cpu_user_regs();
  19.215          unsigned long   *rsp =
  19.216              (n->arch.flags & TF_kernel_mode) ?
  19.217              (unsigned long *)regs->rsp : 
  19.218 @@ -637,10 +637,10 @@ static void load_segments(struct exec_do
  19.219               put_user(regs->rflags,         rsp- 3) |
  19.220               put_user(regs->cs,             rsp- 4) |
  19.221               put_user(regs->rip,            rsp- 5) |
  19.222 -             put_user(n->arch.user_ctxt.gs, rsp- 6) |
  19.223 -             put_user(n->arch.user_ctxt.fs, rsp- 7) |
  19.224 -             put_user(n->arch.user_ctxt.es, rsp- 8) |
  19.225 -             put_user(n->arch.user_ctxt.ds, rsp- 9) |
  19.226 +             put_user(n->arch.user_regs.gs, rsp- 6) |
  19.227 +             put_user(n->arch.user_regs.fs, rsp- 7) |
  19.228 +             put_user(n->arch.user_regs.es, rsp- 8) |
  19.229 +             put_user(n->arch.user_regs.ds, rsp- 9) |
  19.230               put_user(regs->r11,            rsp-10) |
  19.231               put_user(regs->rcx,            rsp-11) )
  19.232          {
  19.233 @@ -659,10 +659,10 @@ static void load_segments(struct exec_do
  19.234  
  19.235  static void save_segments(struct exec_domain *p)
  19.236  {
  19.237 -    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) );
  19.238 -    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) );
  19.239 -    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) );
  19.240 -    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) );
  19.241 +    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_regs.ds) );
  19.242 +    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_regs.es) );
  19.243 +    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_regs.fs) );
  19.244 +    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_regs.gs) );
  19.245  }
  19.246  
  19.247  static void clear_segments(void)
  19.248 @@ -679,7 +679,7 @@ static void clear_segments(void)
  19.249  
  19.250  long do_switch_to_user(void)
  19.251  {
  19.252 -    struct xen_regs       *regs = get_execution_context();
  19.253 +    struct cpu_user_regs       *regs = get_cpu_user_regs();
  19.254      struct switch_to_user  stu;
  19.255      struct exec_domain    *ed = current;
  19.256  
  19.257 @@ -728,14 +728,14 @@ static inline void switch_kernel_stack(s
  19.258  
  19.259  static void __context_switch(void)
  19.260  {
  19.261 -    execution_context_t *stack_ec = get_execution_context();
  19.262 +    struct cpu_user_regs *stack_ec = get_cpu_user_regs();
  19.263      unsigned int         cpu = smp_processor_id();
  19.264      struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
  19.265      struct exec_domain  *n = current;
  19.266  
  19.267      if ( !is_idle_task(p->domain) )
  19.268      {
  19.269 -        memcpy(&p->arch.user_ctxt,
  19.270 +        memcpy(&p->arch.user_regs,
  19.271                 stack_ec, 
  19.272                 CTXT_SWITCH_STACK_BYTES);
  19.273          unlazy_fpu(p);
  19.274 @@ -746,7 +746,7 @@ static void __context_switch(void)
  19.275      if ( !is_idle_task(n->domain) )
  19.276      {
  19.277          memcpy(stack_ec,
  19.278 -               &n->arch.user_ctxt,
  19.279 +               &n->arch.user_regs,
  19.280                 CTXT_SWITCH_STACK_BYTES);
  19.281  
  19.282          /* Maybe switch the debug registers. */
  19.283 @@ -844,7 +844,7 @@ unsigned long __hypercall_create_continu
  19.284      unsigned int op, unsigned int nr_args, ...)
  19.285  {
  19.286      struct mc_state *mcs = &mc_state[smp_processor_id()];
  19.287 -    execution_context_t *ec;
  19.288 +    struct cpu_user_regs *regs;
  19.289      unsigned int i;
  19.290      va_list args;
  19.291  
  19.292 @@ -859,37 +859,37 @@ unsigned long __hypercall_create_continu
  19.293      }
  19.294      else
  19.295      {
  19.296 -        ec       = get_execution_context();
  19.297 +        regs       = get_cpu_user_regs();
  19.298  #if defined(__i386__)
  19.299 -        ec->eax  = op;
  19.300 -        ec->eip -= 2;  /* re-execute 'int 0x82' */
  19.301 +        regs->eax  = op;
  19.302 +        regs->eip -= 2;  /* re-execute 'int 0x82' */
  19.303          
  19.304          for ( i = 0; i < nr_args; i++ )
  19.305          {
  19.306              switch ( i )
  19.307              {
  19.308 -            case 0: ec->ebx = va_arg(args, unsigned long); break;
  19.309 -            case 1: ec->ecx = va_arg(args, unsigned long); break;
  19.310 -            case 2: ec->edx = va_arg(args, unsigned long); break;
  19.311 -            case 3: ec->esi = va_arg(args, unsigned long); break;
  19.312 -            case 4: ec->edi = va_arg(args, unsigned long); break;
  19.313 -            case 5: ec->ebp = va_arg(args, unsigned long); break;
  19.314 +            case 0: regs->ebx = va_arg(args, unsigned long); break;
  19.315 +            case 1: regs->ecx = va_arg(args, unsigned long); break;
  19.316 +            case 2: regs->edx = va_arg(args, unsigned long); break;
  19.317 +            case 3: regs->esi = va_arg(args, unsigned long); break;
  19.318 +            case 4: regs->edi = va_arg(args, unsigned long); break;
  19.319 +            case 5: regs->ebp = va_arg(args, unsigned long); break;
  19.320              }
  19.321          }
  19.322  #elif defined(__x86_64__)
  19.323 -        ec->rax  = op;
  19.324 -        ec->rip -= 2;  /* re-execute 'syscall' */
  19.325 +        regs->rax  = op;
  19.326 +        regs->rip -= 2;  /* re-execute 'syscall' */
  19.327          
  19.328          for ( i = 0; i < nr_args; i++ )
  19.329          {
  19.330              switch ( i )
  19.331              {
  19.332 -            case 0: ec->rdi = va_arg(args, unsigned long); break;
  19.333 -            case 1: ec->rsi = va_arg(args, unsigned long); break;
  19.334 -            case 2: ec->rdx = va_arg(args, unsigned long); break;
  19.335 -            case 3: ec->r10 = va_arg(args, unsigned long); break;
  19.336 -            case 4: ec->r8  = va_arg(args, unsigned long); break;
  19.337 -            case 5: ec->r9  = va_arg(args, unsigned long); break;
  19.338 +            case 0: regs->rdi = va_arg(args, unsigned long); break;
  19.339 +            case 1: regs->rsi = va_arg(args, unsigned long); break;
  19.340 +            case 2: regs->rdx = va_arg(args, unsigned long); break;
  19.341 +            case 3: regs->r10 = va_arg(args, unsigned long); break;
  19.342 +            case 4: regs->r8  = va_arg(args, unsigned long); break;
  19.343 +            case 5: regs->r9  = va_arg(args, unsigned long); break;
  19.344              }
  19.345          }
  19.346  #endif
    20.1 --- a/xen/arch/x86/extable.c	Thu Apr 28 13:52:41 2005 +0000
    20.2 +++ b/xen/arch/x86/extable.c	Thu Apr 28 14:04:13 2005 +0000
    20.3 @@ -68,7 +68,7 @@ search_exception_table(unsigned long add
    20.4  }
    20.5  
    20.6  unsigned long
    20.7 -search_pre_exception_table(struct xen_regs *regs)
    20.8 +search_pre_exception_table(struct cpu_user_regs *regs)
    20.9  {
   20.10      unsigned long addr = (unsigned long)regs->eip;
   20.11      unsigned long fixup = search_one_table(
    21.1 --- a/xen/arch/x86/irq.c	Thu Apr 28 13:52:41 2005 +0000
    21.2 +++ b/xen/arch/x86/irq.c	Thu Apr 28 14:04:13 2005 +0000
    21.3 @@ -17,7 +17,7 @@ irq_desc_t irq_desc[NR_IRQS];
    21.4  
    21.5  static void __do_IRQ_guest(int irq);
    21.6  
    21.7 -void no_action(int cpl, void *dev_id, struct xen_regs *regs) { }
    21.8 +void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
    21.9  
   21.10  static void enable_none(unsigned int irq) { }
   21.11  static unsigned int startup_none(unsigned int irq) { return 0; }
   21.12 @@ -87,7 +87,7 @@ void enable_irq(unsigned int irq)
   21.13      spin_unlock_irqrestore(&desc->lock, flags);
   21.14  }
   21.15  
   21.16 -asmlinkage void do_IRQ(struct xen_regs *regs)
   21.17 +asmlinkage void do_IRQ(struct cpu_user_regs *regs)
   21.18  {       
   21.19      unsigned int      irq = regs->entry_vector;
   21.20      irq_desc_t       *desc = &irq_desc[irq];
    22.1 --- a/xen/arch/x86/mm.c	Thu Apr 28 13:52:41 2005 +0000
    22.2 +++ b/xen/arch/x86/mm.c	Thu Apr 28 14:04:13 2005 +0000
    22.3 @@ -2842,7 +2842,7 @@ int ptwr_do_page_fault(struct domain *d,
    22.4      return EXCRET_fault_fixed;
    22.5  
    22.6   emulate:
    22.7 -    if ( x86_emulate_memop(get_execution_context(), addr,
    22.8 +    if ( x86_emulate_memop(get_cpu_user_regs(), addr,
    22.9                             &ptwr_mem_emulator, BITS_PER_LONG/8) )
   22.10          return 0;
   22.11      perfc_incrc(ptwr_emulations);
    23.1 --- a/xen/arch/x86/nmi.c	Thu Apr 28 13:52:41 2005 +0000
    23.2 +++ b/xen/arch/x86/nmi.c	Thu Apr 28 14:04:13 2005 +0000
    23.3 @@ -267,7 +267,7 @@ void touch_nmi_watchdog (void)
    23.4          alert_counter[i] = 0;
    23.5  }
    23.6  
    23.7 -void nmi_watchdog_tick (struct xen_regs * regs)
    23.8 +void nmi_watchdog_tick (struct cpu_user_regs * regs)
    23.9  {
   23.10      int sum, cpu = smp_processor_id();
   23.11  
    24.1 --- a/xen/arch/x86/shadow.c	Thu Apr 28 13:52:41 2005 +0000
    24.2 +++ b/xen/arch/x86/shadow.c	Thu Apr 28 14:04:13 2005 +0000
    24.3 @@ -2421,7 +2421,7 @@ void __shadow_sync_all(struct domain *d)
    24.4      free_out_of_sync_state(d);
    24.5  }
    24.6  
    24.7 -int shadow_fault(unsigned long va, struct xen_regs *regs)
    24.8 +int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
    24.9  {
   24.10      l1_pgentry_t gpte, spte, orig_gpte;
   24.11      struct exec_domain *ed = current;
    25.1 --- a/xen/arch/x86/time.c	Thu Apr 28 13:52:41 2005 +0000
    25.2 +++ b/xen/arch/x86/time.c	Thu Apr 28 14:04:13 2005 +0000
    25.3 @@ -51,7 +51,7 @@ static s_time_t        stime_irq;       
    25.4  static unsigned long   wc_sec, wc_usec; /* UTC time at last 'time update'.   */
    25.5  static rwlock_t        time_lock = RW_LOCK_UNLOCKED;
    25.6  
    25.7 -void timer_interrupt(int irq, void *dev_id, struct xen_regs *regs)
    25.8 +void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
    25.9  {
   25.10      write_lock_irq(&time_lock);
   25.11  
    26.1 --- a/xen/arch/x86/traps.c	Thu Apr 28 13:52:41 2005 +0000
    26.2 +++ b/xen/arch/x86/traps.c	Thu Apr 28 14:04:13 2005 +0000
    26.3 @@ -95,7 +95,7 @@ asmlinkage void machine_check(void);
    26.4   * are disabled). In such situations we can't do much that is safe. We try to
    26.5   * print out some tracing and then we just spin.
    26.6   */
    26.7 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
    26.8 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
    26.9  {
   26.10      int cpu = smp_processor_id();
   26.11      unsigned long cr2;
   26.12 @@ -136,7 +136,7 @@ asmlinkage void fatal_trap(int trapnr, s
   26.13  }
   26.14  
   26.15  static inline int do_trap(int trapnr, char *str,
   26.16 -                          struct xen_regs *regs, 
   26.17 +                          struct cpu_user_regs *regs, 
   26.18                            int use_error_code)
   26.19  {
   26.20      struct exec_domain *ed = current;
   26.21 @@ -186,13 +186,13 @@ static inline int do_trap(int trapnr, ch
   26.22  }
   26.23  
   26.24  #define DO_ERROR_NOCODE(trapnr, str, name) \
   26.25 -asmlinkage int do_##name(struct xen_regs *regs) \
   26.26 +asmlinkage int do_##name(struct cpu_user_regs *regs) \
   26.27  { \
   26.28      return do_trap(trapnr, str, regs, 0); \
   26.29  }
   26.30  
   26.31  #define DO_ERROR(trapnr, str, name) \
   26.32 -asmlinkage int do_##name(struct xen_regs *regs) \
   26.33 +asmlinkage int do_##name(struct cpu_user_regs *regs) \
   26.34  { \
   26.35      return do_trap(trapnr, str, regs, 1); \
   26.36  }
   26.37 @@ -209,7 +209,7 @@ DO_ERROR_NOCODE(16, "fpu error", coproce
   26.38  DO_ERROR(17, "alignment check", alignment_check)
   26.39  DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
   26.40  
   26.41 -asmlinkage int do_int3(struct xen_regs *regs)
   26.42 +asmlinkage int do_int3(struct cpu_user_regs *regs)
   26.43  {
   26.44      struct exec_domain *ed = current;
   26.45      struct trap_bounce *tb = &ed->arch.trap_bounce;
   26.46 @@ -234,7 +234,7 @@ asmlinkage int do_int3(struct xen_regs *
   26.47      return 0;
   26.48  }
   26.49  
   26.50 -asmlinkage void do_machine_check(struct xen_regs *regs)
   26.51 +asmlinkage void do_machine_check(struct cpu_user_regs *regs)
   26.52  {
   26.53      fatal_trap(TRAP_machine_check, regs);
   26.54  }
   26.55 @@ -257,7 +257,7 @@ void propagate_page_fault(unsigned long 
   26.56      ed->arch.guest_cr2 = addr;
   26.57  }
   26.58  
   26.59 -asmlinkage int do_page_fault(struct xen_regs *regs)
   26.60 +asmlinkage int do_page_fault(struct cpu_user_regs *regs)
   26.61  {
   26.62      unsigned long off, addr, fixup;
   26.63      struct exec_domain *ed = current;
   26.64 @@ -374,7 +374,7 @@ long do_fpu_taskswitch(int set)
   26.65  /* Has the guest requested sufficient permission for this I/O access? */
   26.66  static inline int guest_io_okay(
   26.67      unsigned int port, unsigned int bytes,
   26.68 -    struct exec_domain *ed, struct xen_regs *regs)
   26.69 +    struct exec_domain *ed, struct cpu_user_regs *regs)
   26.70  {
   26.71      u16 x;
   26.72  #if defined(__x86_64__)
   26.73 @@ -404,7 +404,7 @@ static inline int guest_io_okay(
   26.74  /* Has the administrator granted sufficient permission for this I/O access? */
   26.75  static inline int admin_io_okay(
   26.76      unsigned int port, unsigned int bytes,
   26.77 -    struct exec_domain *ed, struct xen_regs *regs)
   26.78 +    struct exec_domain *ed, struct cpu_user_regs *regs)
   26.79  {
   26.80      struct domain *d = ed->domain;
   26.81      u16 x;
   26.82 @@ -436,7 +436,7 @@ static inline int admin_io_okay(
   26.83          goto read_fault;                        \
   26.84      eip += _size; (_type)_x; })
   26.85  
   26.86 -static int emulate_privileged_op(struct xen_regs *regs)
   26.87 +static int emulate_privileged_op(struct cpu_user_regs *regs)
   26.88  {
   26.89      struct exec_domain *ed = current;
   26.90      unsigned long *reg, eip = regs->eip;
   26.91 @@ -743,7 +743,7 @@ static int emulate_privileged_op(struct 
   26.92      return EXCRET_fault_fixed;
   26.93  }
   26.94  
   26.95 -asmlinkage int do_general_protection(struct xen_regs *regs)
   26.96 +asmlinkage int do_general_protection(struct cpu_user_regs *regs)
   26.97  {
   26.98      struct exec_domain *ed = current;
   26.99      struct trap_bounce *tb = &ed->arch.trap_bounce;
  26.100 @@ -851,7 +851,7 @@ static void nmi_softirq(void)
  26.101          send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
  26.102  }
  26.103  
  26.104 -asmlinkage void mem_parity_error(struct xen_regs *regs)
  26.105 +asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
  26.106  {
  26.107      /* Clear and disable the parity-error line. */
  26.108      outb((inb(0x61)&15)|4,0x61);
  26.109 @@ -870,7 +870,7 @@ asmlinkage void mem_parity_error(struct 
  26.110      }
  26.111  }
  26.112  
  26.113 -asmlinkage void io_check_error(struct xen_regs *regs)
  26.114 +asmlinkage void io_check_error(struct cpu_user_regs *regs)
  26.115  {
  26.116      /* Clear and disable the I/O-error line. */
  26.117      outb((inb(0x61)&15)|8,0x61);
  26.118 @@ -896,7 +896,7 @@ static void unknown_nmi_error(unsigned c
  26.119      printk("Do you have a strange power saving mode enabled?\n");
  26.120  }
  26.121  
  26.122 -asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason)
  26.123 +asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason)
  26.124  {
  26.125      ++nmi_count(smp_processor_id());
  26.126  
  26.127 @@ -911,7 +911,7 @@ asmlinkage void do_nmi(struct xen_regs *
  26.128          unknown_nmi_error((unsigned char)(reason&0xff));
  26.129  }
  26.130  
  26.131 -asmlinkage int math_state_restore(struct xen_regs *regs)
  26.132 +asmlinkage int math_state_restore(struct cpu_user_regs *regs)
  26.133  {
  26.134      /* Prevent recursion. */
  26.135      clts();
  26.136 @@ -936,7 +936,7 @@ asmlinkage int math_state_restore(struct
  26.137      return EXCRET_fault_fixed;
  26.138  }
  26.139  
  26.140 -asmlinkage int do_debug(struct xen_regs *regs)
  26.141 +asmlinkage int do_debug(struct cpu_user_regs *regs)
  26.142  {
  26.143      unsigned long condition;
  26.144      struct exec_domain *ed = current;
  26.145 @@ -978,7 +978,7 @@ asmlinkage int do_debug(struct xen_regs 
  26.146      return EXCRET_not_a_fault;
  26.147  }
  26.148  
  26.149 -asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
  26.150 +asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs)
  26.151  {
  26.152      return EXCRET_not_a_fault;
  26.153  }
    27.1 --- a/xen/arch/x86/vmx.c	Thu Apr 28 13:52:41 2005 +0000
    27.2 +++ b/xen/arch/x86/vmx.c	Thu Apr 28 14:04:13 2005 +0000
    27.3 @@ -46,7 +46,7 @@ unsigned int opt_vmx_debug_level = 0;
    27.4  
    27.5  extern long evtchn_send(int lport);
    27.6  extern long do_block(void);
    27.7 -void do_nmi(struct xen_regs *, unsigned long);
    27.8 +void do_nmi(struct cpu_user_regs *, unsigned long);
    27.9  
   27.10  int start_vmx()
   27.11  {
   27.12 @@ -105,7 +105,7 @@ static void inline __update_guest_eip(un
   27.13  
   27.14  #include <asm/domain_page.h>
   27.15  
   27.16 -static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) 
   27.17 +static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
   27.18  {
   27.19      struct exec_domain *ed = current;
   27.20      unsigned long eip;
   27.21 @@ -154,7 +154,7 @@ static int vmx_do_page_fault(unsigned lo
   27.22      return result;
   27.23  }
   27.24  
   27.25 -static void vmx_do_general_protection_fault(struct xen_regs *regs) 
   27.26 +static void vmx_do_general_protection_fault(struct cpu_user_regs *regs) 
   27.27  {
   27.28      unsigned long eip, error_code;
   27.29      unsigned long intr_fields;
   27.30 @@ -181,7 +181,7 @@ static void vmx_do_general_protection_fa
   27.31      __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
   27.32  }
   27.33  
   27.34 -static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs) 
   27.35 +static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) 
   27.36  {
   27.37      unsigned int eax, ebx, ecx, edx;
   27.38      unsigned long eip;
   27.39 @@ -217,7 +217,7 @@ static void vmx_vmexit_do_cpuid(unsigned
   27.40  #define CASE_GET_REG_P(REG, reg)    \
   27.41      case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
   27.42  
   27.43 -static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs)
   27.44 +static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs)
   27.45  {
   27.46      unsigned int reg;
   27.47      unsigned long *reg_p = 0;
   27.48 @@ -288,7 +288,7 @@ static void vmx_vmexit_do_invlpg(unsigne
   27.49      shadow_invlpg(ed, va);
   27.50  }
   27.51  
   27.52 -static void vmx_io_instruction(struct xen_regs *regs, 
   27.53 +static void vmx_io_instruction(struct cpu_user_regs *regs, 
   27.54                     unsigned long exit_qualification, unsigned long inst_len) 
   27.55  {
   27.56      struct exec_domain *d = current;
   27.57 @@ -728,7 +728,7 @@ static int vmx_set_cr0(unsigned long val
   27.58  /*
   27.59   * Write to control registers
   27.60   */
   27.61 -static int mov_to_cr(int gp, int cr, struct xen_regs *regs)
   27.62 +static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
   27.63  {
   27.64      unsigned long value;
   27.65      unsigned long old_cr;
   27.66 @@ -847,7 +847,7 @@ static int mov_to_cr(int gp, int cr, str
   27.67  /*
   27.68   * Read from control registers. CR0 and CR4 are read from the shadow.
   27.69   */
   27.70 -static void mov_from_cr(int cr, int gp, struct xen_regs *regs)
   27.71 +static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
   27.72  {
   27.73      unsigned long value;
   27.74      struct exec_domain *d = current;
   27.75 @@ -878,7 +878,7 @@ static void mov_from_cr(int cr, int gp, 
   27.76      VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value);
   27.77  }
   27.78  
   27.79 -static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs)
   27.80 +static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs)
   27.81  {
   27.82      unsigned int gp, cr;
   27.83      unsigned long value;
   27.84 @@ -916,7 +916,7 @@ static int vmx_cr_access(unsigned long e
   27.85      return 1;
   27.86  }
   27.87  
   27.88 -static inline void vmx_do_msr_read(struct xen_regs *regs)
   27.89 +static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
   27.90  {
   27.91      VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx",
   27.92                  (unsigned long)regs->ecx, (unsigned long)regs->eax, 
   27.93 @@ -973,7 +973,7 @@ static void vmx_print_line(const char c,
   27.94          print_buf[index++] = c;
   27.95  }
   27.96  
   27.97 -void save_vmx_execution_context(execution_context_t *ctxt)
   27.98 +void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
   27.99  {
  27.100      __vmread(GUEST_SS_SELECTOR, &ctxt->ss);
  27.101      __vmread(GUEST_ESP, &ctxt->esp);
  27.102 @@ -988,7 +988,7 @@ void save_vmx_execution_context(executio
  27.103  }
  27.104  
  27.105  #ifdef XEN_DEBUGGER
  27.106 -void save_xen_regs(struct xen_regs *regs)
  27.107 +void save_cpu_user_regs(struct cpu_user_regs *regs)
  27.108  {
  27.109      __vmread(GUEST_SS_SELECTOR, &regs->xss);
  27.110      __vmread(GUEST_ESP, &regs->esp);
  27.111 @@ -1002,7 +1002,7 @@ void save_xen_regs(struct xen_regs *regs
  27.112      __vmread(GUEST_DS_SELECTOR, &regs->xds);
  27.113  }
  27.114  
  27.115 -void restore_xen_regs(struct xen_regs *regs)
  27.116 +void restore_cpu_user_regs(struct cpu_user_regs *regs)
  27.117  {
  27.118      __vmwrite(GUEST_SS_SELECTOR, regs->xss);
  27.119      __vmwrite(GUEST_ESP, regs->esp);
  27.120 @@ -1017,7 +1017,7 @@ void restore_xen_regs(struct xen_regs *r
  27.121  }
  27.122  #endif
  27.123  
  27.124 -asmlinkage void vmx_vmexit_handler(struct xen_regs regs)
  27.125 +asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
  27.126  {
  27.127      unsigned int exit_reason, idtv_info_field;
  27.128      unsigned long exit_qualification, eip, inst_len = 0;
  27.129 @@ -1080,16 +1080,16 @@ asmlinkage void vmx_vmexit_handler(struc
  27.130  #ifdef XEN_DEBUGGER
  27.131          case TRAP_debug:
  27.132          {
  27.133 -            save_xen_regs(&regs);
  27.134 +            save_cpu_user_regs(&regs);
  27.135              pdb_handle_exception(1, &regs, 1);
  27.136 -            restore_xen_regs(&regs);
  27.137 +            restore_cpu_user_regs(&regs);
  27.138              break;
  27.139          }
  27.140          case TRAP_int3:
  27.141          {
  27.142 -            save_xen_regs(&regs);
  27.143 +            save_cpu_user_regs(&regs);
  27.144              pdb_handle_exception(3, &regs, 1);
  27.145 -            restore_xen_regs(&regs);
  27.146 +            restore_cpu_user_regs(&regs);
  27.147              break;
  27.148          }
  27.149  #endif
  27.150 @@ -1139,9 +1139,9 @@ asmlinkage void vmx_vmexit_handler(struc
  27.151      case EXIT_REASON_EXTERNAL_INTERRUPT: 
  27.152      {
  27.153          extern int vector_irq[];
  27.154 -        extern asmlinkage void do_IRQ(struct xen_regs *);
  27.155 -        extern void smp_apic_timer_interrupt(struct xen_regs *);
  27.156 -        extern void timer_interrupt(int, void *, struct xen_regs *);
  27.157 +        extern asmlinkage void do_IRQ(struct cpu_user_regs *);
  27.158 +        extern void smp_apic_timer_interrupt(struct cpu_user_regs *);
  27.159 +        extern void timer_interrupt(int, void *, struct cpu_user_regs *);
  27.160          unsigned int    vector;
  27.161  
  27.162          if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
    28.1 --- a/xen/arch/x86/vmx_intercept.c	Thu Apr 28 13:52:41 2005 +0000
    28.2 +++ b/xen/arch/x86/vmx_intercept.c	Thu Apr 28 14:04:13 2005 +0000
    28.3 @@ -140,19 +140,19 @@ static int pit_read_io(struct vmx_virpit
    28.4  /* vmx_io_assist light-weight version, specific to PIT DM */ 
    28.5  static void resume_pit_io(ioreq_t *p)
    28.6  {
    28.7 -    execution_context_t *ec = get_execution_context();
    28.8 -    unsigned long old_eax = ec->eax;
    28.9 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   28.10 +    unsigned long old_eax = regs->eax;
   28.11      p->state = STATE_INVALID;
   28.12  
   28.13      switch(p->size) {
   28.14      case 1:
   28.15 -        ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   28.16 +        regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   28.17          break;
   28.18      case 2:
   28.19 -        ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   28.20 +        regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
   28.21          break;
   28.22      case 4:
   28.23 -        ec->eax = (p->u.data & 0xffffffff);
   28.24 +        regs->eax = (p->u.data & 0xffffffff);
   28.25          break;
   28.26      default:
   28.27          BUG();
    29.1 --- a/xen/arch/x86/vmx_io.c	Thu Apr 28 13:52:41 2005 +0000
    29.2 +++ b/xen/arch/x86/vmx_io.c	Thu Apr 28 14:04:13 2005 +0000
    29.3 @@ -38,7 +38,7 @@
    29.4  extern long do_block();
    29.5    
    29.6  #if defined (__i386__)
    29.7 -static void load_xen_regs(struct xen_regs *regs)
    29.8 +static void load_cpu_user_regs(struct cpu_user_regs *regs)
    29.9  { 
   29.10      /*
   29.11       * Write the guest register value into VMCS
   29.12 @@ -50,7 +50,7 @@ static void load_xen_regs(struct xen_reg
   29.13      __vmwrite(GUEST_EIP, regs->eip);
   29.14  }
   29.15  
   29.16 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
   29.17 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
   29.18  {
   29.19      switch (size) {
   29.20      case BYTE:
   29.21 @@ -170,12 +170,12 @@ static void set_reg_value (int size, int
   29.22      }
   29.23  }
   29.24  #else
   29.25 -static void load_xen_regs(struct xen_regs *regs)
   29.26 +static void load_cpu_user_regs(struct cpu_user_regs *regs)
   29.27  { 
   29.28  	/* XXX: TBD */
   29.29  	return;
   29.30  }
   29.31 -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
   29.32 +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
   29.33  {
   29.34  	/* XXX: TBD */
   29.35  	return;
   29.36 @@ -187,11 +187,11 @@ void vmx_io_assist(struct exec_domain *e
   29.37      vcpu_iodata_t *vio;
   29.38      ioreq_t *p;
   29.39      struct domain *d = ed->domain;
   29.40 -    execution_context_t *ec = get_execution_context();
   29.41 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   29.42      unsigned long old_eax;
   29.43      int sign;
   29.44      struct mi_per_cpu_info *mpci_p;
   29.45 -    struct xen_regs *inst_decoder_regs;
   29.46 +    struct cpu_user_regs *inst_decoder_regs;
   29.47  
   29.48      mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
   29.49      inst_decoder_regs = mpci_p->inst_decoder_regs;
   29.50 @@ -230,8 +230,8 @@ void vmx_io_assist(struct exec_domain *e
   29.51      sign = (p->df) ? -1 : 1;
   29.52      if (p->port_mm) {
   29.53          if (p->pdata_valid) {
   29.54 -            ec->esi += sign * p->count * p->size;
   29.55 -            ec->edi += sign * p->count * p->size;
   29.56 +            regs->esi += sign * p->count * p->size;
   29.57 +            regs->edi += sign * p->count * p->size;
   29.58          } else {
   29.59              if (p->dir == IOREQ_WRITE) {
   29.60                  return;
   29.61 @@ -244,38 +244,38 @@ void vmx_io_assist(struct exec_domain *e
   29.62              if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
   29.63                  p->u.data = p->u.data & 0xffff;
   29.64              }        
   29.65 -            set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
   29.66 +            set_reg_value(size, index, 0, regs, p->u.data);
   29.67  
   29.68          }
   29.69 -        load_xen_regs((struct xen_regs *)ec);
   29.70 +        load_cpu_user_regs(regs);
   29.71          return;
   29.72      }
   29.73  
   29.74      if (p->dir == IOREQ_WRITE) {
   29.75          if (p->pdata_valid) {
   29.76 -            ec->esi += sign * p->count * p->size;
   29.77 -            ec->ecx -= p->count;
   29.78 +            regs->esi += sign * p->count * p->size;
   29.79 +            regs->ecx -= p->count;
   29.80          }
   29.81          return;
   29.82      } else {
   29.83          if (p->pdata_valid) {
   29.84 -            ec->edi += sign * p->count * p->size;
   29.85 -            ec->ecx -= p->count;
   29.86 +            regs->edi += sign * p->count * p->size;
   29.87 +            regs->ecx -= p->count;
   29.88              return;
   29.89          }
   29.90      }
   29.91  
   29.92 -    old_eax = ec->eax;
   29.93 +    old_eax = regs->eax;
   29.94  
   29.95      switch(p->size) {
   29.96      case 1:
   29.97 -        ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   29.98 +        regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
   29.99          break;
  29.100      case 2:
  29.101 -        ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
  29.102 +        regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
  29.103          break;
  29.104      case 4:
  29.105 -        ec->eax = (p->u.data & 0xffffffff);
  29.106 +        regs->eax = (p->u.data & 0xffffffff);
  29.107          break;
  29.108      default:
  29.109          BUG();
    30.1 --- a/xen/arch/x86/vmx_platform.c	Thu Apr 28 13:52:41 2005 +0000
    30.2 +++ b/xen/arch/x86/vmx_platform.c	Thu Apr 28 14:04:13 2005 +0000
    30.3 @@ -39,17 +39,17 @@
    30.4  #define DECODE_failure  0
    30.5  
    30.6  #if defined (__x86_64__)
    30.7 -static void store_xen_regs(struct xen_regs *regs)
    30.8 +static void store_cpu_user_regs(struct cpu_user_regs *regs)
    30.9  {
   30.10  
   30.11  }
   30.12  
   30.13 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) 
   30.14 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 
   30.15  {
   30.16      return 0;
   30.17  }
   30.18  #elif defined (__i386__)
   30.19 -static void store_xen_regs(struct xen_regs *regs)
   30.20 +static void store_cpu_user_regs(struct cpu_user_regs *regs)
   30.21  {
   30.22      __vmread(GUEST_SS_SELECTOR, &regs->ss);
   30.23      __vmread(GUEST_ESP, &regs->esp);
   30.24 @@ -60,7 +60,7 @@ static void store_xen_regs(struct xen_re
   30.25      __vmread(GUEST_EIP, &regs->eip);
   30.26  }
   30.27  
   30.28 -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
   30.29 +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
   30.30  {                    
   30.31      /*               
   30.32       * Reference the db_reg[] table
   30.33 @@ -468,7 +468,7 @@ static void send_mmio_req(unsigned long 
   30.34      ioreq_t *p;
   30.35      int vm86;
   30.36      struct mi_per_cpu_info *mpci_p;
   30.37 -    struct xen_regs *inst_decoder_regs;
   30.38 +    struct cpu_user_regs *inst_decoder_regs;
   30.39      extern long evtchn_send(int lport);
   30.40      extern long do_block(void);
   30.41  
   30.42 @@ -528,7 +528,7 @@ void handle_mmio(unsigned long va, unsig
   30.43      unsigned long eip, eflags, cs;
   30.44      unsigned long inst_len, inst_addr;
   30.45      struct mi_per_cpu_info *mpci_p;
   30.46 -    struct xen_regs *inst_decoder_regs;
   30.47 +    struct cpu_user_regs *inst_decoder_regs;
   30.48      struct instruction mmio_inst;
   30.49      unsigned char inst[MAX_INST_LEN];
   30.50      int vm86, ret;
   30.51 @@ -569,7 +569,7 @@ void handle_mmio(unsigned long va, unsig
   30.52          domain_crash_synchronous();
   30.53  
   30.54      __vmwrite(GUEST_EIP, eip + inst_len);
   30.55 -    store_xen_regs(inst_decoder_regs);
   30.56 +    store_cpu_user_regs(inst_decoder_regs);
   30.57  
   30.58      // Only handle "mov" and "movs" instructions!
   30.59      if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) {
    31.1 --- a/xen/arch/x86/vmx_vmcs.c	Thu Apr 28 13:52:41 2005 +0000
    31.2 +++ b/xen/arch/x86/vmx_vmcs.c	Thu Apr 28 14:04:13 2005 +0000
    31.3 @@ -100,7 +100,7 @@ struct host_execution_env {
    31.4  
    31.5  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
    31.6  
    31.7 -int vmx_setup_platform(struct exec_domain *d, execution_context_t *context)
    31.8 +int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
    31.9  {
   31.10      int i;
   31.11      unsigned int n;
   31.12 @@ -108,15 +108,15 @@ int vmx_setup_platform(struct exec_domai
   31.13      struct e820entry *e820p;
   31.14      unsigned long gpfn = 0;
   31.15  
   31.16 -    context->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */
   31.17 +    regs->ebx = 0;   /* Linux expects ebx to be 0 for boot proc */
   31.18  
   31.19 -    n = context->ecx;
   31.20 +    n = regs->ecx;
   31.21      if (n > 32) {
   31.22          VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n);
   31.23          return -1;
   31.24      }
   31.25  
   31.26 -    addr = context->edi;
   31.27 +    addr = regs->edi;
   31.28      offset = (addr & ~PAGE_MASK);
   31.29      addr = round_pgdown(addr);
   31.30      mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT);
   31.31 @@ -162,14 +162,14 @@ void vmx_do_launch(struct exec_domain *e
   31.32      struct Xgt_desc_struct desc;
   31.33      unsigned long pfn = 0;
   31.34      struct pfn_info *page;
   31.35 -    execution_context_t *ec = get_execution_context();
   31.36 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   31.37  
   31.38      cpu = smp_processor_id();
   31.39  
   31.40      page = (struct pfn_info *) alloc_domheap_page(NULL);
   31.41      pfn = (unsigned long) (page - frame_table);
   31.42  
   31.43 -    vmx_setup_platform(ed, ec);
   31.44 +    vmx_setup_platform(ed, regs);
   31.45  
   31.46      __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
   31.47      host_env.gdtr_limit = desc.size;
   31.48 @@ -202,8 +202,8 @@ void vmx_do_launch(struct exec_domain *e
   31.49   * Initially set the same environement as host.
   31.50   */
   31.51  static inline int 
   31.52 -construct_init_vmcs_guest(execution_context_t *context, 
   31.53 -                          full_execution_context_t *full_context,
   31.54 +construct_init_vmcs_guest(struct cpu_user_regs *regs, 
   31.55 +                          struct vcpu_guest_context *ctxt,
   31.56                            struct host_execution_env *host_env)
   31.57  {
   31.58      int error = 0;
   31.59 @@ -232,12 +232,12 @@ construct_init_vmcs_guest(execution_cont
   31.60      error |= __vmwrite(CR3_TARGET_COUNT, 0);
   31.61  
   31.62      /* Guest Selectors */
   31.63 -    error |= __vmwrite(GUEST_CS_SELECTOR, context->cs);
   31.64 -    error |= __vmwrite(GUEST_ES_SELECTOR, context->es);
   31.65 -    error |= __vmwrite(GUEST_SS_SELECTOR, context->ss);
   31.66 -    error |= __vmwrite(GUEST_DS_SELECTOR, context->ds);
   31.67 -    error |= __vmwrite(GUEST_FS_SELECTOR, context->fs);
   31.68 -    error |= __vmwrite(GUEST_GS_SELECTOR, context->gs);
   31.69 +    error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs);
   31.70 +    error |= __vmwrite(GUEST_ES_SELECTOR, regs->es);
   31.71 +    error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss);
   31.72 +    error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds);
   31.73 +    error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs);
   31.74 +    error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs);
   31.75  
   31.76      /* Guest segment Limits */
   31.77      error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
   31.78 @@ -268,10 +268,10 @@ construct_init_vmcs_guest(execution_cont
   31.79      arbytes.fields.seg_type = 0xb;          /* type = 0xb */
   31.80      error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
   31.81  
   31.82 -    error |= __vmwrite(GUEST_GDTR_BASE, context->edx);
   31.83 -    context->edx = 0;
   31.84 -    error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax);
   31.85 -    context->eax = 0;
   31.86 +    error |= __vmwrite(GUEST_GDTR_BASE, regs->edx);
   31.87 +    regs->edx = 0;
   31.88 +    error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax);
   31.89 +    regs->eax = 0;
   31.90  
   31.91      arbytes.fields.s = 0;                   /* not code or data segement */
   31.92      arbytes.fields.seg_type = 0x2;          /* LTD */
   31.93 @@ -302,10 +302,10 @@ construct_init_vmcs_guest(execution_cont
   31.94      error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
   31.95      error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
   31.96  
   31.97 -    error |= __vmwrite(GUEST_ESP, context->esp);
   31.98 -    error |= __vmwrite(GUEST_EIP, context->eip);
   31.99 +    error |= __vmwrite(GUEST_ESP, regs->esp);
  31.100 +    error |= __vmwrite(GUEST_EIP, regs->eip);
  31.101  
  31.102 -    eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
  31.103 +    eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
  31.104      eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
  31.105  
  31.106      error |= __vmwrite(GUEST_EFLAGS, eflags);
  31.107 @@ -380,8 +380,8 @@ static inline int construct_vmcs_host(st
  31.108   */
  31.109  
  31.110  int construct_vmcs(struct arch_vmx_struct *arch_vmx,
  31.111 -                   execution_context_t *context,
  31.112 -                   full_execution_context_t *full_context,
  31.113 +                   struct cpu_user_regs *regs,
  31.114 +                   struct vcpu_guest_context *ctxt,
  31.115                     int use_host_env)
  31.116  {
  31.117      int error;
  31.118 @@ -415,7 +415,7 @@ int construct_vmcs(struct arch_vmx_struc
  31.119          return -EINVAL;         
  31.120      }
  31.121      /* guest selectors */
  31.122 -    if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) {
  31.123 +    if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) {
  31.124          printk("construct_vmcs: construct_vmcs_guest failed\n");
  31.125          return -EINVAL;         
  31.126      }       
    32.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Apr 28 13:52:41 2005 +0000
    32.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Apr 28 14:04:13 2005 +0000
    32.3 @@ -24,26 +24,26 @@
    32.4  
    32.5  void __dummy__(void)
    32.6  {
    32.7 -    OFFSET(XREGS_eax, struct xen_regs, eax);
    32.8 -    OFFSET(XREGS_ebx, struct xen_regs, ebx);
    32.9 -    OFFSET(XREGS_ecx, struct xen_regs, ecx);
   32.10 -    OFFSET(XREGS_edx, struct xen_regs, edx);
   32.11 -    OFFSET(XREGS_esi, struct xen_regs, esi);
   32.12 -    OFFSET(XREGS_edi, struct xen_regs, edi);
   32.13 -    OFFSET(XREGS_esp, struct xen_regs, esp);
   32.14 -    OFFSET(XREGS_ebp, struct xen_regs, ebp);
   32.15 -    OFFSET(XREGS_eip, struct xen_regs, eip);
   32.16 -    OFFSET(XREGS_cs, struct xen_regs, cs);
   32.17 -    OFFSET(XREGS_ds, struct xen_regs, ds);
   32.18 -    OFFSET(XREGS_es, struct xen_regs, es);
   32.19 -    OFFSET(XREGS_fs, struct xen_regs, fs);
   32.20 -    OFFSET(XREGS_gs, struct xen_regs, gs);
   32.21 -    OFFSET(XREGS_ss, struct xen_regs, ss);
   32.22 -    OFFSET(XREGS_eflags, struct xen_regs, eflags);
   32.23 -    OFFSET(XREGS_error_code, struct xen_regs, error_code);
   32.24 -    OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
   32.25 -    OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp);
   32.26 -    DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
   32.27 +    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
   32.28 +    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
   32.29 +    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
   32.30 +    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
   32.31 +    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
   32.32 +    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
   32.33 +    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
   32.34 +    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
   32.35 +    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
   32.36 +    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
   32.37 +    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
   32.38 +    OFFSET(UREGS_es, struct cpu_user_regs, es);
   32.39 +    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
   32.40 +    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
   32.41 +    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
   32.42 +    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
   32.43 +    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
   32.44 +    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
   32.45 +    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
   32.46 +    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
   32.47      BLANK();
   32.48  
   32.49      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    33.1 --- a/xen/arch/x86/x86_32/call_with_regs.S	Thu Apr 28 13:52:41 2005 +0000
    33.2 +++ b/xen/arch/x86/x86_32/call_with_regs.S	Thu Apr 28 14:04:13 2005 +0000
    33.3 @@ -2,35 +2,35 @@
    33.4  
    33.5  #include <asm/asm-offsets.h>
    33.6  
    33.7 -	// int call_with_registers(void (*f)(struct xen_regs *r)) ->
    33.8 -	// build a xen_regs structure, and then call f with that.
    33.9 +	// int call_with_registers(void (*f)(struct cpu_user_regs *r)) ->
   33.10 +	// build a cpu_user_regs structure, and then call f with that.
   33.11  call_with_registers:
   33.12  	pushf
   33.13 -	subl $XREGS_user_sizeof, %esp
   33.14 -	movl %ebx, XREGS_ebx(%esp)
   33.15 -	movl %ecx, XREGS_ecx(%esp)
   33.16 -	movl %edx, XREGS_edx(%esp)
   33.17 -	movl %esi, XREGS_esi(%esp)
   33.18 -	movl %edi, XREGS_edi(%esp)
   33.19 -	movl %ebp, XREGS_ebp(%esp)
   33.20 -	movl %eax, XREGS_eax(%esp)
   33.21 -	movw $0, XREGS_error_code(%esp)
   33.22 -	movw $0, XREGS_entry_vector(%esp)
   33.23 -	movl XREGS_user_sizeof+4(%esp), %eax
   33.24 -	movl %eax, XREGS_eip(%esp)
   33.25 -	movl %cs, XREGS_cs(%esp)
   33.26 -	movl XREGS_user_sizeof(%esp), %eax
   33.27 -	movl %eax, XREGS_eflags(%esp)
   33.28 -	movl %esp, XREGS_esp(%esp)
   33.29 -	addl $XREGS_user_sizeof+4, XREGS_esp(%esp)
   33.30 -	movl %ss, XREGS_ss(%esp)
   33.31 -	movl %es, XREGS_es(%esp)
   33.32 -	movl %ds, XREGS_ds(%esp)
   33.33 -	movl %fs, XREGS_fs(%esp)
   33.34 -	movl %gs, XREGS_gs(%esp)
   33.35 +	subl $UREGS_user_sizeof, %esp
   33.36 +	movl %ebx, UREGS_ebx(%esp)
   33.37 +	movl %ecx, UREGS_ecx(%esp)
   33.38 +	movl %edx, UREGS_edx(%esp)
   33.39 +	movl %esi, UREGS_esi(%esp)
   33.40 +	movl %edi, UREGS_edi(%esp)
   33.41 +	movl %ebp, UREGS_ebp(%esp)
   33.42 +	movl %eax, UREGS_eax(%esp)
   33.43 +	movw $0, UREGS_error_code(%esp)
   33.44 +	movw $0, UREGS_entry_vector(%esp)
   33.45 +	movl UREGS_user_sizeof+4(%esp), %eax
   33.46 +	movl %eax, UREGS_eip(%esp)
   33.47 +	movl %cs, UREGS_cs(%esp)
   33.48 +	movl UREGS_user_sizeof(%esp), %eax
   33.49 +	movl %eax, UREGS_eflags(%esp)
   33.50 +	movl %esp, UREGS_esp(%esp)
   33.51 +	addl $UREGS_user_sizeof+4, UREGS_esp(%esp)
   33.52 +	movl %ss, UREGS_ss(%esp)
   33.53 +	movl %es, UREGS_es(%esp)
   33.54 +	movl %ds, UREGS_ds(%esp)
   33.55 +	movl %fs, UREGS_fs(%esp)
   33.56 +	movl %gs, UREGS_gs(%esp)
   33.57  
   33.58 -	movl XREGS_user_sizeof+8(%esp), %eax
   33.59 +	movl UREGS_user_sizeof+8(%esp), %eax
   33.60  	pushl %esp
   33.61  	call *%eax
   33.62 -	add $XREGS_user_sizeof + 8, %esp
   33.63 +	add $UREGS_user_sizeof + 8, %esp
   33.64  	ret
    34.1 --- a/xen/arch/x86/x86_32/entry.S	Thu Apr 28 13:52:41 2005 +0000
    34.2 +++ b/xen/arch/x86/x86_32/entry.S	Thu Apr 28 14:04:13 2005 +0000
    34.3 @@ -76,7 +76,7 @@
    34.4   * and we set it to the fixed value.
    34.5   *
    34.6   * We also need the room, especially because orig_eax field is used 
    34.7 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
    34.8 + * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
    34.9   *   (10) u32 gs;                 
   34.10   *   (9)  u32 fs;
   34.11   *   (8)  u32 ds;
   34.12 @@ -99,7 +99,7 @@
   34.13          pushl $VMX_MONITOR_EFLAGS; \
   34.14          popf; \
   34.15          subl $(NR_SKIPPED_REGS*4), %esp; \
   34.16 -        movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \
   34.17 +        movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \
   34.18          pushl %eax; \
   34.19          pushl %ebp; \
   34.20          pushl %edi; \
   34.21 @@ -174,12 +174,12 @@ vmx_process_softirqs:
   34.22  
   34.23          ALIGN
   34.24  restore_all_guest:
   34.25 -        testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
   34.26 +        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
   34.27          jnz  restore_all_vm86
   34.28 -FLT1:   movl XREGS_ds(%esp),%ds
   34.29 -FLT2:   movl XREGS_es(%esp),%es
   34.30 -FLT3:   movl XREGS_fs(%esp),%fs
   34.31 -FLT4:   movl XREGS_gs(%esp),%gs
   34.32 +FLT1:   movl UREGS_ds(%esp),%ds
   34.33 +FLT2:   movl UREGS_es(%esp),%es
   34.34 +FLT3:   movl UREGS_fs(%esp),%fs
   34.35 +FLT4:   movl UREGS_gs(%esp),%gs
   34.36  restore_all_vm86:
   34.37          popl %ebx
   34.38          popl %ecx
   34.39 @@ -193,13 +193,13 @@ FLT5:   iret
   34.40  .section .fixup,"ax"
   34.41  FIX5:   subl  $28,%esp
   34.42          pushl 28(%esp)                 # error_code/entry_vector
   34.43 -        movl  %eax,XREGS_eax+4(%esp)
   34.44 -        movl  %ebp,XREGS_ebp+4(%esp)
   34.45 -        movl  %edi,XREGS_edi+4(%esp)
   34.46 -        movl  %esi,XREGS_esi+4(%esp)
   34.47 -        movl  %edx,XREGS_edx+4(%esp)
   34.48 -        movl  %ecx,XREGS_ecx+4(%esp)
   34.49 -        movl  %ebx,XREGS_ebx+4(%esp)
   34.50 +        movl  %eax,UREGS_eax+4(%esp)
   34.51 +        movl  %ebp,UREGS_ebp+4(%esp)
   34.52 +        movl  %edi,UREGS_edi+4(%esp)
   34.53 +        movl  %esi,UREGS_esi+4(%esp)
   34.54 +        movl  %edx,UREGS_edx+4(%esp)
   34.55 +        movl  %ecx,UREGS_ecx+4(%esp)
   34.56 +        movl  %ebx,UREGS_ebx+4(%esp)
   34.57  FIX1:   SET_XEN_SEGMENTS(a)
   34.58          movl  %eax,%fs
   34.59          movl  %eax,%gs
   34.60 @@ -224,10 +224,10 @@ failsafe_callback:
   34.61          movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
   34.62          call  create_bounce_frame
   34.63          xorl  %eax,%eax
   34.64 -        movl  %eax,XREGS_ds(%esp)
   34.65 -        movl  %eax,XREGS_es(%esp)
   34.66 -        movl  %eax,XREGS_fs(%esp)
   34.67 -        movl  %eax,XREGS_gs(%esp)
   34.68 +        movl  %eax,UREGS_ds(%esp)
   34.69 +        movl  %eax,UREGS_es(%esp)
   34.70 +        movl  %eax,UREGS_fs(%esp)
   34.71 +        movl  %eax,UREGS_gs(%esp)
   34.72          jmp   test_all_events
   34.73  .previous
   34.74  .section __pre_ex_table,"a"
   34.75 @@ -262,7 +262,7 @@ ENTRY(hypercall)
   34.76          andl $(NR_hypercalls-1),%eax
   34.77          PERFC_INCR(PERFC_hypercalls, %eax)
   34.78          call *SYMBOL_NAME(hypercall_table)(,%eax,4)
   34.79 -        movl %eax,XREGS_eax(%esp)       # save the return value
   34.80 +        movl %eax,UREGS_eax(%esp)       # save the return value
   34.81  
   34.82  test_all_events:
   34.83          xorl %ecx,%ecx
   34.84 @@ -301,41 +301,41 @@ process_softirqs:
   34.85  /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
   34.86  /*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
   34.87  /* %edx == trap_bounce, %ebx == struct exec_domain                       */
   34.88 -/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
   34.89 +/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
   34.90  create_bounce_frame:
   34.91 -        movl XREGS_eflags+4(%esp),%ecx
   34.92 -        movb XREGS_cs+4(%esp),%cl
   34.93 +        movl UREGS_eflags+4(%esp),%ecx
   34.94 +        movb UREGS_cs+4(%esp),%cl
   34.95          testl $(2|X86_EFLAGS_VM),%ecx
   34.96          jz   ring1 /* jump if returning to an existing ring-1 activation */
   34.97          movl EDOMAIN_kernel_sp(%ebx),%esi
   34.98  FLT6:   movl EDOMAIN_kernel_ss(%ebx),%gs
   34.99 -        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  34.100 +        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  34.101          jz   nvm86_1
  34.102          subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
  34.103 -        movl XREGS_es+4(%esp),%eax
  34.104 +        movl UREGS_es+4(%esp),%eax
  34.105  FLT7:   movl %eax,%gs:(%esi)
  34.106 -        movl XREGS_ds+4(%esp),%eax
  34.107 +        movl UREGS_ds+4(%esp),%eax
  34.108  FLT8:   movl %eax,%gs:4(%esi)
  34.109 -        movl XREGS_fs+4(%esp),%eax
  34.110 +        movl UREGS_fs+4(%esp),%eax
  34.111  FLT9:   movl %eax,%gs:8(%esi)
  34.112 -        movl XREGS_gs+4(%esp),%eax
  34.113 +        movl UREGS_gs+4(%esp),%eax
  34.114  FLT10:  movl %eax,%gs:12(%esi)
  34.115  nvm86_1:subl $8,%esi        /* push SS/ESP (inter-priv iret) */
  34.116 -        movl XREGS_esp+4(%esp),%eax
  34.117 +        movl UREGS_esp+4(%esp),%eax
  34.118  FLT11:  movl %eax,%gs:(%esi) 
  34.119 -        movl XREGS_ss+4(%esp),%eax
  34.120 +        movl UREGS_ss+4(%esp),%eax
  34.121  FLT12:  movl %eax,%gs:4(%esi) 
  34.122          jmp 1f
  34.123  ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
  34.124 -        movl XREGS_esp+4(%esp),%esi
  34.125 -FLT13:  movl XREGS_ss+4(%esp),%gs 
  34.126 +        movl UREGS_esp+4(%esp),%esi
  34.127 +FLT13:  movl UREGS_ss+4(%esp),%gs 
  34.128  1:      /* Construct a stack frame: EFLAGS, CS/EIP */
  34.129          subl $12,%esi
  34.130 -        movl XREGS_eip+4(%esp),%eax
  34.131 +        movl UREGS_eip+4(%esp),%eax
  34.132  FLT14:  movl %eax,%gs:(%esi) 
  34.133 -        movl XREGS_cs+4(%esp),%eax
  34.134 +        movl UREGS_cs+4(%esp),%eax
  34.135  FLT15:  movl %eax,%gs:4(%esi) 
  34.136 -        movl XREGS_eflags+4(%esp),%eax
  34.137 +        movl UREGS_eflags+4(%esp),%eax
  34.138  FLT16:  movl %eax,%gs:8(%esi)
  34.139          movb TRAPBOUNCE_flags(%edx),%cl
  34.140          test $TBF_EXCEPTION_ERRCODE,%cl
  34.141 @@ -351,7 +351,7 @@ FLT18:  movl %eax,%gs:(%esi)
  34.142  1:      testb $TBF_FAILSAFE,%cl
  34.143          jz   2f
  34.144          subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
  34.145 -        testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  34.146 +        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  34.147          jz   nvm86_2
  34.148          xorl %eax,%eax               # VM86: we write zero selector values
  34.149  FLT19:  movl %eax,%gs:(%esi) 
  34.150 @@ -359,30 +359,30 @@ FLT20:  movl %eax,%gs:4(%esi)
  34.151  FLT21:  movl %eax,%gs:8(%esi) 
  34.152  FLT22:  movl %eax,%gs:12(%esi)
  34.153          jmp  2f
  34.154 -nvm86_2:movl XREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
  34.155 +nvm86_2:movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
  34.156  FLT23:  movl %eax,%gs:(%esi) 
  34.157 -        movl XREGS_es+4(%esp),%eax
  34.158 +        movl UREGS_es+4(%esp),%eax
  34.159  FLT24:  movl %eax,%gs:4(%esi)
  34.160 -        movl XREGS_fs+4(%esp),%eax
  34.161 +        movl UREGS_fs+4(%esp),%eax
  34.162  FLT25:  movl %eax,%gs:8(%esi) 
  34.163 -        movl XREGS_gs+4(%esp),%eax
  34.164 +        movl UREGS_gs+4(%esp),%eax
  34.165  FLT26:  movl %eax,%gs:12(%esi)
  34.166 -2:      testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
  34.167 +2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
  34.168          jz   nvm86_3
  34.169          xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
  34.170 -        movl %eax,XREGS_ds+4(%esp)
  34.171 -        movl %eax,XREGS_es+4(%esp)
  34.172 -        movl %eax,XREGS_fs+4(%esp)
  34.173 -        movl %eax,XREGS_gs+4(%esp)
  34.174 +        movl %eax,UREGS_ds+4(%esp)
  34.175 +        movl %eax,UREGS_es+4(%esp)
  34.176 +        movl %eax,UREGS_fs+4(%esp)
  34.177 +        movl %eax,UREGS_gs+4(%esp)
  34.178  nvm86_3:/* Rewrite our stack frame and return to ring 1. */
  34.179          /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
  34.180 -        andl $0xfffcbeff,XREGS_eflags+4(%esp)
  34.181 -        movl %gs,XREGS_ss+4(%esp)
  34.182 -        movl %esi,XREGS_esp+4(%esp)
  34.183 +        andl $0xfffcbeff,UREGS_eflags+4(%esp)
  34.184 +        movl %gs,UREGS_ss+4(%esp)
  34.185 +        movl %esi,UREGS_esp+4(%esp)
  34.186          movzwl TRAPBOUNCE_cs(%edx),%eax
  34.187 -        movl %eax,XREGS_cs+4(%esp)
  34.188 +        movl %eax,UREGS_cs+4(%esp)
  34.189          movl TRAPBOUNCE_eip(%edx),%eax
  34.190 -        movl %eax,XREGS_eip+4(%esp)
  34.191 +        movl %eax,UREGS_eip+4(%esp)
  34.192          movb $0,TRAPBOUNCE_flags(%edx)
  34.193          ret
  34.194  .section __ex_table,"a"
  34.195 @@ -410,8 +410,8 @@ process_guest_exception_and_events:
  34.196          ALIGN
  34.197  ENTRY(ret_from_intr)
  34.198          GET_CURRENT(%ebx)
  34.199 -        movl  XREGS_eflags(%esp),%eax
  34.200 -        movb  XREGS_cs(%esp),%al
  34.201 +        movl  UREGS_eflags(%esp),%eax
  34.202 +        movb  UREGS_cs(%esp),%al
  34.203          testl $(3|X86_EFLAGS_VM),%eax
  34.204          jnz   test_all_events
  34.205          jmp   restore_all_xen
  34.206 @@ -422,26 +422,26 @@ ENTRY(divide_error)
  34.207  error_code:
  34.208          SAVE_ALL_NOSEGREGS(a)
  34.209          SET_XEN_SEGMENTS(a)
  34.210 -        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
  34.211 +        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
  34.212          jz    exception_with_ints_disabled
  34.213          sti                             # re-enable interrupts
  34.214          xorl  %eax,%eax
  34.215 -        movw  XREGS_entry_vector(%esp),%ax
  34.216 +        movw  UREGS_entry_vector(%esp),%ax
  34.217          movl  %esp,%edx
  34.218 -	pushl %edx			# push the xen_regs pointer
  34.219 +	pushl %edx			# push the cpu_user_regs pointer
  34.220  	GET_CURRENT(%ebx)
  34.221          PERFC_INCR(PERFC_exceptions, %eax)
  34.222  	call  *SYMBOL_NAME(exception_table)(,%eax,4)
  34.223          addl  $4,%esp
  34.224 -        movl  XREGS_eflags(%esp),%eax
  34.225 -        movb  XREGS_cs(%esp),%al
  34.226 +        movl  UREGS_eflags(%esp),%eax
  34.227 +        movb  UREGS_cs(%esp),%al
  34.228          testl $(3|X86_EFLAGS_VM),%eax
  34.229  	jz    restore_all_xen
  34.230          jmp   process_guest_exception_and_events
  34.231  
  34.232  exception_with_ints_disabled:
  34.233 -        movl  XREGS_eflags(%esp),%eax
  34.234 -        movb  XREGS_cs(%esp),%al
  34.235 +        movl  UREGS_eflags(%esp),%eax
  34.236 +        movb  UREGS_cs(%esp),%al
  34.237          testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
  34.238          jnz   FATAL_exception_with_ints_disabled
  34.239          pushl %esp
  34.240 @@ -449,21 +449,21 @@ exception_with_ints_disabled:
  34.241          addl  $4,%esp
  34.242          testl %eax,%eax                 # no fixup code for faulting EIP?
  34.243          jz    FATAL_exception_with_ints_disabled
  34.244 -        movl  %eax,XREGS_eip(%esp)
  34.245 +        movl  %eax,UREGS_eip(%esp)
  34.246          movl  %esp,%esi
  34.247          subl  $4,%esp
  34.248          movl  %esp,%edi
  34.249 -        movl  $XREGS_kernel_sizeof/4,%ecx
  34.250 +        movl  $UREGS_kernel_sizeof/4,%ecx
  34.251          rep;  movsl                     # make room for error_code/entry_vector
  34.252 -        movl  XREGS_error_code(%esp),%eax # error_code/entry_vector
  34.253 -        movl  %eax,XREGS_kernel_sizeof(%esp)
  34.254 +        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
  34.255 +        movl  %eax,UREGS_kernel_sizeof(%esp)
  34.256          jmp   restore_all_xen           # return to fixup code
  34.257  
  34.258  FATAL_exception_with_ints_disabled:
  34.259          xorl  %esi,%esi
  34.260 -        movw  XREGS_entry_vector(%esp),%si
  34.261 +        movw  UREGS_entry_vector(%esp),%si
  34.262          movl  %esp,%edx
  34.263 -	pushl %edx			# push the xen_regs pointer
  34.264 +	pushl %edx			# push the cpu_user_regs pointer
  34.265          pushl %esi                      # push the trapnr (entry vector)
  34.266          call  SYMBOL_NAME(fatal_trap)
  34.267          ud2
  34.268 @@ -557,8 +557,8 @@ ENTRY(nmi)
  34.269          # In all other cases we bail without touching DS-GS, as we have
  34.270          # interrupted an enclosing Xen activation in tricky prologue or
  34.271          # epilogue code.
  34.272 -        movl  XREGS_eflags(%esp),%eax
  34.273 -        movb  XREGS_cs(%esp),%al
  34.274 +        movl  UREGS_eflags(%esp),%eax
  34.275 +        movb  UREGS_cs(%esp),%al
  34.276          testl $(3|X86_EFLAGS_VM),%eax
  34.277          jnz   do_watchdog_tick
  34.278          movl  %ds,%eax
  34.279 @@ -608,8 +608,8 @@ nmi_parity_err:
  34.280          push %edx
  34.281          call SYMBOL_NAME(mem_parity_error)
  34.282          addl $4,%esp
  34.283 -nmi_out:movl  %ss:XREGS_eflags(%esp),%eax
  34.284 -        movb  %ss:XREGS_cs(%esp),%al
  34.285 +nmi_out:movl  %ss:UREGS_eflags(%esp),%eax
  34.286 +        movb  %ss:UREGS_cs(%esp),%al
  34.287          testl $(3|X86_EFLAGS_VM),%eax
  34.288          jz    restore_all_xen
  34.289          movl  $(__HYPERVISOR_DS),%edx
  34.290 @@ -657,27 +657,27 @@ do_switch_vm86:
  34.291          addl $4,%esp
  34.292  
  34.293          # GS:ESI == Ring-1 stack activation
  34.294 -        movl XREGS_esp(%esp),%esi
  34.295 -VFLT1:  movl XREGS_ss(%esp),%gs
  34.296 +        movl UREGS_esp(%esp),%esi
  34.297 +VFLT1:  movl UREGS_ss(%esp),%gs
  34.298  
  34.299          # ES:EDI == Ring-0 stack activation
  34.300 -        leal XREGS_eip(%esp),%edi
  34.301 +        leal UREGS_eip(%esp),%edi
  34.302  
  34.303          # Restore the hypercall-number-clobbered EAX on our stack frame
  34.304  VFLT2:  movl %gs:(%esi),%eax
  34.305 -        movl %eax,XREGS_eax(%esp)
  34.306 +        movl %eax,UREGS_eax(%esp)
  34.307          addl $4,%esi
  34.308          	
  34.309        	# Copy the VM86 activation from the ring-1 stack to the ring-0 stack
  34.310 -        movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
  34.311 +        movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx
  34.312  VFLT3:  movl %gs:(%esi),%eax
  34.313          stosl
  34.314          addl $4,%esi
  34.315          loop VFLT3
  34.316  
  34.317          # Fix up EFLAGS: IOPL=0, IF=1, VM=1
  34.318 -        andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
  34.319 -        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,XREGS_eflags(%esp)
  34.320 +        andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp)
  34.321 +        orl  $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp)
  34.322          
  34.323          jmp test_all_events
  34.324  
    35.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Thu Apr 28 13:52:41 2005 +0000
    35.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Thu Apr 28 14:04:13 2005 +0000
    35.3 @@ -263,7 +263,7 @@ int fixup_seg(u16 seg, unsigned long off
    35.4   * Called from the general-protection fault handler to attempt to decode
    35.5   * and emulate an instruction that depends on 4GB segments.
    35.6   */
    35.7 -int gpf_emulate_4gb(struct xen_regs *regs)
    35.8 +int gpf_emulate_4gb(struct cpu_user_regs *regs)
    35.9  {
   35.10      struct exec_domain *d = current;
   35.11      trap_info_t   *ti;
    36.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Apr 28 13:52:41 2005 +0000
    36.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Apr 28 14:04:13 2005 +0000
    36.3 @@ -29,9 +29,10 @@ static inline int kernel_text_address(un
    36.4  void show_guest_stack(void)
    36.5  {
    36.6      int i;
    36.7 -    execution_context_t *ec = get_execution_context();
    36.8 -    unsigned long *stack = (unsigned long *)ec->esp;
    36.9 -    printk("Guest EIP is %08x\n   ", ec->eip);
   36.10 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   36.11 +    unsigned long *stack = (unsigned long *)regs->esp;
   36.12 +
   36.13 +    printk("Guest EIP is %08x\n   ", regs->eip);
   36.14  
   36.15      for ( i = 0; i < kstack_depth_to_print; i++ )
   36.16      {
   36.17 @@ -89,7 +90,7 @@ void show_stack(unsigned long *esp)
   36.18      show_trace( esp );
   36.19  }
   36.20  
   36.21 -void show_registers(struct xen_regs *regs)
   36.22 +void show_registers(struct cpu_user_regs *regs)
   36.23  {
   36.24      unsigned long ss, ds, es, fs, gs, cs;
   36.25      unsigned long eip, esp, eflags;
   36.26 @@ -215,9 +216,9 @@ asmlinkage void do_double_fault(void)
   36.27  }
   36.28  
   36.29  BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
   36.30 -asmlinkage void smp_deferred_nmi(struct xen_regs regs)
   36.31 +asmlinkage void smp_deferred_nmi(struct cpu_user_regs regs)
   36.32  {
   36.33 -    asmlinkage void do_nmi(struct xen_regs *, unsigned long);
   36.34 +    asmlinkage void do_nmi(struct cpu_user_regs *, unsigned long);
   36.35      ack_APIC_irq();
   36.36      do_nmi(&regs, 0);
   36.37  }
    37.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Apr 28 13:52:41 2005 +0000
    37.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Apr 28 14:04:13 2005 +0000
    37.3 @@ -24,30 +24,30 @@
    37.4  
    37.5  void __dummy__(void)
    37.6  {
    37.7 -    OFFSET(XREGS_r15, struct xen_regs, r15);
    37.8 -    OFFSET(XREGS_r14, struct xen_regs, r14);
    37.9 -    OFFSET(XREGS_r13, struct xen_regs, r13);
   37.10 -    OFFSET(XREGS_r12, struct xen_regs, r12);
   37.11 -    OFFSET(XREGS_rbp, struct xen_regs, rbp);
   37.12 -    OFFSET(XREGS_rbx, struct xen_regs, rbx);
   37.13 -    OFFSET(XREGS_r11, struct xen_regs, r11);
   37.14 -    OFFSET(XREGS_r10, struct xen_regs, r10);
   37.15 -    OFFSET(XREGS_r9, struct xen_regs, r9);
   37.16 -    OFFSET(XREGS_r8, struct xen_regs, r8);
   37.17 -    OFFSET(XREGS_rax, struct xen_regs, rax);
   37.18 -    OFFSET(XREGS_rcx, struct xen_regs, rcx);
   37.19 -    OFFSET(XREGS_rdx, struct xen_regs, rdx);
   37.20 -    OFFSET(XREGS_rsi, struct xen_regs, rsi);
   37.21 -    OFFSET(XREGS_rdi, struct xen_regs, rdi);
   37.22 -    OFFSET(XREGS_error_code, struct xen_regs, error_code);
   37.23 -    OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
   37.24 -    OFFSET(XREGS_rip, struct xen_regs, rip);
   37.25 -    OFFSET(XREGS_cs, struct xen_regs, cs);
   37.26 -    OFFSET(XREGS_eflags, struct xen_regs, eflags);
   37.27 -    OFFSET(XREGS_rsp, struct xen_regs, rsp);
   37.28 -    OFFSET(XREGS_ss, struct xen_regs, ss);
   37.29 -    OFFSET(XREGS_kernel_sizeof, struct xen_regs, es);
   37.30 -    DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
   37.31 +    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
   37.32 +    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
   37.33 +    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
   37.34 +    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
   37.35 +    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
   37.36 +    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
   37.37 +    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
   37.38 +    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
   37.39 +    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
   37.40 +    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
   37.41 +    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
   37.42 +    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
   37.43 +    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
   37.44 +    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
   37.45 +    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
   37.46 +    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
   37.47 +    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
   37.48 +    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
   37.49 +    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
   37.50 +    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
   37.51 +    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
   37.52 +    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
   37.53 +    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
   37.54 +    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
   37.55      BLANK();
   37.56  
   37.57      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    38.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Apr 28 13:52:41 2005 +0000
    38.2 +++ b/xen/arch/x86/x86_64/entry.S	Thu Apr 28 14:04:13 2005 +0000
    38.3 @@ -123,7 +123,7 @@ ENTRY(syscall_enter)
    38.4          leaq  SYMBOL_NAME(hypercall_table)(%rip),%r10
    38.5          PERFC_INCR(PERFC_hypercalls, %rax)
    38.6          callq *(%r10,%rax,8)
    38.7 -        movq %rax,XREGS_rax(%rsp)       # save the return value
    38.8 +        movq %rax,UREGS_rax(%rsp)       # save the return value
    38.9  
   38.10  /* %rbx: struct exec_domain */
   38.11  test_all_events:
   38.12 @@ -160,7 +160,7 @@ test_all_events:
   38.13   * and we set it to the fixed value.
   38.14   *
   38.15   * We also need the room, especially because orig_eax field is used 
   38.16 - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following:
   38.17 + * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
   38.18   *   (13) u64 gs_base_user;                 
   38.19   *   (12) u64 gs_base_kernel;                 
   38.20   *   (11) u64 fs_base;                 
   38.21 @@ -303,8 +303,8 @@ create_bounce_frame:
   38.22          movq  EDOMAIN_kernel_sp(%rbx),%rsi
   38.23          jmp   2f
   38.24  1:      /* In kernel context already: push new frame at existing %rsp. */
   38.25 -        movq  XREGS_rsp+8(%rsp),%rsi
   38.26 -        andb  $0xfc,XREGS_cs+8(%rsp)    # Indicate kernel context to guest.
   38.27 +        movq  UREGS_rsp+8(%rsp),%rsi
   38.28 +        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
   38.29  2:      movq  $HYPERVISOR_VIRT_START,%rax
   38.30          cmpq  %rax,%rsi
   38.31          jb    1f                        # In +ve address space? Then okay.
   38.32 @@ -312,15 +312,15 @@ 2:      movq  $HYPERVISOR_VIRT_START,%ra
   38.33          cmpq  %rax,%rsi
   38.34          jb    domain_crash_synchronous  # Above Xen private area? Then okay.
   38.35  1:      subq  $40,%rsi
   38.36 -        movq  XREGS_ss+8(%rsp),%rax
   38.37 +        movq  UREGS_ss+8(%rsp),%rax
   38.38  FLT2:   movq  %rax,32(%rsi)             # SS
   38.39 -        movq  XREGS_rsp+8(%rsp),%rax
   38.40 +        movq  UREGS_rsp+8(%rsp),%rax
   38.41  FLT3:   movq  %rax,24(%rsi)             # RSP
   38.42 -        movq  XREGS_eflags+8(%rsp),%rax
   38.43 +        movq  UREGS_eflags+8(%rsp),%rax
   38.44  FLT4:   movq  %rax,16(%rsi)             # RFLAGS
   38.45 -        movq  XREGS_cs+8(%rsp),%rax
   38.46 +        movq  UREGS_cs+8(%rsp),%rax
   38.47  FLT5:   movq  %rax,8(%rsi)              # CS
   38.48 -        movq  XREGS_rip+8(%rsp),%rax
   38.49 +        movq  UREGS_rip+8(%rsp),%rax
   38.50  FLT6:   movq  %rax,(%rsi)               # RIP
   38.51          movb  TRAPBOUNCE_flags(%rdx),%cl
   38.52          testb $TBF_EXCEPTION_ERRCODE,%cl
   38.53 @@ -345,19 +345,19 @@ FLT11:  movq  %rax,8(%rsi)              
   38.54          movl  %ds,%eax
   38.55  FLT12:  movq  %rax,(%rsi)               # DS
   38.56  2:      subq  $16,%rsi
   38.57 -        movq  XREGS_r11+8(%rsp),%rax
   38.58 +        movq  UREGS_r11+8(%rsp),%rax
   38.59  FLT13:  movq  %rax,8(%rsi)              # R11
   38.60 -        movq  XREGS_rcx+8(%rsp),%rax
   38.61 +        movq  UREGS_rcx+8(%rsp),%rax
   38.62  FLT14:  movq  %rax,(%rsi)               # RCX
   38.63          /* Rewrite our stack frame and return to guest-OS mode. */
   38.64          /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
   38.65 -        movq  $TRAP_syscall,XREGS_entry_vector+8(%rsp)
   38.66 -        andl  $0xfffcbeff,XREGS_eflags+8(%rsp)
   38.67 -        movq  $__GUEST_SS,XREGS_ss+8(%rsp)
   38.68 -        movq  %rsi,XREGS_rsp+8(%rsp)
   38.69 -        movq  $__GUEST_CS,XREGS_cs+8(%rsp)
   38.70 +        movq  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
   38.71 +        andl  $0xfffcbeff,UREGS_eflags+8(%rsp)
   38.72 +        movq  $__GUEST_SS,UREGS_ss+8(%rsp)
   38.73 +        movq  %rsi,UREGS_rsp+8(%rsp)
   38.74 +        movq  $__GUEST_CS,UREGS_cs+8(%rsp)
   38.75          movq  TRAPBOUNCE_eip(%rdx),%rax
   38.76 -        movq  %rax,XREGS_rip+8(%rsp)
   38.77 +        movq  %rax,UREGS_rip+8(%rsp)
   38.78          movb  $0,TRAPBOUNCE_flags(%rdx)
   38.79          ret
   38.80  .section __ex_table,"a"
   38.81 @@ -383,7 +383,7 @@ process_guest_exception_and_events:
   38.82  /* No special register assumptions. */
   38.83  ENTRY(ret_from_intr)
   38.84          GET_CURRENT(%rbx)
   38.85 -        testb $3,XREGS_cs(%rsp)
   38.86 +        testb $3,UREGS_cs(%rsp)
   38.87          jnz   test_all_events
   38.88          jmp   restore_all_xen
   38.89  
   38.90 @@ -391,43 +391,43 @@ ENTRY(ret_from_intr)
   38.91  /* No special register assumptions. */
   38.92  error_code:
   38.93          SAVE_ALL
   38.94 -        testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp)
   38.95 +        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
   38.96          jz    exception_with_ints_disabled
   38.97          sti
   38.98          movq  %rsp,%rdi
   38.99 -        movl  XREGS_entry_vector(%rsp),%eax
  38.100 +        movl  UREGS_entry_vector(%rsp),%eax
  38.101          leaq  SYMBOL_NAME(exception_table)(%rip),%rdx
  38.102          GET_CURRENT(%rbx)
  38.103          PERFC_INCR(PERFC_exceptions, %rax)
  38.104          callq *(%rdx,%rax,8)
  38.105 -        testb $3,XREGS_cs(%rsp)
  38.106 +        testb $3,UREGS_cs(%rsp)
  38.107          jz    restore_all_xen
  38.108          jmp   process_guest_exception_and_events
  38.109  
  38.110  /* No special register assumptions. */
  38.111  exception_with_ints_disabled:
  38.112 -        testb $3,XREGS_cs(%rsp)         # interrupts disabled outside Xen?
  38.113 +        testb $3,UREGS_cs(%rsp)         # interrupts disabled outside Xen?
  38.114          jnz   FATAL_exception_with_ints_disabled
  38.115          movq  %rsp,%rdi
  38.116          call  search_pre_exception_table
  38.117          testq %rax,%rax                 # no fixup code for faulting EIP?
  38.118          jz    FATAL_exception_with_ints_disabled
  38.119 -        movq  %rax,XREGS_rip(%rsp)
  38.120 -        subq  $8,XREGS_rsp(%rsp)        # add ec/ev to previous stack frame
  38.121 -        testb $15,XREGS_rsp(%rsp)       # return %rsp is now aligned?
  38.122 +        movq  %rax,UREGS_rip(%rsp)
  38.123 +        subq  $8,UREGS_rsp(%rsp)        # add ec/ev to previous stack frame
  38.124 +        testb $15,UREGS_rsp(%rsp)       # return %rsp is now aligned?
  38.125          jz    1f                        # then there is a pad quadword already
  38.126          movq  %rsp,%rsi
  38.127          subq  $8,%rsp
  38.128          movq  %rsp,%rdi
  38.129 -        movq  $XREGS_kernel_sizeof/8,%rcx
  38.130 +        movq  $UREGS_kernel_sizeof/8,%rcx
  38.131          rep;  movsq                     # make room for ec/ev
  38.132 -1:      movq  XREGS_error_code(%rsp),%rax # ec/ev
  38.133 -        movq  %rax,XREGS_kernel_sizeof(%rsp)
  38.134 +1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
  38.135 +        movq  %rax,UREGS_kernel_sizeof(%rsp)
  38.136          jmp   restore_all_xen           # return to fixup code
  38.137  
  38.138  /* No special register assumptions. */
  38.139  FATAL_exception_with_ints_disabled:
  38.140 -        movl  XREGS_entry_vector(%rsp),%edi
  38.141 +        movl  UREGS_entry_vector(%rsp),%edi
  38.142          movq  %rsp,%rsi
  38.143          call  SYMBOL_NAME(fatal_trap)
  38.144          ud2
    39.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Apr 28 13:52:41 2005 +0000
    39.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Apr 28 14:04:13 2005 +0000
    39.3 @@ -253,19 +253,19 @@ long do_set_segment_base(unsigned int wh
    39.4      switch ( which )
    39.5      {
    39.6      case SEGBASE_FS:
    39.7 -        ed->arch.user_ctxt.fs_base = base;
    39.8 +        ed->arch.user_regs.fs_base = base;
    39.9          if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
   39.10              ret = -EFAULT;
   39.11          break;
   39.12  
   39.13      case SEGBASE_GS_USER:
   39.14 -        ed->arch.user_ctxt.gs_base_user = base;
   39.15 +        ed->arch.user_regs.gs_base_user = base;
   39.16          if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
   39.17              ret = -EFAULT;
   39.18          break;
   39.19  
   39.20      case SEGBASE_GS_KERNEL:
   39.21 -        ed->arch.user_ctxt.gs_base_kernel = base;
   39.22 +        ed->arch.user_regs.gs_base_kernel = base;
   39.23          if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
   39.24              ret = -EFAULT;
   39.25          break;
    40.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Apr 28 13:52:41 2005 +0000
    40.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Apr 28 14:04:13 2005 +0000
    40.3 @@ -24,9 +24,10 @@ static inline int kernel_text_address(un
    40.4  void show_guest_stack(void)
    40.5  {
    40.6      int i;
    40.7 -    execution_context_t *ec = get_execution_context();
    40.8 -    unsigned long *stack = (unsigned long *)ec->rsp;
    40.9 -    printk("Guest RIP is %016lx\n   ", ec->rip);
   40.10 +    struct cpu_user_regs *regs = get_cpu_user_regs();
   40.11 +    unsigned long *stack = (unsigned long *)regs->rsp;
   40.12 +
   40.13 +    printk("Guest RIP is %016lx\n   ", regs->rip);
   40.14  
   40.15      for ( i = 0; i < kstack_depth_to_print; i++ )
   40.16      {
   40.17 @@ -84,7 +85,7 @@ void show_stack(unsigned long *rsp)
   40.18      show_trace(rsp);
   40.19  }
   40.20  
   40.21 -void show_registers(struct xen_regs *regs)
   40.22 +void show_registers(struct cpu_user_regs *regs)
   40.23  {
   40.24      printk("CPU:    %d\nEIP:    %04lx:[<%016lx>]      \nEFLAGS: %016lx\n",
   40.25             smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
   40.26 @@ -130,7 +131,7 @@ void show_page_walk(unsigned long addr)
   40.27  }
   40.28  
   40.29  asmlinkage void double_fault(void);
   40.30 -asmlinkage void do_double_fault(struct xen_regs *regs)
   40.31 +asmlinkage void do_double_fault(struct cpu_user_regs *regs)
   40.32  {
   40.33      /* Disable the NMI watchdog. It's useless now. */
   40.34      watchdog_on = 0;
    41.1 --- a/xen/arch/x86/x86_emulate.c	Thu Apr 28 13:52:41 2005 +0000
    41.2 +++ b/xen/arch/x86/x86_emulate.c	Thu Apr 28 14:04:13 2005 +0000
    41.3 @@ -377,7 +377,7 @@ do{ __asm__ __volatile__ (              
    41.4  
    41.5  void *
    41.6  decode_register(
    41.7 -    u8 modrm_reg, struct xen_regs *regs, int highbyte_regs)
    41.8 +    u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
    41.9  {
   41.10      void *p;
   41.11  
   41.12 @@ -417,7 +417,7 @@ decode_register(
   41.13  
   41.14  int 
   41.15  x86_emulate_memop(
   41.16 -    struct xen_regs *regs,
   41.17 +    struct cpu_user_regs *regs,
   41.18      unsigned long cr2,
   41.19      struct x86_mem_emulator *ops,
   41.20      int mode)
   41.21 @@ -430,7 +430,7 @@ x86_emulate_memop(
   41.22      struct operand src, dst;
   41.23  
   41.24      /* Shadow copy of register state. Committed on successful emulation. */
   41.25 -    struct xen_regs _regs = *regs;
   41.26 +    struct cpu_user_regs _regs = *regs;
   41.27  
   41.28      /* Legacy prefixes. */
   41.29      for ( i = 0; i < 8; i++ )
    42.1 --- a/xen/common/dom0_ops.c	Thu Apr 28 13:52:41 2005 +0000
    42.2 +++ b/xen/common/dom0_ops.c	Thu Apr 28 14:04:13 2005 +0000
    42.3 @@ -21,7 +21,7 @@
    42.4  
    42.5  extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
    42.6  extern void arch_getdomaininfo_ctxt(
    42.7 -    struct exec_domain *, full_execution_context_t *);
    42.8 +    struct exec_domain *, struct vcpu_guest_context *);
    42.9  
   42.10  static inline int is_free_domid(domid_t dom)
   42.11  {
   42.12 @@ -279,7 +279,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   42.13  
   42.14      case DOM0_GETDOMAININFO:
   42.15      { 
   42.16 -        full_execution_context_t *c;
   42.17 +        struct vcpu_guest_context *c;
   42.18          struct domain            *d;
   42.19          struct exec_domain       *ed;
   42.20  
   42.21 @@ -331,7 +331,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   42.22  
   42.23          if ( op->u.getdomaininfo.ctxt != NULL )
   42.24          {
   42.25 -            if ( (c = xmalloc(full_execution_context_t)) == NULL )
   42.26 +            if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   42.27              {
   42.28                  ret = -ENOMEM;
   42.29                  put_domain(d);
    43.1 --- a/xen/common/domain.c	Thu Apr 28 13:52:41 2005 +0000
    43.2 +++ b/xen/common/domain.c	Thu Apr 28 14:04:13 2005 +0000
    43.3 @@ -231,7 +231,7 @@ void domain_destruct(struct domain *d)
    43.4  int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
    43.5  {
    43.6      int rc = 0;
    43.7 -    full_execution_context_t *c = NULL;
    43.8 +    struct vcpu_guest_context *c = NULL;
    43.9      unsigned long vcpu = setdomaininfo->exec_domain;
   43.10      struct exec_domain *ed; 
   43.11  
   43.12 @@ -242,7 +242,7 @@ int set_info_guest(struct domain *p, dom
   43.13          !test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
   43.14          return -EINVAL;
   43.15  
   43.16 -    if ( (c = xmalloc(full_execution_context_t)) == NULL )
   43.17 +    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   43.18          return -ENOMEM;
   43.19  
   43.20      if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
   43.21 @@ -266,12 +266,12 @@ int set_info_guest(struct domain *p, dom
   43.22   * than domain 0. ie. the domains that are being built by the userspace dom0
   43.23   * domain builder.
   43.24   */
   43.25 -long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) 
   43.26 +long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) 
   43.27  {
   43.28      struct domain *d = current->domain;
   43.29      struct exec_domain *ed;
   43.30      int rc = 0;
   43.31 -    full_execution_context_t *c;
   43.32 +    struct vcpu_guest_context *c;
   43.33  
   43.34      if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
   43.35          return -EINVAL;
   43.36 @@ -279,7 +279,7 @@ long do_boot_vcpu(unsigned long vcpu, fu
   43.37      if ( alloc_exec_domain_struct(d, vcpu) == NULL )
   43.38          return -ENOMEM;
   43.39  
   43.40 -    if ( (c = xmalloc(full_execution_context_t)) == NULL )
   43.41 +    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
   43.42      {
   43.43          rc = -ENOMEM;
   43.44          goto out;
    44.1 --- a/xen/common/keyhandler.c	Thu Apr 28 13:52:41 2005 +0000
    44.2 +++ b/xen/common/keyhandler.c	Thu Apr 28 14:04:13 2005 +0000
    44.3 @@ -36,7 +36,7 @@ static void keypress_softirq(void)
    44.4          (*h)(key);
    44.5  }
    44.6  
    44.7 -void handle_keypress(unsigned char key, struct xen_regs *regs)
    44.8 +void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
    44.9  {
   44.10      irq_keyhandler_t *h;
   44.11  
   44.12 @@ -83,13 +83,13 @@ static void show_handlers(unsigned char 
   44.13                     key_table[i].desc);
   44.14  }
   44.15  
   44.16 -static void dump_registers(unsigned char key, struct xen_regs *regs)
   44.17 +static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
   44.18  {
   44.19      printk("'%c' pressed -> dumping registers\n", key); 
   44.20      show_registers(regs); 
   44.21  }
   44.22  
   44.23 -static void halt_machine(unsigned char key, struct xen_regs *regs)
   44.24 +static void halt_machine(unsigned char key, struct cpu_user_regs *regs)
   44.25  {
   44.26      printk("'%c' pressed -> rebooting machine\n", key); 
   44.27      machine_restart(NULL); 
   44.28 @@ -125,9 +125,12 @@ static void do_task_queues(unsigned char
   44.29              printk("Notifying guest... %d/%d\n", d->id, ed->eid); 
   44.30              printk("port %d/%d stat %d %d %d\n",
   44.31                     VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
   44.32 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]),
   44.33 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]),
   44.34 -                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel));
   44.35 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   44.36 +                            &d->shared_info->evtchn_pending[0]),
   44.37 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], 
   44.38 +                            &d->shared_info->evtchn_mask[0]),
   44.39 +                   test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, 
   44.40 +                            &ed->vcpu_info->evtchn_pending_sel));
   44.41              send_guest_virq(ed, VIRQ_DEBUG);
   44.42          }
   44.43      }
   44.44 @@ -147,7 +150,7 @@ extern void perfc_printall(unsigned char
   44.45  extern void perfc_reset(unsigned char key);
   44.46  #endif
   44.47  
   44.48 -void do_debug_key(unsigned char key, struct xen_regs *regs)
   44.49 +void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
   44.50  {
   44.51      (void)debugger_trap_fatal(0xf001, regs);
   44.52      nop(); /* Prevent the compiler doing tail call
    45.1 --- a/xen/drivers/char/console.c	Thu Apr 28 13:52:41 2005 +0000
    45.2 +++ b/xen/drivers/char/console.c	Thu Apr 28 14:04:13 2005 +0000
    45.3 @@ -260,7 +260,7 @@ static void switch_serial_input(void)
    45.4      }
    45.5  }
    45.6  
    45.7 -static void __serial_rx(unsigned char c, struct xen_regs *regs)
    45.8 +static void __serial_rx(unsigned char c, struct cpu_user_regs *regs)
    45.9  {
   45.10      if ( xen_rx )
   45.11      {
   45.12 @@ -274,7 +274,7 @@ static void __serial_rx(unsigned char c,
   45.13      }
   45.14  }
   45.15  
   45.16 -static void serial_rx(unsigned char c, struct xen_regs *regs)
   45.17 +static void serial_rx(unsigned char c, struct cpu_user_regs *regs)
   45.18  {
   45.19      static int switch_code_count = 0;
   45.20  
    46.1 --- a/xen/drivers/char/serial.c	Thu Apr 28 13:52:41 2005 +0000
    46.2 +++ b/xen/drivers/char/serial.c	Thu Apr 28 14:04:13 2005 +0000
    46.3 @@ -105,7 +105,7 @@ static struct uart com[2] = {
    46.4   * PRIVATE FUNCTIONS
    46.5   */
    46.6  
    46.7 -static void uart_rx(struct uart *uart, struct xen_regs *regs)
    46.8 +static void uart_rx(struct uart *uart, struct cpu_user_regs *regs)
    46.9  {
   46.10      unsigned char c;
   46.11  
   46.12 @@ -132,7 +132,7 @@ static void uart_rx(struct uart *uart, s
   46.13  }
   46.14  
   46.15  static void serial_interrupt(
   46.16 -    int irq, void *dev_id, struct xen_regs *regs)
   46.17 +    int irq, void *dev_id, struct cpu_user_regs *regs)
   46.18  {
   46.19      uart_rx((struct uart *)dev_id, regs);
   46.20  }
    47.1 --- a/xen/include/asm-ia64/debugger.h	Thu Apr 28 13:52:41 2005 +0000
    47.2 +++ b/xen/include/asm-ia64/debugger.h	Thu Apr 28 14:04:13 2005 +0000
    47.3 @@ -26,13 +26,13 @@
    47.4  
    47.5  /* The main trap handlers use these helper macros which include early bail. */
    47.6  static inline int debugger_trap_entry(
    47.7 -    unsigned int vector, struct xen_regs *regs)
    47.8 +    unsigned int vector, struct cpu_user_regs *regs)
    47.9  {
   47.10      return 0;
   47.11  }
   47.12  
   47.13  static inline int debugger_trap_fatal(
   47.14 -    unsigned int vector, struct xen_regs *regs)
   47.15 +    unsigned int vector, struct cpu_user_regs *regs)
   47.16  {
   47.17      return 0;
   47.18  }
    48.1 --- a/xen/include/asm-ia64/domain.h	Thu Apr 28 13:52:41 2005 +0000
    48.2 +++ b/xen/include/asm-ia64/domain.h	Thu Apr 28 14:04:13 2005 +0000
    48.3 @@ -6,7 +6,7 @@
    48.4  extern void arch_do_createdomain(struct exec_domain *);
    48.5  
    48.6  extern int arch_final_setup_guestos(
    48.7 -    struct exec_domain *, full_execution_context_t *);
    48.8 +    struct exec_domain *, struct vcpu_guest_context *);
    48.9  
   48.10  extern void domain_relinquish_resources(struct domain *);
   48.11  
    49.1 --- a/xen/include/asm-ia64/regs.h	Thu Apr 28 13:52:41 2005 +0000
    49.2 +++ b/xen/include/asm-ia64/regs.h	Thu Apr 28 14:04:13 2005 +0000
    49.3 @@ -1,2 +1,2 @@
    49.4  #include <asm/ptrace.h>
    49.5 -#define xen_regs pt_regs
    49.6 +#define cpu_user_regs pt_regs
    50.1 --- a/xen/include/asm-x86/apic.h	Thu Apr 28 13:52:41 2005 +0000
    50.2 +++ b/xen/include/asm-x86/apic.h	Thu Apr 28 14:04:13 2005 +0000
    50.3 @@ -74,10 +74,10 @@ extern void sync_Arb_IDs (void);
    50.4  extern void init_bsp_APIC (void);
    50.5  extern void setup_local_APIC (void);
    50.6  extern void init_apic_mappings (void);
    50.7 -extern void smp_local_timer_interrupt (struct xen_regs * regs);
    50.8 +extern void smp_local_timer_interrupt (struct cpu_user_regs * regs);
    50.9  extern void setup_APIC_clocks (void);
   50.10  extern void setup_apic_nmi_watchdog (void);
   50.11 -extern void nmi_watchdog_tick (struct xen_regs * regs);
   50.12 +extern void nmi_watchdog_tick (struct cpu_user_regs * regs);
   50.13  extern void touch_nmi_watchdog(void);
   50.14  extern int APIC_init_uniprocessor (void);
   50.15  extern void disable_APIC_timer(void);
    51.1 --- a/xen/include/asm-x86/debugger.h	Thu Apr 28 13:52:41 2005 +0000
    51.2 +++ b/xen/include/asm-x86/debugger.h	Thu Apr 28 14:04:13 2005 +0000
    51.3 @@ -38,11 +38,11 @@
    51.4  #define DEBUGGER_trap_fatal(_v, _r) \
    51.5      if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed;
    51.6  
    51.7 -int call_with_registers(int (*f)(struct xen_regs *r));
    51.8 +int call_with_registers(int (*f)(struct cpu_user_regs *r));
    51.9  
   51.10  #if defined(CRASH_DEBUG)
   51.11  
   51.12 -extern int __trap_to_cdb(struct xen_regs *r);
   51.13 +extern int __trap_to_cdb(struct cpu_user_regs *r);
   51.14  #define debugger_trap_entry(_v, _r) (0)
   51.15  #define debugger_trap_fatal(_v, _r) __trap_to_cdb(_r)
   51.16  #define debugger_trap_immediate() call_with_registers(__trap_to_cdb)
   51.17 @@ -52,7 +52,7 @@ extern int __trap_to_cdb(struct xen_regs
   51.18  #include <xen/softirq.h>
   51.19  
   51.20  static inline int debugger_trap_entry(
   51.21 -    unsigned int vector, struct xen_regs *regs)
   51.22 +    unsigned int vector, struct cpu_user_regs *regs)
   51.23  {
   51.24      struct exec_domain *ed = current;
   51.25  
   51.26 @@ -77,16 +77,16 @@ static inline int debugger_trap_entry(
   51.27  
   51.28  #elif 0
   51.29  
   51.30 -extern int kdb_trap(int, int, struct xen_regs *);
   51.31 +extern int kdb_trap(int, int, struct cpu_user_regs *);
   51.32  
   51.33  static inline int debugger_trap_entry(
   51.34 -    unsigned int vector, struct xen_regs *regs)
   51.35 +    unsigned int vector, struct cpu_user_regs *regs)
   51.36  {
   51.37      return 0;
   51.38  }
   51.39  
   51.40  static inline int debugger_trap_fatal(
   51.41 -    unsigned int vector, struct xen_regs *regs)
   51.42 +    unsigned int vector, struct cpu_user_regs *regs)
   51.43  {
   51.44      return kdb_trap(vector, 0, regs);
   51.45  }
    52.1 --- a/xen/include/asm-x86/domain.h	Thu Apr 28 13:52:41 2005 +0000
    52.2 +++ b/xen/include/asm-x86/domain.h	Thu Apr 28 14:04:13 2005 +0000
    52.3 @@ -78,7 +78,7 @@ struct arch_exec_domain
    52.4      struct i387_state  i387;
    52.5  
    52.6      /* general user-visible register state */
    52.7 -    execution_context_t user_ctxt;
    52.8 +    struct cpu_user_regs user_regs;
    52.9  
   52.10      void (*schedule_tail) (struct exec_domain *);
   52.11  
    53.1 --- a/xen/include/asm-x86/processor.h	Thu Apr 28 13:52:41 2005 +0000
    53.2 +++ b/xen/include/asm-x86/processor.h	Thu Apr 28 14:04:13 2005 +0000
    53.3 @@ -191,7 +191,9 @@ extern void dodgy_tsc(void);
    53.4  /*
    53.5   * Generic CPUID function
    53.6   */
    53.7 -static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
    53.8 +static inline void cpuid(
    53.9 +    int op, unsigned int *eax, unsigned int *ebx,
   53.10 +    unsigned int *ecx, unsigned int *edx)
   53.11  {
   53.12      __asm__("cpuid"
   53.13              : "=a" (*eax),
   53.14 @@ -405,7 +407,7 @@ long set_fast_trap(struct exec_domain *p
   53.15  
   53.16  #endif
   53.17  
   53.18 -extern int gpf_emulate_4gb(struct xen_regs *regs);
   53.19 +extern int gpf_emulate_4gb(struct cpu_user_regs *regs);
   53.20  
   53.21  extern void write_ptbase(struct exec_domain *ed);
   53.22  
   53.23 @@ -499,9 +501,9 @@ extern inline void prefetchw(const void 
   53.24  void show_guest_stack();
   53.25  void show_trace(unsigned long *esp);
   53.26  void show_stack(unsigned long *esp);
   53.27 -void show_registers(struct xen_regs *regs);
   53.28 +void show_registers(struct cpu_user_regs *regs);
   53.29  void show_page_walk(unsigned long addr);
   53.30 -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs);
   53.31 +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
   53.32  
   53.33  #endif /* !__ASSEMBLY__ */
   53.34  
    54.1 --- a/xen/include/asm-x86/shadow.h	Thu Apr 28 13:52:41 2005 +0000
    54.2 +++ b/xen/include/asm-x86/shadow.h	Thu Apr 28 14:04:13 2005 +0000
    54.3 @@ -63,7 +63,7 @@
    54.4  
    54.5  extern void shadow_mode_init(void);
    54.6  extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
    54.7 -extern int shadow_fault(unsigned long va, struct xen_regs *regs);
    54.8 +extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
    54.9  extern int shadow_mode_enable(struct domain *p, unsigned int mode);
   54.10  extern void shadow_invlpg(struct exec_domain *, unsigned long);
   54.11  extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
    55.1 --- a/xen/include/asm-x86/vmx.h	Thu Apr 28 13:52:41 2005 +0000
    55.2 +++ b/xen/include/asm-x86/vmx.h	Thu Apr 28 14:04:13 2005 +0000
    55.3 @@ -25,7 +25,7 @@
    55.4  #include <asm/processor.h>
    55.5  #include <asm/vmx_vmcs.h>
    55.6  
    55.7 -extern void vmx_asm_vmexit_handler(struct xen_regs);
    55.8 +extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
    55.9  extern void vmx_asm_do_resume(void);
   55.10  extern void vmx_asm_do_launch(void);
   55.11  extern void vmx_intr_assist(struct exec_domain *d);
    56.1 --- a/xen/include/asm-x86/vmx_platform.h	Thu Apr 28 13:52:41 2005 +0000
    56.2 +++ b/xen/include/asm-x86/vmx_platform.h	Thu Apr 28 14:04:13 2005 +0000
    56.3 @@ -73,7 +73,7 @@ struct instruction {
    56.4  struct mi_per_cpu_info
    56.5  {
    56.6      unsigned long          mmio_target;
    56.7 -    struct xen_regs        *inst_decoder_regs;
    56.8 +    struct cpu_user_regs   *inst_decoder_regs;
    56.9  };
   56.10  
   56.11  struct virutal_platform_def {
   56.12 @@ -85,7 +85,7 @@ struct virutal_platform_def {
   56.13  };
   56.14  
   56.15  extern void handle_mmio(unsigned long, unsigned long);
   56.16 -extern int vmx_setup_platform(struct exec_domain *, execution_context_t *);
   56.17 +extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
   56.18  
   56.19  // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
   56.20  #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
    57.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Thu Apr 28 13:52:41 2005 +0000
    57.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Thu Apr 28 14:04:13 2005 +0000
    57.3 @@ -65,8 +65,8 @@ void free_vmcs(struct vmcs_struct *);
    57.4  int  load_vmcs(struct arch_vmx_struct *, u64);
    57.5  int  store_vmcs(struct arch_vmx_struct *, u64);
    57.6  void dump_vmcs(void);
    57.7 -int  construct_vmcs(struct arch_vmx_struct *, execution_context_t *, 
    57.8 -                    full_execution_context_t *, int);
    57.9 +int  construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *, 
   57.10 +                    struct vcpu_guest_context *, int);
   57.11  
   57.12  #define VMCS_USE_HOST_ENV       1
   57.13  #define VMCS_USE_SEPARATE_ENV   0
    58.1 --- a/xen/include/asm-x86/x86_32/asm_defns.h	Thu Apr 28 13:52:41 2005 +0000
    58.2 +++ b/xen/include/asm-x86/x86_32/asm_defns.h	Thu Apr 28 14:04:13 2005 +0000
    58.3 @@ -13,16 +13,16 @@
    58.4          "pushl %edx;" \
    58.5          "pushl %ecx;" \
    58.6          "pushl %ebx;" \
    58.7 -        "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \
    58.8 +        "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \
    58.9          "jz 2f;" \
   58.10          "call setup_vm86_frame;" \
   58.11          "jmp 3f;" \
   58.12 -        "2:testb $3,"STR(XREGS_cs)"(%esp);" \
   58.13 +        "2:testb $3,"STR(UREGS_cs)"(%esp);" \
   58.14          "jz 1f;" \
   58.15 -        "movl %ds,"STR(XREGS_ds)"(%esp);" \
   58.16 -        "movl %es,"STR(XREGS_es)"(%esp);" \
   58.17 -        "movl %fs,"STR(XREGS_fs)"(%esp);" \
   58.18 -        "movl %gs,"STR(XREGS_gs)"(%esp);" \
   58.19 +        "movl %ds,"STR(UREGS_ds)"(%esp);" \
   58.20 +        "movl %es,"STR(UREGS_es)"(%esp);" \
   58.21 +        "movl %fs,"STR(UREGS_fs)"(%esp);" \
   58.22 +        "movl %gs,"STR(UREGS_gs)"(%esp);" \
   58.23          "3:"
   58.24  
   58.25  #define SAVE_ALL_NOSEGREGS(_reg) \
   58.26 @@ -50,16 +50,16 @@
   58.27          pushl %edx; \
   58.28          pushl %ecx; \
   58.29          pushl %ebx; \
   58.30 -        testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \
   58.31 +        testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \
   58.32          jz 2f; \
   58.33          call setup_vm86_frame; \
   58.34          jmp 3f; \
   58.35 -        2:testb $3,XREGS_cs(%esp); \
   58.36 +        2:testb $3,UREGS_cs(%esp); \
   58.37          jz 1f; \
   58.38 -        movl %ds,XREGS_ds(%esp); \
   58.39 -        movl %es,XREGS_es(%esp); \
   58.40 -        movl %fs,XREGS_fs(%esp); \
   58.41 -        movl %gs,XREGS_gs(%esp); \
   58.42 +        movl %ds,UREGS_ds(%esp); \
   58.43 +        movl %es,UREGS_es(%esp); \
   58.44 +        movl %fs,UREGS_fs(%esp); \
   58.45 +        movl %gs,UREGS_gs(%esp); \
   58.46          3:
   58.47  
   58.48  #define SAVE_ALL_NOSEGREGS(_reg) \
   58.49 @@ -98,7 +98,7 @@ asmlinkage void x(void); \
   58.50  
   58.51  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
   58.52  #define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
   58.53 -asmlinkage void x(struct xen_regs * regs); \
   58.54 +asmlinkage void x(struct cpu_user_regs * regs); \
   58.55  __asm__( \
   58.56  "\n"__ALIGN_STR"\n" \
   58.57  SYMBOL_NAME_STR(x) ":\n\t" \
    59.1 --- a/xen/include/asm-x86/x86_32/current.h	Thu Apr 28 13:52:41 2005 +0000
    59.2 +++ b/xen/include/asm-x86/x86_32/current.h	Thu Apr 28 14:04:13 2005 +0000
    59.3 @@ -5,7 +5,7 @@
    59.4  struct domain;
    59.5  
    59.6  #define STACK_RESERVED \
    59.7 -    (sizeof(execution_context_t) + sizeof(struct domain *))
    59.8 +    (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
    59.9  
   59.10  static inline struct exec_domain *get_current(void)
   59.11  {
   59.12 @@ -23,13 +23,13 @@ static inline void set_current(struct ex
   59.13                : : "r" (STACK_SIZE-4), "r" (ed) );    
   59.14  }
   59.15  
   59.16 -static inline execution_context_t *get_execution_context(void)
   59.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void)
   59.18  {
   59.19 -    execution_context_t *execution_context;
   59.20 +    struct cpu_user_regs *cpu_user_regs;
   59.21      __asm__ ( "andl %%esp,%0; addl %2,%0"
   59.22 -              : "=r" (execution_context) 
   59.23 +              : "=r" (cpu_user_regs) 
   59.24                : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
   59.25 -    return execution_context;
   59.26 +    return cpu_user_regs;
   59.27  }
   59.28  
   59.29  /*
   59.30 @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bo
   59.31  #define reset_stack_and_jump(__fn)                                \
   59.32      __asm__ __volatile__ (                                        \
   59.33          "movl %0,%%esp; jmp "STR(__fn)                            \
   59.34 -        : : "r" (get_execution_context()) )
   59.35 +        : : "r" (get_cpu_user_regs()) )
   59.36  
   59.37  #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   59.38  
    60.1 --- a/xen/include/asm-x86/x86_32/regs.h	Thu Apr 28 13:52:41 2005 +0000
    60.2 +++ b/xen/include/asm-x86/x86_32/regs.h	Thu Apr 28 14:04:13 2005 +0000
    60.3 @@ -16,6 +16,6 @@
    60.4      ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3)))
    60.5  
    60.6  /* Number of bytes of on-stack execution state to be context-switched. */
    60.7 -#define CTXT_SWITCH_STACK_BYTES (sizeof(execution_context_t))
    60.8 +#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
    60.9  
   60.10  #endif
    61.1 --- a/xen/include/asm-x86/x86_64/asm_defns.h	Thu Apr 28 13:52:41 2005 +0000
    61.2 +++ b/xen/include/asm-x86/x86_64/asm_defns.h	Thu Apr 28 14:04:13 2005 +0000
    61.3 @@ -106,7 +106,7 @@ asmlinkage void x(void); \
    61.4  
    61.5  #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
    61.6  #define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
    61.7 -asmlinkage void x(struct xen_regs * regs); \
    61.8 +asmlinkage void x(struct cpu_user_regs * regs); \
    61.9  __asm__( \
   61.10  "\n"__ALIGN_STR"\n" \
   61.11  SYMBOL_NAME_STR(x) ":\n\t" \
    62.1 --- a/xen/include/asm-x86/x86_64/current.h	Thu Apr 28 13:52:41 2005 +0000
    62.2 +++ b/xen/include/asm-x86/x86_64/current.h	Thu Apr 28 14:04:13 2005 +0000
    62.3 @@ -5,7 +5,7 @@
    62.4  struct domain;
    62.5  
    62.6  #define STACK_RESERVED \
    62.7 -    (sizeof(execution_context_t) + sizeof(struct domain *))
    62.8 +    (sizeof(struct cpu_user_regs) + sizeof(struct domain *))
    62.9  
   62.10  static inline struct exec_domain *get_current(void)
   62.11  {
   62.12 @@ -23,13 +23,13 @@ static inline void set_current(struct ex
   62.13                : : "r" (STACK_SIZE-8), "r" (ed) );    
   62.14  }
   62.15  
   62.16 -static inline execution_context_t *get_execution_context(void)
   62.17 +static inline struct cpu_user_regs *get_cpu_user_regs(void)
   62.18  {
   62.19 -    execution_context_t *execution_context;
   62.20 +    struct cpu_user_regs *cpu_user_regs;
   62.21      __asm__( "andq %%rsp,%0; addq %2,%0"
   62.22 -	    : "=r" (execution_context)
   62.23 +	    : "=r" (cpu_user_regs)
   62.24  	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); 
   62.25 -    return execution_context;
   62.26 +    return cpu_user_regs;
   62.27  }
   62.28  
   62.29  /*
   62.30 @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bo
   62.31  #define reset_stack_and_jump(__fn)                                \
   62.32      __asm__ __volatile__ (                                        \
   62.33          "movq %0,%%rsp; jmp "STR(__fn)                            \
   62.34 -        : : "r" (get_execution_context()) )
   62.35 +        : : "r" (get_cpu_user_regs()) )
   62.36  
   62.37  #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
   62.38  
    63.1 --- a/xen/include/asm-x86/x86_64/regs.h	Thu Apr 28 13:52:41 2005 +0000
    63.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Thu Apr 28 14:04:13 2005 +0000
    63.3 @@ -17,6 +17,6 @@
    63.4  
    63.5  /* Number of bytes of on-stack execution state to be context-switched. */
    63.6  /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
    63.7 -#define CTXT_SWITCH_STACK_BYTES (offsetof(execution_context_t, es))
    63.8 +#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es))
    63.9  
   63.10  #endif
    64.1 --- a/xen/include/asm-x86/x86_emulate.h	Thu Apr 28 13:52:41 2005 +0000
    64.2 +++ b/xen/include/asm-x86/x86_emulate.h	Thu Apr 28 14:04:13 2005 +0000
    64.3 @@ -139,7 +139,7 @@ x86_emulate_write_std(
    64.4      unsigned long val,
    64.5      unsigned int bytes);
    64.6  
    64.7 -struct xen_regs;
    64.8 +struct cpu_user_regs;
    64.9  
   64.10  /*
   64.11   * x86_emulate_memop: Emulate an instruction that faulted attempting to
   64.12 @@ -152,7 +152,7 @@ struct xen_regs;
   64.13   */
   64.14  extern int
   64.15  x86_emulate_memop(
   64.16 -    struct xen_regs *regs,
   64.17 +    struct cpu_user_regs *regs,
   64.18      unsigned long cr2,
   64.19      struct x86_mem_emulator *ops,
   64.20      int mode);
   64.21 @@ -164,6 +164,6 @@ x86_emulate_memop(
   64.22   */
   64.23  extern void *
   64.24  decode_register(
   64.25 -    u8 modrm_reg, struct xen_regs *regs, int highbyte_regs);
   64.26 +    u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);
   64.27  
   64.28  #endif /* __X86_EMULATE_H__ */
    65.1 --- a/xen/include/public/arch-ia64.h	Thu Apr 28 13:52:41 2005 +0000
    65.2 +++ b/xen/include/public/arch-ia64.h	Thu Apr 28 14:04:13 2005 +0000
    65.3 @@ -22,7 +22,7 @@ typedef unsigned long cpureg_t;   /* Ful
    65.4  
    65.5  typedef struct
    65.6  {
    65.7 -} PACKED execution_context_t;
    65.8 +} PACKED cpu_user_regs_t;
    65.9  
   65.10  /*
   65.11   * NB. This may become a 64-bit count with no shift. If this happens then the 
   65.12 @@ -91,9 +91,9 @@ typedef struct {
   65.13   * The following is all CPU context. Note that the i387_ctxt block is filled 
   65.14   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   65.15   */
   65.16 -typedef struct {
   65.17 +typedef struct vcpu_guest_context {
   65.18      //unsigned long flags;
   65.19 -} PACKED full_execution_context_t;
   65.20 +} PACKED vcpu_guest_context_t;
   65.21  
   65.22  #endif /* !__ASSEMBLY__ */
   65.23  
    66.1 --- a/xen/include/public/arch-x86_32.h	Thu Apr 28 13:52:41 2005 +0000
    66.2 +++ b/xen/include/public/arch-x86_32.h	Thu Apr 28 14:04:13 2005 +0000
    66.3 @@ -97,8 +97,7 @@ typedef struct {
    66.4      memory_t address; /* 4: code address                                  */
    66.5  } PACKED trap_info_t; /* 8 bytes */
    66.6  
    66.7 -typedef struct xen_regs
    66.8 -{
    66.9 +typedef struct cpu_user_regs {
   66.10      u32 ebx;
   66.11      u32 ecx;
   66.12      u32 edx;
   66.13 @@ -117,7 +116,7 @@ typedef struct xen_regs
   66.14      u32 ds;
   66.15      u32 fs;
   66.16      u32 gs;
   66.17 -} PACKED execution_context_t;
   66.18 +} cpu_user_regs_t;
   66.19  
   66.20  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   66.21  
   66.22 @@ -125,12 +124,12 @@ typedef u64 tsc_timestamp_t; /* RDTSC ti
   66.23   * The following is all CPU context. Note that the i387_ctxt block is filled 
   66.24   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   66.25   */
   66.26 -typedef struct {
   66.27 +typedef struct vcpu_guest_context {
   66.28  #define ECF_I387_VALID (1<<0)
   66.29  #define ECF_VMX_GUEST  (1<<1)
   66.30 -#define ECF_IN_KERNEL (1<<2)
   66.31 +#define ECF_IN_KERNEL  (1<<2)
   66.32      unsigned long flags;
   66.33 -    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
   66.34 +    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   66.35      char          fpu_ctxt[256];            /* User-level FPU registers     */
   66.36      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   66.37      unsigned int  fast_trap_idx;            /* "Fast trap" vector offset    */
   66.38 @@ -144,7 +143,7 @@ typedef struct {
   66.39      unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
   66.40      unsigned long failsafe_callback_eip;
   66.41      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   66.42 -} PACKED full_execution_context_t;
   66.43 +} PACKED vcpu_guest_context_t;
   66.44  
   66.45  typedef struct {
   66.46      /* MFN of a table of MFNs that make up p2m table */
    67.1 --- a/xen/include/public/arch-x86_64.h	Thu Apr 28 13:52:41 2005 +0000
    67.2 +++ b/xen/include/public/arch-x86_64.h	Thu Apr 28 14:04:13 2005 +0000
    67.3 @@ -142,8 +142,7 @@ typedef struct {
    67.4      memory_t address; /* 8: code address                                  */
    67.5  } PACKED trap_info_t; /* 16 bytes */
    67.6  
    67.7 -typedef struct xen_regs
    67.8 -{
    67.9 +typedef struct cpu_user_regs {
   67.10      u64 r15;
   67.11      u64 r14;
   67.12      u64 r13;
   67.13 @@ -173,7 +172,7 @@ typedef struct xen_regs
   67.14      u64 fs_base;
   67.15      u64 gs_base_kernel;
   67.16      u64 gs_base_user;
   67.17 -} PACKED execution_context_t;
   67.18 +} cpu_user_regs_t;
   67.19  
   67.20  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   67.21  
   67.22 @@ -181,12 +180,12 @@ typedef u64 tsc_timestamp_t; /* RDTSC ti
   67.23   * The following is all CPU context. Note that the i387_ctxt block is filled 
   67.24   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   67.25   */
   67.26 -typedef struct {
   67.27 +typedef struct vcpu_guest_context {
   67.28  #define ECF_I387_VALID (1<<0)
   67.29  #define ECF_VMX_GUEST  (1<<1)
   67.30 -#define ECF_IN_KERNEL (1<<2)
   67.31 +#define ECF_IN_KERNEL  (1<<2)
   67.32      unsigned long flags;
   67.33 -    execution_context_t cpu_ctxt;           /* User-level CPU registers     */
   67.34 +    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   67.35      char          fpu_ctxt[512];            /* User-level FPU registers     */
   67.36      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   67.37      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
   67.38 @@ -198,7 +197,7 @@ typedef struct {
   67.39      unsigned long failsafe_callback_eip;
   67.40      unsigned long syscall_callback_eip;
   67.41      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   67.42 -} PACKED full_execution_context_t;
   67.43 +} PACKED vcpu_guest_context_t;
   67.44  
   67.45  typedef struct {
   67.46      /* MFN of a table of MFNs that make up p2m table */
    68.1 --- a/xen/include/public/dom0_ops.h	Thu Apr 28 13:52:41 2005 +0000
    68.2 +++ b/xen/include/public/dom0_ops.h	Thu Apr 28 14:04:13 2005 +0000
    68.3 @@ -83,7 +83,7 @@ typedef struct {
    68.4  #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
    68.5  #define DOMFLAGS_SHUTDOWNSHIFT 16
    68.6      u32      flags;
    68.7 -    full_execution_context_t *ctxt;   /* NB. IN/OUT variable. */
    68.8 +    vcpu_guest_context_t *ctxt;   /* NB. IN/OUT variable. */
    68.9      memory_t tot_pages;
   68.10      memory_t max_pages;
   68.11      memory_t shared_info_frame;       /* MFN of shared_info struct */
   68.12 @@ -96,7 +96,7 @@ typedef struct {
   68.13      domid_t                   domain;
   68.14      u16                       exec_domain;
   68.15      /* IN/OUT parameters */
   68.16 -    full_execution_context_t *ctxt;
   68.17 +    vcpu_guest_context_t *ctxt;
   68.18  } dom0_setdomaininfo_t;
   68.19  
   68.20  #define DOM0_MSR              15
    69.1 --- a/xen/include/xen/domain.h	Thu Apr 28 13:52:41 2005 +0000
    69.2 +++ b/xen/include/xen/domain.h	Thu Apr 28 14:04:13 2005 +0000
    69.3 @@ -15,7 +15,7 @@ extern void arch_do_createdomain(struct 
    69.4  extern void arch_do_boot_vcpu(struct exec_domain *ed);
    69.5  
    69.6  extern int  arch_set_info_guest(
    69.7 -    struct exec_domain *d, full_execution_context_t *c);
    69.8 +    struct exec_domain *d, struct vcpu_guest_context *c);
    69.9  
   69.10  extern void free_perdomain_pt(struct domain *d);
   69.11  
    70.1 --- a/xen/include/xen/irq.h	Thu Apr 28 13:52:41 2005 +0000
    70.2 +++ b/xen/include/xen/irq.h	Thu Apr 28 14:04:13 2005 +0000
    70.3 @@ -8,7 +8,7 @@
    70.4  
    70.5  struct irqaction
    70.6  {
    70.7 -    void (*handler)(int, void *, struct xen_regs *);
    70.8 +    void (*handler)(int, void *, struct cpu_user_regs *);
    70.9      const char *name;
   70.10      void *dev_id;
   70.11  };
   70.12 @@ -63,7 +63,7 @@ extern int setup_irq(unsigned int, struc
   70.13  extern void free_irq(unsigned int);
   70.14  
   70.15  extern hw_irq_controller no_irq_type;
   70.16 -extern void no_action(int cpl, void *dev_id, struct xen_regs *regs);
   70.17 +extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
   70.18  
   70.19  struct domain;
   70.20  struct exec_domain;
    71.1 --- a/xen/include/xen/keyhandler.h	Thu Apr 28 13:52:41 2005 +0000
    71.2 +++ b/xen/include/xen/keyhandler.h	Thu Apr 28 14:04:13 2005 +0000
    71.3 @@ -23,11 +23,11 @@ extern void register_keyhandler(
    71.4   * synchronously in hard-IRQ context with interrupts disabled. The @regs
    71.5   * callback parameter points at the interrupted register context.
    71.6   */
    71.7 -typedef void irq_keyhandler_t(unsigned char key, struct xen_regs *regs);
    71.8 +typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs);
    71.9  extern void register_irq_keyhandler(
   71.10      unsigned char key, irq_keyhandler_t *handler, char *desc); 
   71.11  
   71.12  /* Inject a keypress into the key-handling subsystem. */
   71.13 -extern void handle_keypress(unsigned char key, struct xen_regs *regs);
   71.14 +extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs);
   71.15  
   71.16  #endif /* __XEN_KEYHANDLER_H__ */
    72.1 --- a/xen/include/xen/serial.h	Thu Apr 28 13:52:41 2005 +0000
    72.2 +++ b/xen/include/xen/serial.h	Thu Apr 28 14:04:13 2005 +0000
    72.3 @@ -28,7 +28,7 @@ void serial_init_stage2(void);
    72.4  int parse_serial_handle(char *conf);
    72.5  
    72.6  /* Register a character-receive hook on the specified COM port. */
    72.7 -typedef void (*serial_rx_fn)(unsigned char, struct xen_regs *);
    72.8 +typedef void (*serial_rx_fn)(unsigned char, struct cpu_user_regs *);
    72.9  void serial_set_rx_handler(int handle, serial_rx_fn fn);
   72.10  
   72.11  /* Transmit a single character via the specified COM port. */