ia64/xen-unstable

changeset 4689:cd690b71434a

bitkeeper revision 1.1389.1.4 (427125bdwah0mehgnafVLP-gRLDM_w)

Avoid field duplication between vcpu_guest_context and arch_exec_domain
structures. The latter now includes the former as a sub-field.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Apr 28 18:04:45 2005 +0000 (2005-04-28)
parents b2ca9de6952a
children 57dcb8c9f1d8
files	linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
	linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S
	tools/libxc/xc_linux_build.c
	tools/libxc/xc_plan9_build.c
	tools/libxc/xc_vmx_build.c
	xen/arch/x86/dom0_ops.c
	xen/arch/x86/domain.c
	xen/arch/x86/domain_build.c
	xen/arch/x86/i387.c
	xen/arch/x86/mm.c
	xen/arch/x86/traps.c
	xen/arch/x86/vmx.c
	xen/arch/x86/x86_32/asm-offsets.c
	xen/arch/x86/x86_32/mm.c
	xen/arch/x86/x86_32/seg_fixup.c
	xen/arch/x86/x86_32/traps.c
	xen/arch/x86/x86_64/asm-offsets.c
	xen/arch/x86/x86_64/entry.S
	xen/arch/x86/x86_64/mm.c
	xen/arch/x86/x86_64/traps.c
	xen/include/asm-x86/domain.h
	xen/include/asm-x86/ldt.h
	xen/include/asm-x86/processor.h
	xen/include/asm-x86/x86_64/current.h
	xen/include/public/arch-x86_32.h
	xen/include/public/arch-x86_64.h
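Every hunk below follows from one structural change: struct arch_exec_domain no longer keeps its own copies of the guest-visible register, FPU, trap-table, LDT and callback fields, but embeds the public struct vcpu_guest_context and reaches them as ed->arch.guest_context.*. The following is a minimal, compilable sketch of that change; the field lists are heavily abridged and the selector value is arbitrary, so treat it as an illustration of the layout, not the real definitions (those are in the asm-x86/domain.h and public/arch-x86_*.h hunks below).

    #include <stdio.h>
    #include <string.h>

    /* Public per-VCPU context shared with guests and tools (heavily
     * abridged; real definition in xen/include/public/arch-x86_32.h
     * and arch-x86_64.h). */
    struct vcpu_guest_context {
        unsigned long flags;                 /* VGCF_* flags               */
        unsigned long kernel_ss, kernel_sp;  /* virtual TSS (only SS1/SP1) */
        unsigned long debugreg[8];           /* DB0-DB7 (debug registers)  */
        /* ... user_regs, fpu_ctxt, trap_ctxt[256], LDT/GDT, callbacks ... */
    };

    /* Old hypervisor-private layout: each guest-visible field was
     * duplicated alongside the Xen-internal state. */
    struct arch_exec_domain_old {
        unsigned long kernel_sp, kernel_ss;
        unsigned long debugreg[8];
        /* ... duplicated user_regs, i387, traps[256], callback addrs ... */
    };

    /* New layout: the public structure is embedded as a sub-field, so
     * the duplicated members disappear (xen/include/asm-x86/domain.h). */
    struct arch_exec_domain {
        struct vcpu_guest_context guest_context;
        /* ... only Xen-internal state remains (trap_bounce, shadow, ...) */
    };

    int main(void)
    {
        struct arch_exec_domain arch;
        struct vcpu_guest_context c;

        memset(&arch, 0, sizeof(arch));
        arch.guest_context.kernel_ss = 0x10;  /* arbitrary example value */

        /* Accesses change from arch.kernel_ss to
         * arch.guest_context.kernel_ss, and context get/set collapses to
         * a single memcpy of the sub-field, as in the
         * arch_getdomaininfo_ctxt() / arch_set_info_guest() hunks below. */
        memcpy(&c, &arch.guest_context, sizeof(c));
        printf("kernel_ss = %#lx\n", c.kernel_ss);
        return 0;
    }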
line diff
     1.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 15:12:37 2005 +0000
     1.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Apr 28 18:04:45 2005 +0000
     1.3 @@ -876,7 +876,7 @@ static int __init do_boot_cpu(int apicid
     1.4  	ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12);
     1.5  
     1.6  	/* FPU is set up to default initial state. */
     1.7 -	memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
     1.8 +	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
     1.9  
    1.10  	/* Virtual IDT is empty at start-of-day. */
    1.11  	for ( i = 0; i < 256; i++ )
    1.12 @@ -903,8 +903,8 @@ static int __init do_boot_cpu(int apicid
    1.13  	}
    1.14  
    1.15  	/* Ring 1 stack is the initial stack. */
    1.16 -	ctxt.kernel_ss  = __KERNEL_DS;
    1.17 -	ctxt.kernel_esp = idle->thread.esp;
    1.18 +	ctxt.kernel_ss = __KERNEL_DS;
    1.19 +	ctxt.kernel_sp = idle->thread.esp;
    1.20  
    1.21  	/* Callback handlers. */
    1.22  	ctxt.event_callback_cs     = __KERNEL_CS;
     2.1 --- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Apr 28 15:12:37 2005 +0000
     2.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S	Thu Apr 28 18:04:45 2005 +0000
     2.3 @@ -50,7 +50,7 @@
     2.4  
     2.5  
     2.6  EVENT_MASK      = (CS+4)
     2.7 -ECF_IN_SYSCALL  = (1<<8)
     2.8 +VGCF_IN_SYSCALL = (1<<8)
     2.9          
    2.10  /*
    2.11   * Copied from arch/xen/i386/kernel/entry.S
    2.12 @@ -169,7 +169,7 @@ ECF_IN_SYSCALL  = (1<<8)
    2.13           *     struct switch_to_user {
    2.14           *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
    2.15           *     } PACKED;
    2.16 -         * #define ECF_IN_SYSCALL (1<<8) 
    2.17 +         * #define VGCF_IN_SYSCALL (1<<8) 
    2.18           */
    2.19          .macro SWITCH_TO_USER flag
    2.20          movl $0,%gs:pda_kernel_mode     # change to user mode
    2.21 @@ -275,7 +275,7 @@ sysret_check:
    2.22  	jnz  sysret_careful 
    2.23          XEN_UNBLOCK_EVENTS(%rsi)                
    2.24  	RESTORE_ARGS 0,8,0
    2.25 -        SWITCH_TO_USER ECF_IN_SYSCALL
    2.26 +        SWITCH_TO_USER VGCF_IN_SYSCALL
    2.27  
    2.28  	/* Handle reschedules */
    2.29  	/* edx:	work, edi: workmask */	
     3.1 --- a/tools/libxc/xc_linux_build.c	Thu Apr 28 15:12:37 2005 +0000
     3.2 +++ b/tools/libxc/xc_linux_build.c	Thu Apr 28 18:04:45 2005 +0000
     3.3 @@ -412,7 +412,7 @@ int xc_linux_build(int xc_handle,
     3.4      ctxt->user_regs.eflags = (1<<9) | (1<<2);
     3.5  
     3.6      /* FPU is set up to default initial state. */
     3.7 -    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
     3.8 +    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
     3.9  
    3.10      /* Virtual IDT is empty at start-of-day. */
    3.11      for ( i = 0; i < 256; i++ )
    3.12 @@ -432,8 +432,8 @@ int xc_linux_build(int xc_handle,
    3.13      ctxt->gdt_ents = 0;
    3.14  
    3.15      /* Ring 1 stack is the initial stack. */
    3.16 -    ctxt->kernel_ss  = FLAT_KERNEL_DS;
    3.17 -    ctxt->kernel_esp = vstartinfo_start + 2*PAGE_SIZE;
    3.18 +    ctxt->kernel_ss = FLAT_KERNEL_DS;
    3.19 +    ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;
    3.20  
    3.21      /* No debugging. */
    3.22      memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
     4.1 --- a/tools/libxc/xc_plan9_build.c	Thu Apr 28 15:12:37 2005 +0000
     4.2 +++ b/tools/libxc/xc_plan9_build.c	Thu Apr 28 18:04:45 2005 +0000
     4.3 @@ -498,7 +498,7 @@ xc_plan9_build(int xc_handle,
     4.4  	ctxt->user_regs.eflags = (1 << 9) | (1 << 2);
     4.5  
     4.6  	/* FPU is set up to default initial state. */
     4.7 -	memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt));
     4.8 +	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
     4.9  
    4.10  	/* Virtual IDT is empty at start-of-day. */
    4.11  	for (i = 0; i < 256; i++) {
    4.12 @@ -519,7 +519,7 @@ xc_plan9_build(int xc_handle,
    4.13  	/* Ring 1 stack is the initial stack. */
    4.14  	/* put stack at top of second page */
    4.15  	ctxt->kernel_ss = FLAT_KERNEL_DS;
    4.16 -	ctxt->kernel_esp = ctxt->user_regs.esp;
    4.17 +	ctxt->kernel_sp = ctxt->user_regs.esp;
    4.18  
    4.19  	/* No debugging. */
    4.20  	memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg));
     5.1 --- a/tools/libxc/xc_vmx_build.c	Thu Apr 28 15:12:37 2005 +0000
     5.2 +++ b/tools/libxc/xc_vmx_build.c	Thu Apr 28 18:04:45 2005 +0000
     5.3 @@ -565,9 +565,9 @@ int xc_vmx_build(int xc_handle,
     5.4      if ( image != NULL )
     5.5          free(image);
     5.6  
     5.7 -    ctxt->flags = ECF_VMX_GUEST;
     5.8 +    ctxt->flags = VGCF_VMX_GUEST;
     5.9      /* FPU is set up to default initial state. */
    5.10 -    memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
    5.11 +    memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
    5.12  
    5.13      /* Virtual IDT is empty at start-of-day. */
    5.14      for ( i = 0; i < 256; i++ )
    5.15 @@ -588,8 +588,8 @@ int xc_vmx_build(int xc_handle,
    5.16  
    5.17      /* Ring 1 stack is the initial stack. */
    5.18  /*
    5.19 -    ctxt->kernel_ss  = FLAT_KERNEL_DS;
    5.20 -    ctxt->kernel_esp = vstartinfo_start;
    5.21 +    ctxt->kernel_ss = FLAT_KERNEL_DS;
    5.22 +    ctxt->kernel_sp = vstartinfo_start;
    5.23  */
    5.24      /* No debugging. */
    5.25      memset(ctxt->debugreg, 0, sizeof(ctxt->debugreg));
     6.1 --- a/xen/arch/x86/dom0_ops.c	Thu Apr 28 15:12:37 2005 +0000
     6.2 +++ b/xen/arch/x86/dom0_ops.c	Thu Apr 28 18:04:45 2005 +0000
     6.3 @@ -383,10 +383,8 @@ void arch_getdomaininfo_ctxt(
     6.4  #endif
     6.5  #endif
     6.6  
     6.7 -    c->flags = 0;
     6.8 -    memcpy(&c->user_regs, 
     6.9 -           &ed->arch.user_regs,
    6.10 -           sizeof(ed->arch.user_regs));
    6.11 +    memcpy(c, &ed->arch.guest_context, sizeof(*c));
    6.12 +
    6.13      /* IOPL privileges are virtualised -- merge back into returned eflags. */
    6.14      BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
    6.15      c->user_regs.eflags |= ed->arch.iopl << 12;
    6.16 @@ -398,30 +396,22 @@ void arch_getdomaininfo_ctxt(
    6.17  #endif
    6.18  #endif
    6.19  
    6.20 +    c->flags = 0;
    6.21      if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
    6.22 -        c->flags |= ECF_I387_VALID;
    6.23 -    if ( KERNEL_MODE(ed, &ed->arch.user_regs) )
    6.24 -        c->flags |= ECF_IN_KERNEL;
    6.25 +        c->flags |= VGCF_I387_VALID;
    6.26 +    if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
    6.27 +        c->flags |= VGCF_IN_KERNEL;
    6.28  #ifdef CONFIG_VMX
    6.29      if (VMX_DOMAIN(ed))
    6.30 -        c->flags |= ECF_VMX_GUEST;
    6.31 +        c->flags |= VGCF_VMX_GUEST;
    6.32  #endif
    6.33 -    memcpy(&c->fpu_ctxt,
    6.34 -           &ed->arch.i387,
    6.35 -           sizeof(ed->arch.i387));
    6.36 -    memcpy(&c->trap_ctxt,
    6.37 -           ed->arch.traps,
    6.38 -           sizeof(ed->arch.traps));
    6.39 +
    6.40  #ifdef ARCH_HAS_FAST_TRAP
    6.41      if ( (ed->arch.fast_trap_desc.a == 0) &&
    6.42           (ed->arch.fast_trap_desc.b == 0) )
    6.43          c->fast_trap_idx = 0;
    6.44 -    else
    6.45 -        c->fast_trap_idx = 
    6.46 -            ed->arch.fast_trap_idx;
    6.47  #endif
    6.48 -    c->ldt_base = ed->arch.ldt_base;
    6.49 -    c->ldt_ents = ed->arch.ldt_ents;
    6.50 +
    6.51      c->gdt_ents = 0;
    6.52      if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
    6.53      {
    6.54 @@ -430,22 +420,8 @@ void arch_getdomaininfo_ctxt(
    6.55                  l1e_get_pfn(ed->arch.perdomain_ptes[i]);
    6.56          c->gdt_ents = GET_GDT_ENTRIES(ed);
    6.57      }
    6.58 -    c->kernel_ss  = ed->arch.kernel_ss;
    6.59 -    c->kernel_esp = ed->arch.kernel_sp;
    6.60 -    c->pt_base   = 
    6.61 -        pagetable_val(ed->arch.guest_table);
    6.62 -    memcpy(c->debugreg, 
    6.63 -           ed->arch.debugreg, 
    6.64 -           sizeof(ed->arch.debugreg));
    6.65 -#if defined(__i386__)
    6.66 -    c->event_callback_cs     = ed->arch.event_selector;
    6.67 -    c->event_callback_eip    = ed->arch.event_address;
    6.68 -    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
    6.69 -    c->failsafe_callback_eip = ed->arch.failsafe_address;
    6.70 -#elif defined(__x86_64__)
    6.71 -    c->event_callback_eip    = ed->arch.event_address;
    6.72 -    c->failsafe_callback_eip = ed->arch.failsafe_address;
    6.73 -    c->syscall_callback_eip  = ed->arch.syscall_address;
    6.74 -#endif
    6.75 +
    6.76 +    c->pt_base = pagetable_val(ed->arch.guest_table);
    6.77 +
    6.78      c->vm_assist = ed->domain->vm_assist;
    6.79  }
     7.1 --- a/xen/arch/x86/domain.c	Thu Apr 28 15:12:37 2005 +0000
     7.2 +++ b/xen/arch/x86/domain.c	Thu Apr 28 18:04:45 2005 +0000
     7.3 @@ -385,65 +385,42 @@ int arch_set_info_guest(
     7.4       * #GP. If DS, ES, FS, GS are DPL 0 then they'll be cleared automatically.
     7.5       * If SS RPL or DPL differs from CS RPL then we'll #GP.
     7.6       */
     7.7 -    if (!(c->flags & ECF_VMX_GUEST)) 
     7.8 +    if ( !(c->flags & VGCF_VMX_GUEST) )
     7.9 +    {
    7.10          if ( ((c->user_regs.cs & 3) == 0) ||
    7.11               ((c->user_regs.ss & 3) == 0) )
    7.12                  return -EINVAL;
    7.13 +    }
    7.14  
    7.15      clear_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    7.16 -    if ( c->flags & ECF_I387_VALID )
    7.17 +    if ( c->flags & VGCF_I387_VALID )
    7.18          set_bit(EDF_DONEFPUINIT, &ed->ed_flags);
    7.19  
    7.20      ed->arch.flags &= ~TF_kernel_mode;
    7.21 -    if ( c->flags & ECF_IN_KERNEL )
    7.22 +    if ( c->flags & VGCF_IN_KERNEL )
    7.23          ed->arch.flags |= TF_kernel_mode;
    7.24  
    7.25 -    memcpy(&ed->arch.user_regs,
    7.26 -           &c->user_regs,
    7.27 -           sizeof(ed->arch.user_regs));
    7.28 -
    7.29 -    memcpy(&ed->arch.i387,
    7.30 -           &c->fpu_ctxt,
    7.31 -           sizeof(ed->arch.i387));
    7.32 +    memcpy(&ed->arch.guest_context, c, sizeof(*c));
    7.33  
    7.34      /* IOPL privileges are virtualised. */
    7.35 -    ed->arch.iopl = (ed->arch.user_regs.eflags >> 12) & 3;
    7.36 -    ed->arch.user_regs.eflags &= ~EF_IOPL;
    7.37 +    ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
    7.38 +    ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
    7.39  
    7.40      /* Clear IOPL for unprivileged domains. */
    7.41 -    if (!IS_PRIV(d))
    7.42 -        ed->arch.user_regs.eflags &= 0xffffcfff;
    7.43 +    if ( !IS_PRIV(d) )
    7.44 +        ed->arch.guest_context.user_regs.eflags &= 0xffffcfff;
    7.45  
    7.46 -    if (test_bit(EDF_DONEINIT, &ed->ed_flags))
    7.47 +    if ( test_bit(EDF_DONEINIT, &ed->ed_flags) )
    7.48          return 0;
    7.49  
    7.50 -    memcpy(ed->arch.traps,
    7.51 -           &c->trap_ctxt,
    7.52 -           sizeof(ed->arch.traps));
    7.53 -
    7.54      if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
    7.55          return rc;
    7.56  
    7.57 -    ed->arch.ldt_base = c->ldt_base;
    7.58 -    ed->arch.ldt_ents = c->ldt_ents;
    7.59 -
    7.60 -    ed->arch.kernel_ss = c->kernel_ss;
    7.61 -    ed->arch.kernel_sp = c->kernel_esp;
    7.62 -
    7.63 +    memset(ed->arch.guest_context.debugreg, 0,
    7.64 +           sizeof(ed->arch.guest_context.debugreg));
    7.65      for ( i = 0; i < 8; i++ )
    7.66          (void)set_debugreg(ed, i, c->debugreg[i]);
    7.67  
    7.68 -#if defined(__i386__)
    7.69 -    ed->arch.event_selector    = c->event_callback_cs;
    7.70 -    ed->arch.event_address     = c->event_callback_eip;
    7.71 -    ed->arch.failsafe_selector = c->failsafe_callback_cs;
    7.72 -    ed->arch.failsafe_address  = c->failsafe_callback_eip;
    7.73 -#elif defined(__x86_64__)
    7.74 -    ed->arch.event_address     = c->event_callback_eip;
    7.75 -    ed->arch.failsafe_address  = c->failsafe_callback_eip;
    7.76 -    ed->arch.syscall_address   = c->syscall_callback_eip;
    7.77 -#endif
    7.78 -
    7.79      if ( ed->eid == 0 )
    7.80          d->vm_assist = c->vm_assist;
    7.81  
    7.82 @@ -475,7 +452,7 @@ int arch_set_info_guest(
    7.83      }
    7.84  
    7.85  #ifdef CONFIG_VMX
    7.86 -    if ( c->flags & ECF_VMX_GUEST )
    7.87 +    if ( c->flags & VGCF_VMX_GUEST )
    7.88      {
    7.89          int error;
    7.90  
    7.91 @@ -507,7 +484,7 @@ void new_thread(struct exec_domain *d,
    7.92                  unsigned long start_stack,
    7.93                  unsigned long start_info)
    7.94  {
    7.95 -    struct cpu_user_regs *regs = &d->arch.user_regs;
    7.96 +    struct cpu_user_regs *regs = &d->arch.guest_context.user_regs;
    7.97  
    7.98      /*
    7.99       * Initial register values:
   7.100 @@ -557,63 +534,63 @@ void toggle_guest_mode(struct exec_domai
   7.101  
   7.102  static void load_segments(struct exec_domain *p, struct exec_domain *n)
   7.103  {
   7.104 +    struct vcpu_guest_context *pctxt = &p->arch.guest_context;
   7.105 +    struct vcpu_guest_context *nctxt = &n->arch.guest_context;
   7.106      int all_segs_okay = 1;
   7.107  
   7.108      /* Either selector != 0 ==> reload. */
   7.109 -    if ( unlikely(p->arch.user_regs.ds |
   7.110 -                  n->arch.user_regs.ds) )
   7.111 -        all_segs_okay &= loadsegment(ds, n->arch.user_regs.ds);
   7.112 +    if ( unlikely(pctxt->user_regs.ds | nctxt->user_regs.ds) )
   7.113 +        all_segs_okay &= loadsegment(ds, nctxt->user_regs.ds);
   7.114  
   7.115      /* Either selector != 0 ==> reload. */
   7.116 -    if ( unlikely(p->arch.user_regs.es |
   7.117 -                  n->arch.user_regs.es) )
   7.118 -        all_segs_okay &= loadsegment(es, n->arch.user_regs.es);
   7.119 +    if ( unlikely(pctxt->user_regs.es | nctxt->user_regs.es) )
   7.120 +        all_segs_okay &= loadsegment(es, nctxt->user_regs.es);
   7.121  
   7.122      /*
   7.123       * Either selector != 0 ==> reload.
   7.124       * Also reload to reset FS_BASE if it was non-zero.
   7.125       */
   7.126 -    if ( unlikely(p->arch.user_regs.fs |
   7.127 -                  p->arch.user_regs.fs_base |
   7.128 -                  n->arch.user_regs.fs) )
   7.129 +    if ( unlikely(pctxt->user_regs.fs |
   7.130 +                  pctxt->fs_base |
   7.131 +                  nctxt->user_regs.fs) )
   7.132      {
   7.133 -        all_segs_okay &= loadsegment(fs, n->arch.user_regs.fs);
   7.134 -        if ( p->arch.user_regs.fs ) /* != 0 selector kills fs_base */
   7.135 -            p->arch.user_regs.fs_base = 0;
   7.136 +        all_segs_okay &= loadsegment(fs, nctxt->user_regs.fs);
   7.137 +        if ( pctxt->user_regs.fs ) /* != 0 selector kills fs_base */
   7.138 +            pctxt->fs_base = 0;
   7.139      }
   7.140  
   7.141      /*
   7.142       * Either selector != 0 ==> reload.
   7.143       * Also reload to reset GS_BASE if it was non-zero.
   7.144       */
   7.145 -    if ( unlikely(p->arch.user_regs.gs |
   7.146 -                  p->arch.user_regs.gs_base_user |
   7.147 -                  n->arch.user_regs.gs) )
   7.148 +    if ( unlikely(pctxt->user_regs.gs |
   7.149 +                  pctxt->gs_base_user |
   7.150 +                  nctxt->user_regs.gs) )
   7.151      {
   7.152          /* Reset GS_BASE with user %gs? */
   7.153 -        if ( p->arch.user_regs.gs || !n->arch.user_regs.gs_base_user )
   7.154 -            all_segs_okay &= loadsegment(gs, n->arch.user_regs.gs);
   7.155 -        if ( p->arch.user_regs.gs ) /* != 0 selector kills gs_base_user */
   7.156 -            p->arch.user_regs.gs_base_user = 0;
   7.157 +        if ( pctxt->user_regs.gs || !nctxt->gs_base_user )
   7.158 +            all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
   7.159 +        if ( pctxt->user_regs.gs ) /* != 0 selector kills gs_base_user */
   7.160 +            pctxt->gs_base_user = 0;
   7.161      }
   7.162  
   7.163      /* This can only be non-zero if selector is NULL. */
   7.164 -    if ( n->arch.user_regs.fs_base )
   7.165 +    if ( nctxt->fs_base )
   7.166          wrmsr(MSR_FS_BASE,
   7.167 -              n->arch.user_regs.fs_base,
   7.168 -              n->arch.user_regs.fs_base>>32);
   7.169 +              nctxt->fs_base,
   7.170 +              nctxt->fs_base>>32);
   7.171  
   7.172      /* Most kernels have non-zero GS base, so don't bother testing. */
   7.173      /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
   7.174      wrmsr(MSR_SHADOW_GS_BASE,
   7.175 -          n->arch.user_regs.gs_base_kernel,
   7.176 -          n->arch.user_regs.gs_base_kernel>>32);
   7.177 +          nctxt->gs_base_kernel,
   7.178 +          nctxt->gs_base_kernel>>32);
   7.179  
   7.180      /* This can only be non-zero if selector is NULL. */
   7.181 -    if ( n->arch.user_regs.gs_base_user )
   7.182 +    if ( nctxt->gs_base_user )
   7.183          wrmsr(MSR_GS_BASE,
   7.184 -              n->arch.user_regs.gs_base_user,
   7.185 -              n->arch.user_regs.gs_base_user>>32);
   7.186 +              nctxt->gs_base_user,
   7.187 +              nctxt->gs_base_user>>32);
   7.188  
   7.189      /* If in kernel mode then switch the GS bases around. */
   7.190      if ( n->arch.flags & TF_kernel_mode )
   7.191 @@ -625,24 +602,24 @@ static void load_segments(struct exec_do
   7.192          unsigned long   *rsp =
   7.193              (n->arch.flags & TF_kernel_mode) ?
   7.194              (unsigned long *)regs->rsp : 
   7.195 -            (unsigned long *)n->arch.kernel_sp;
   7.196 +            (unsigned long *)nctxt->kernel_sp;
   7.197  
   7.198          if ( !(n->arch.flags & TF_kernel_mode) )
   7.199              toggle_guest_mode(n);
   7.200          else
   7.201              regs->cs &= ~3;
   7.202  
   7.203 -        if ( put_user(regs->ss,             rsp- 1) |
   7.204 -             put_user(regs->rsp,            rsp- 2) |
   7.205 -             put_user(regs->rflags,         rsp- 3) |
   7.206 -             put_user(regs->cs,             rsp- 4) |
   7.207 -             put_user(regs->rip,            rsp- 5) |
   7.208 -             put_user(n->arch.user_regs.gs, rsp- 6) |
   7.209 -             put_user(n->arch.user_regs.fs, rsp- 7) |
   7.210 -             put_user(n->arch.user_regs.es, rsp- 8) |
   7.211 -             put_user(n->arch.user_regs.ds, rsp- 9) |
   7.212 -             put_user(regs->r11,            rsp-10) |
   7.213 -             put_user(regs->rcx,            rsp-11) )
   7.214 +        if ( put_user(regs->ss,            rsp- 1) |
   7.215 +             put_user(regs->rsp,           rsp- 2) |
   7.216 +             put_user(regs->rflags,        rsp- 3) |
   7.217 +             put_user(regs->cs,            rsp- 4) |
   7.218 +             put_user(regs->rip,           rsp- 5) |
   7.219 +             put_user(nctxt->user_regs.gs, rsp- 6) |
   7.220 +             put_user(nctxt->user_regs.fs, rsp- 7) |
   7.221 +             put_user(nctxt->user_regs.es, rsp- 8) |
   7.222 +             put_user(nctxt->user_regs.ds, rsp- 9) |
   7.223 +             put_user(regs->r11,           rsp-10) |
   7.224 +             put_user(regs->rcx,           rsp-11) )
   7.225          {
   7.226              DPRINTK("Error while creating failsafe callback frame.\n");
   7.227              domain_crash();
   7.228 @@ -653,16 +630,17 @@ static void load_segments(struct exec_do
   7.229          regs->ss            = __GUEST_SS;
   7.230          regs->rsp           = (unsigned long)(rsp-11);
   7.231          regs->cs            = __GUEST_CS;
   7.232 -        regs->rip           = n->arch.failsafe_address;
   7.233 +        regs->rip           = nctxt->failsafe_callback_eip;
   7.234      }
   7.235  }
   7.236  
   7.237 -static void save_segments(struct exec_domain *p)
   7.238 +static void save_segments(struct exec_domain *ed)
   7.239  {
   7.240 -    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_regs.ds) );
   7.241 -    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_regs.es) );
   7.242 -    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_regs.fs) );
   7.243 -    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_regs.gs) );
   7.244 +    struct cpu_user_regs *regs = &ed->arch.guest_context.user_regs;
   7.245 +    __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (regs->ds) );
   7.246 +    __asm__ __volatile__ ( "movl %%es,%0" : "=m" (regs->es) );
   7.247 +    __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (regs->fs) );
   7.248 +    __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (regs->gs) );
   7.249  }
   7.250  
   7.251  static void clear_segments(void)
   7.252 @@ -695,7 +673,7 @@ long do_switch_to_user(void)
   7.253      regs->rsp    = stu.rsp;
   7.254      regs->ss     = stu.ss | 3; /* force guest privilege */
   7.255  
   7.256 -    if ( !(stu.flags & ECF_IN_SYSCALL) )
   7.257 +    if ( !(stu.flags & VGCF_IN_SYSCALL) )
   7.258      {
   7.259          regs->entry_vector = 0;
   7.260          regs->r11 = stu.r11;
   7.261 @@ -717,8 +695,8 @@ long do_switch_to_user(void)
   7.262  static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
   7.263  {
   7.264      struct tss_struct *tss = &init_tss[cpu];
   7.265 -    tss->esp1 = n->arch.kernel_sp;
   7.266 -    tss->ss1  = n->arch.kernel_ss;
   7.267 +    tss->esp1 = n->arch.guest_context.kernel_sp;
   7.268 +    tss->ss1  = n->arch.guest_context.kernel_ss;
   7.269  }
   7.270  
   7.271  #endif
   7.272 @@ -728,15 +706,15 @@ static inline void switch_kernel_stack(s
   7.273  
   7.274  static void __context_switch(void)
   7.275  {
   7.276 -    struct cpu_user_regs *stack_ec = get_cpu_user_regs();
   7.277 +    struct cpu_user_regs *stack_regs = get_cpu_user_regs();
   7.278      unsigned int         cpu = smp_processor_id();
   7.279      struct exec_domain  *p = percpu_ctxt[cpu].curr_ed;
   7.280      struct exec_domain  *n = current;
   7.281  
   7.282      if ( !is_idle_task(p->domain) )
   7.283      {
   7.284 -        memcpy(&p->arch.user_regs,
   7.285 -               stack_ec, 
   7.286 +        memcpy(&p->arch.guest_context.user_regs,
   7.287 +               stack_regs, 
   7.288                 CTXT_SWITCH_STACK_BYTES);
   7.289          unlazy_fpu(p);
   7.290          CLEAR_FAST_TRAP(&p->arch);
   7.291 @@ -745,20 +723,20 @@ static void __context_switch(void)
   7.292  
   7.293      if ( !is_idle_task(n->domain) )
   7.294      {
   7.295 -        memcpy(stack_ec,
   7.296 -               &n->arch.user_regs,
   7.297 +        memcpy(stack_regs,
   7.298 +               &n->arch.guest_context.user_regs,
   7.299                 CTXT_SWITCH_STACK_BYTES);
   7.300  
   7.301          /* Maybe switch the debug registers. */
   7.302 -        if ( unlikely(n->arch.debugreg[7]) )
   7.303 +        if ( unlikely(n->arch.guest_context.debugreg[7]) )
   7.304          {
   7.305 -            loaddebug(&n->arch, 0);
   7.306 -            loaddebug(&n->arch, 1);
   7.307 -            loaddebug(&n->arch, 2);
   7.308 -            loaddebug(&n->arch, 3);
   7.309 +            loaddebug(&n->arch.guest_context, 0);
   7.310 +            loaddebug(&n->arch.guest_context, 1);
   7.311 +            loaddebug(&n->arch.guest_context, 2);
   7.312 +            loaddebug(&n->arch.guest_context, 3);
   7.313              /* no 4 and 5 */
   7.314 -            loaddebug(&n->arch, 6);
   7.315 -            loaddebug(&n->arch, 7);
   7.316 +            loaddebug(&n->arch.guest_context, 6);
   7.317 +            loaddebug(&n->arch.guest_context, 7);
   7.318          }
   7.319  
   7.320          if ( !VMX_DOMAIN(n) )
     8.1 --- a/xen/arch/x86/domain_build.c	Thu Apr 28 15:12:37 2005 +0000
     8.2 +++ b/xen/arch/x86/domain_build.c	Thu Apr 28 18:04:45 2005 +0000
     8.3 @@ -222,14 +222,15 @@ int construct_dom0(struct domain *d,
     8.4       * We're basically forcing default RPLs to 1, so that our "what privilege
     8.5       * level are we returning to?" logic works.
     8.6       */
     8.7 -    ed->arch.failsafe_selector = FLAT_KERNEL_CS;
     8.8 -    ed->arch.event_selector    = FLAT_KERNEL_CS;
     8.9 -    ed->arch.kernel_ss = FLAT_KERNEL_SS;
    8.10 +    ed->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
    8.11      for ( i = 0; i < 256; i++ ) 
    8.12 -        ed->arch.traps[i].cs = FLAT_KERNEL_CS;
    8.13 +        ed->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
    8.14  
    8.15  #if defined(__i386__)
    8.16  
    8.17 +    ed->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
    8.18 +    ed->arch.guest_context.event_callback_cs    = FLAT_KERNEL_CS;
    8.19 +
    8.20      /*
    8.21       * Protect the lowest 1GB of memory. We use a temporary mapping there
    8.22       * from which we copy the kernel and ramdisk images.
     9.1 --- a/xen/arch/x86/i387.c	Thu Apr 28 15:12:37 2005 +0000
     9.2 +++ b/xen/arch/x86/i387.c	Thu Apr 28 18:04:45 2005 +0000
     9.3 @@ -34,11 +34,11 @@ void save_init_fpu(struct exec_domain *t
     9.4      if ( cpu_has_fxsr )
     9.5          __asm__ __volatile__ (
     9.6              "fxsave %0 ; fnclex"
     9.7 -            : "=m" (tsk->arch.i387) );
     9.8 +            : "=m" (tsk->arch.guest_context.fpu_ctxt) );
     9.9      else
    9.10          __asm__ __volatile__ (
    9.11              "fnsave %0 ; fwait"
    9.12 -            : "=m" (tsk->arch.i387) );
    9.13 +            : "=m" (tsk->arch.guest_context.fpu_ctxt) );
    9.14  
    9.15      clear_bit(EDF_USEDFPU, &tsk->ed_flags);
    9.16      stts();
    9.17 @@ -49,11 +49,11 @@ void restore_fpu(struct exec_domain *tsk
    9.18      if ( cpu_has_fxsr )
    9.19          __asm__ __volatile__ (
    9.20              "fxrstor %0"
    9.21 -            : : "m" (tsk->arch.i387) );
    9.22 +            : : "m" (tsk->arch.guest_context.fpu_ctxt) );
    9.23      else
    9.24          __asm__ __volatile__ (
    9.25              "frstor %0"
    9.26 -            : : "m" (tsk->arch.i387) );
    9.27 +            : : "m" (tsk->arch.guest_context.fpu_ctxt) );
    9.28  }
    9.29  
    9.30  /*
    10.1 --- a/xen/arch/x86/mm.c	Thu Apr 28 15:12:37 2005 +0000
    10.2 +++ b/xen/arch/x86/mm.c	Thu Apr 28 18:04:45 2005 +0000
    10.3 @@ -285,7 +285,7 @@ int map_ldt_shadow_page(unsigned int off
    10.4      struct domain *d = ed->domain;
    10.5      unsigned long gpfn, gmfn;
    10.6      l1_pgentry_t l1e, nl1e;
    10.7 -    unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
    10.8 +    unsigned gva = ed->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
    10.9      int res;
   10.10  
   10.11  #if defined(__x86_64__)
   10.12 @@ -1639,12 +1639,12 @@ int do_mmuext_op(
   10.13                  okay = 0;
   10.14                  MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
   10.15              }
   10.16 -            else if ( (ed->arch.ldt_ents != ents) || 
   10.17 -                      (ed->arch.ldt_base != ptr) )
   10.18 +            else if ( (ed->arch.guest_context.ldt_ents != ents) || 
   10.19 +                      (ed->arch.guest_context.ldt_base != ptr) )
   10.20              {
   10.21                  invalidate_shadow_ldt(ed);
   10.22 -                ed->arch.ldt_base = ptr;
   10.23 -                ed->arch.ldt_ents = ents;
   10.24 +                ed->arch.guest_context.ldt_base = ptr;
   10.25 +                ed->arch.guest_context.ldt_ents = ents;
   10.26                  load_LDT(ed);
   10.27                  percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   10.28                  if ( ents != 0 )
    11.1 --- a/xen/arch/x86/traps.c	Thu Apr 28 15:12:37 2005 +0000
    11.2 +++ b/xen/arch/x86/traps.c	Thu Apr 28 18:04:45 2005 +0000
    11.3 @@ -150,11 +150,12 @@ static inline int do_trap(int trapnr, ch
    11.4          goto xen_fault;
    11.5  
    11.6  #ifndef NDEBUG
    11.7 -    if ( (ed->arch.traps[trapnr].address == 0) && (ed->domain->id == 0) )
    11.8 +    if ( (ed->arch.guest_context.trap_ctxt[trapnr].address == 0) &&
    11.9 +         (ed->domain->id == 0) )
   11.10          goto xen_fault;
   11.11  #endif
   11.12  
   11.13 -    ti = current->arch.traps + trapnr;
   11.14 +    ti = &current->arch.guest_context.trap_ctxt[trapnr];
   11.15      tb->flags = TBF_EXCEPTION;
   11.16      tb->cs    = ti->cs;
   11.17      tb->eip   = ti->address;
   11.18 @@ -224,7 +225,7 @@ asmlinkage int do_int3(struct cpu_user_r
   11.19          panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
   11.20      } 
   11.21  
   11.22 -    ti = current->arch.traps + 3;
   11.23 +    ti = &current->arch.guest_context.trap_ctxt[TRAP_int3];
   11.24      tb->flags = TBF_EXCEPTION;
   11.25      tb->cs    = ti->cs;
   11.26      tb->eip   = ti->address;
   11.27 @@ -245,7 +246,7 @@ void propagate_page_fault(unsigned long 
   11.28      struct exec_domain *ed = current;
   11.29      struct trap_bounce *tb = &ed->arch.trap_bounce;
   11.30  
   11.31 -    ti = ed->arch.traps + 14;
   11.32 +    ti = &ed->arch.guest_context.trap_ctxt[TRAP_page_fault];
   11.33      tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
   11.34      tb->cr2        = addr;
   11.35      tb->error_code = error_code;
   11.36 @@ -303,7 +304,8 @@ asmlinkage int do_page_fault(struct cpu_
   11.37      }
   11.38  
   11.39      if ( unlikely(addr >= LDT_VIRT_START(ed)) && 
   11.40 -         (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) )
   11.41 +         (addr < (LDT_VIRT_START(ed) + 
   11.42 +                  (ed->arch.guest_context.ldt_ents*LDT_ENTRY_SIZE))) )
   11.43      {
   11.44          /*
   11.45           * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
   11.46 @@ -312,7 +314,7 @@ asmlinkage int do_page_fault(struct cpu_
   11.47          extern int map_ldt_shadow_page(unsigned int);
   11.48          LOCK_BIGLOCK(d);
   11.49          off  = addr - LDT_VIRT_START(ed);
   11.50 -        addr = ed->arch.ldt_base + off;
   11.51 +        addr = ed->arch.guest_context.ldt_base + off;
   11.52          ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
   11.53          UNLOCK_BIGLOCK(d);
   11.54          if ( likely(ret) )
   11.55 @@ -323,7 +325,8 @@ asmlinkage int do_page_fault(struct cpu_
   11.56          goto xen_fault;
   11.57  
   11.58  #ifndef NDEBUG
   11.59 -    if ( (ed->arch.traps[TRAP_page_fault].address == 0) && (d->id == 0) )
   11.60 +    if ( (ed->arch.guest_context.trap_ctxt[TRAP_page_fault].address == 0) &&
   11.61 +         (d->id == 0) )
   11.62          goto xen_fault;
   11.63  #endif
   11.64  
   11.65 @@ -781,7 +784,7 @@ asmlinkage int do_general_protection(str
   11.66      if ( (regs->error_code & 3) == 2 )
   11.67      {
   11.68          /* This fault must be due to <INT n> instruction. */
   11.69 -        ti = current->arch.traps + (regs->error_code>>3);
   11.70 +        ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
   11.71          if ( PERMIT_SOFTINT(TI_GET_DPL(ti), ed, regs) )
   11.72          {
   11.73              tb->flags = TBF_EXCEPTION;
   11.74 @@ -803,13 +806,13 @@ asmlinkage int do_general_protection(str
   11.75  #endif
   11.76  
   11.77  #ifndef NDEBUG
   11.78 -    if ( (ed->arch.traps[TRAP_gp_fault].address == 0) &&
   11.79 +    if ( (ed->arch.guest_context.trap_ctxt[TRAP_gp_fault].address == 0) &&
   11.80           (ed->domain->id == 0) )
   11.81          goto gp_in_kernel;
   11.82  #endif
   11.83  
   11.84      /* Pass on GPF as is. */
   11.85 -    ti = current->arch.traps + 13;
   11.86 +    ti = &current->arch.guest_context.trap_ctxt[TRAP_gp_fault];
   11.87      tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
   11.88      tb->error_code = regs->error_code;
   11.89   finish_propagation:
   11.90 @@ -916,21 +919,20 @@ asmlinkage int math_state_restore(struct
   11.91      /* Prevent recursion. */
   11.92      clts();
   11.93  
   11.94 -    if ( !test_bit(EDF_USEDFPU, &current->ed_flags) )
   11.95 +    if ( !test_and_set_bit(EDF_USEDFPU, &current->ed_flags) )
   11.96      {
   11.97          if ( test_bit(EDF_DONEFPUINIT, &current->ed_flags) )
   11.98              restore_fpu(current);
   11.99          else
  11.100              init_fpu();
  11.101 -        set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
  11.102      }
  11.103  
  11.104      if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
  11.105      {
  11.106          struct trap_bounce *tb = &current->arch.trap_bounce;
  11.107 -        tb->flags      = TBF_EXCEPTION;
  11.108 -        tb->cs         = current->arch.traps[7].cs;
  11.109 -        tb->eip        = current->arch.traps[7].address;
  11.110 +        tb->flags = TBF_EXCEPTION;
  11.111 +        tb->cs    = current->arch.guest_context.trap_ctxt[7].cs;
  11.112 +        tb->eip   = current->arch.guest_context.trap_ctxt[7].address;
  11.113      }
  11.114  
  11.115      return EXCRET_fault_fixed;
  11.116 @@ -946,7 +948,7 @@ asmlinkage int do_debug(struct cpu_user_
  11.117  
  11.118      /* Mask out spurious debug traps due to lazy DR7 setting */
  11.119      if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
  11.120 -         (ed->arch.debugreg[7] == 0) )
  11.121 +         (ed->arch.guest_context.debugreg[7] == 0) )
  11.122      {
  11.123          __asm__("mov %0,%%db7" : : "r" (0UL));
  11.124          goto out;
  11.125 @@ -968,11 +970,11 @@ asmlinkage int do_debug(struct cpu_user_
  11.126      } 
  11.127  
  11.128      /* Save debug status register where guest OS can peek at it */
  11.129 -    ed->arch.debugreg[6] = condition;
  11.130 +    ed->arch.guest_context.debugreg[6] = condition;
  11.131  
  11.132      tb->flags = TBF_EXCEPTION;
  11.133 -    tb->cs    = ed->arch.traps[1].cs;
  11.134 -    tb->eip   = ed->arch.traps[1].address;
  11.135 +    tb->cs    = ed->arch.guest_context.trap_ctxt[TRAP_debug].cs;
  11.136 +    tb->eip   = ed->arch.guest_context.trap_ctxt[TRAP_debug].address;
  11.137  
  11.138   out:
  11.139      return EXCRET_not_a_fault;
  11.140 @@ -1059,7 +1061,7 @@ void __init trap_init(void)
  11.141  long do_set_trap_table(trap_info_t *traps)
  11.142  {
  11.143      trap_info_t cur;
  11.144 -    trap_info_t *dst = current->arch.traps;
  11.145 +    trap_info_t *dst = current->arch.guest_context.trap_ctxt;
  11.146      long rc = 0;
  11.147  
  11.148      LOCK_BIGLOCK(current->domain);
  11.149 @@ -1163,7 +1165,7 @@ long set_debugreg(struct exec_domain *p,
  11.150          return -EINVAL;
  11.151      }
  11.152  
  11.153 -    p->arch.debugreg[reg] = value;
  11.154 +    p->arch.guest_context.debugreg[reg] = value;
  11.155      return 0;
  11.156  }
  11.157  
  11.158 @@ -1175,7 +1177,7 @@ long do_set_debugreg(int reg, unsigned l
  11.159  unsigned long do_get_debugreg(int reg)
  11.160  {
  11.161      if ( (reg < 0) || (reg > 7) ) return -EINVAL;
  11.162 -    return current->arch.debugreg[reg];
  11.163 +    return current->arch.guest_context.debugreg[reg];
  11.164  }
  11.165  
  11.166  /*
    12.1 --- a/xen/arch/x86/vmx.c	Thu Apr 28 15:12:37 2005 +0000
    12.2 +++ b/xen/arch/x86/vmx.c	Thu Apr 28 18:04:45 2005 +0000
    12.3 @@ -250,18 +250,18 @@ static void vmx_dr_access (unsigned long
    12.4      case TYPE_MOV_TO_DR: 
    12.5          /* don't need to check the range */
    12.6          if (reg != REG_ESP)
    12.7 -            ed->arch.debugreg[reg] = *reg_p; 
    12.8 +            ed->arch.guest_context.debugreg[reg] = *reg_p; 
    12.9          else {
   12.10              unsigned long value;
   12.11              __vmread(GUEST_ESP, &value);
   12.12 -            ed->arch.debugreg[reg] = value;
   12.13 +            ed->arch.guest_context.debugreg[reg] = value;
   12.14          }
   12.15          break;
   12.16      case TYPE_MOV_FROM_DR:
   12.17          if (reg != REG_ESP)
   12.18 -            *reg_p = ed->arch.debugreg[reg];
   12.19 +            *reg_p = ed->arch.guest_context.debugreg[reg];
   12.20          else {
   12.21 -            __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]);
   12.22 +            __vmwrite(GUEST_ESP, ed->arch.guest_context.debugreg[reg]);
   12.23          }
   12.24          break;
   12.25      }
    13.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Apr 28 15:12:37 2005 +0000
    13.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Apr 28 18:04:45 2005 +0000
    13.3 @@ -48,14 +48,20 @@ void __dummy__(void)
    13.4  
    13.5      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    13.6      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    13.7 -    OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector);
    13.8 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
    13.9 -    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector);
   13.10 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
   13.11      OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   13.12      OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   13.13 -    OFFSET(EDOMAIN_kernel_ss, struct exec_domain, arch.kernel_ss);
   13.14 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp);
   13.15 +    OFFSET(EDOMAIN_event_sel, struct exec_domain,
   13.16 +           arch.guest_context.event_callback_cs);
   13.17 +    OFFSET(EDOMAIN_event_addr, struct exec_domain, 
   13.18 +           arch.guest_context.event_callback_eip);
   13.19 +    OFFSET(EDOMAIN_failsafe_sel, struct exec_domain,
   13.20 +           arch.guest_context.failsafe_callback_cs);
   13.21 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   13.22 +           arch.guest_context.failsafe_callback_eip);
   13.23 +    OFFSET(EDOMAIN_kernel_ss, struct exec_domain,
   13.24 +           arch.guest_context.kernel_ss);
   13.25 +    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   13.26 +           arch.guest_context.kernel_sp);
   13.27      BLANK();
   13.28  
   13.29      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    14.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Apr 28 15:12:37 2005 +0000
    14.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu Apr 28 18:04:45 2005 +0000
    14.3 @@ -188,8 +188,8 @@ long do_stack_switch(unsigned long ss, u
    14.4      if ( (ss & 3) != 1 )
    14.5          return -EPERM;
    14.6  
    14.7 -    current->arch.kernel_ss = ss;
    14.8 -    current->arch.kernel_sp = esp;
    14.9 +    current->arch.guest_context.kernel_ss = ss;
   14.10 +    current->arch.guest_context.kernel_sp = esp;
   14.11      t->ss1  = ss;
   14.12      t->esp1 = esp;
   14.13  
    15.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Thu Apr 28 15:12:37 2005 +0000
    15.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Thu Apr 28 18:04:45 2005 +0000
    15.3 @@ -115,7 +115,7 @@ int get_baselimit(u16 seg, unsigned long
    15.4      if ( ldt )
    15.5      {
    15.6          table = (unsigned long *)LDT_VIRT_START(d);
    15.7 -        if ( idx >= d->arch.ldt_ents )
    15.8 +        if ( idx >= d->arch.guest_context.ldt_ents )
    15.9              goto fail;
   15.10      }
   15.11      else /* gdt */
   15.12 @@ -181,7 +181,7 @@ int fixup_seg(u16 seg, unsigned long off
   15.13      if ( ldt )
   15.14      {
   15.15          table = (unsigned long *)LDT_VIRT_START(d);
   15.16 -        if ( idx >= d->arch.ldt_ents )
   15.17 +        if ( idx >= d->arch.guest_context.ldt_ents )
   15.18          {
   15.19              DPRINTK("Segment %04x out of LDT range (%ld)\n",
   15.20                      seg, d->arch.ldt_ents);
   15.21 @@ -449,7 +449,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
   15.22      /* If requested, give a callback on otherwise unused vector 15. */
   15.23      if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) )
   15.24      {
   15.25 -        ti  = &d->arch.traps[15];
   15.26 +        ti  = &d->arch.guest_context.trap_ctxt[15];
   15.27          tb  = &d->arch.trap_bounce;
   15.28          tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
   15.29          tb->error_code = pb - eip;
    16.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Apr 28 15:12:37 2005 +0000
    16.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Apr 28 18:04:45 2005 +0000
    16.3 @@ -281,7 +281,7 @@ long set_fast_trap(struct exec_domain *p
    16.4      if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 
    16.5          return -1;
    16.6  
    16.7 -    ti = p->arch.traps + idx;
    16.8 +    ti = &p->arch.guest_context.trap_ctxt[idx];
    16.9  
   16.10      /*
   16.11       * We can't virtualise interrupt gates, as there's no way to get
   16.12 @@ -293,7 +293,7 @@ long set_fast_trap(struct exec_domain *p
   16.13      if ( p == current )
   16.14          CLEAR_FAST_TRAP(&p->arch);
   16.15  
   16.16 -    p->arch.fast_trap_idx    = idx;
   16.17 +    p->arch.guest_context.fast_trap_idx = idx;
   16.18      p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   16.19      p->arch.fast_trap_desc.b = 
   16.20          (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;
   16.21 @@ -320,10 +320,10 @@ long do_set_callbacks(unsigned long even
   16.22      if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
   16.23          return -EPERM;
   16.24  
   16.25 -    d->arch.event_selector    = event_selector;
   16.26 -    d->arch.event_address     = event_address;
   16.27 -    d->arch.failsafe_selector = failsafe_selector;
   16.28 -    d->arch.failsafe_address  = failsafe_address;
   16.29 +    d->arch.guest_context.event_callback_cs     = event_selector;
   16.30 +    d->arch.guest_context.event_callback_eip    = event_address;
   16.31 +    d->arch.guest_context.failsafe_callback_cs  = failsafe_selector;
   16.32 +    d->arch.guest_context.failsafe_callback_eip = failsafe_address;
   16.33  
   16.34      return 0;
   16.35  }
    17.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Apr 28 15:12:37 2005 +0000
    17.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Apr 28 18:04:45 2005 +0000
    17.3 @@ -52,12 +52,16 @@ void __dummy__(void)
    17.4  
    17.5      OFFSET(EDOMAIN_processor, struct exec_domain, processor);
    17.6      OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
    17.7 -    OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address);
    17.8 -    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address);
    17.9 -    OFFSET(EDOMAIN_syscall_addr, struct exec_domain, arch.syscall_address);
   17.10      OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
   17.11      OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
   17.12 -    OFFSET(EDOMAIN_kernel_sp, struct exec_domain, arch.kernel_sp);
   17.13 +    OFFSET(EDOMAIN_event_addr, struct exec_domain,
   17.14 +           arch.guest_context.event_callback_eip);
   17.15 +    OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
   17.16 +           arch.guest_context.failsafe_callback_eip);
   17.17 +    OFFSET(EDOMAIN_syscall_addr, struct exec_domain,
   17.18 +           arch.guest_context.syscall_callback_eip);
   17.19 +    OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
   17.20 +           arch.guest_context.kernel_sp);
   17.21      BLANK();
   17.22  
   17.23      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    18.1 --- a/xen/arch/x86/x86_64/entry.S	Thu Apr 28 15:12:37 2005 +0000
    18.2 +++ b/xen/arch/x86/x86_64/entry.S	Thu Apr 28 18:04:45 2005 +0000
    18.3 @@ -161,9 +161,6 @@ test_all_events:
    18.4   *
    18.5   * We also need the room, especially because orig_eax field is used 
    18.6   * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following:
    18.7 - *   (13) u64 gs_base_user;                 
    18.8 - *   (12) u64 gs_base_kernel;                 
    18.9 - *   (11) u64 fs_base;                 
   18.10   *   (10) u64 gs;                 
   18.11   *   (9)  u64 fs;
   18.12   *   (8)  u64 ds;
   18.13 @@ -176,9 +173,6 @@ test_all_events:
   18.14   *   (2)  u64 rip;
   18.15   * (2/1)  u32 entry_vector;
   18.16   * (1/1)  u32 error_code;
   18.17 - * However, get_stack_bottom() actually returns 64 bytes before the real
   18.18 - * bottom of the stack to allow space for:
   18.19 - * domain pointer, DS, ES, FS, GS. Therefore, we effectively skip 6 registers.
   18.20   */
   18.21  #define VMX_MONITOR_RFLAGS	0x202 /* IF on */
   18.22  #define NR_SKIPPED_REGS	6	/* See the above explanation */
    19.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Apr 28 15:12:37 2005 +0000
    19.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Apr 28 18:04:45 2005 +0000
    19.3 @@ -240,8 +240,8 @@ long do_stack_switch(unsigned long ss, u
    19.4  {
    19.5      if ( (ss & 3) != 3 )
    19.6          return -EPERM;
    19.7 -    current->arch.kernel_ss = ss;
    19.8 -    current->arch.kernel_sp = esp;
    19.9 +    current->arch.guest_context.kernel_ss = ss;
   19.10 +    current->arch.guest_context.kernel_sp = esp;
   19.11      return 0;
   19.12  }
   19.13  
   19.14 @@ -253,21 +253,24 @@ long do_set_segment_base(unsigned int wh
   19.15      switch ( which )
   19.16      {
   19.17      case SEGBASE_FS:
   19.18 -        ed->arch.user_regs.fs_base = base;
   19.19          if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
   19.20              ret = -EFAULT;
   19.21 +        else
   19.22 +            ed->arch.guest_context.fs_base = base;
   19.23          break;
   19.24  
   19.25      case SEGBASE_GS_USER:
   19.26 -        ed->arch.user_regs.gs_base_user = base;
   19.27          if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
   19.28              ret = -EFAULT;
   19.29 +        else
   19.30 +            ed->arch.guest_context.gs_base_user = base;
   19.31          break;
   19.32  
   19.33      case SEGBASE_GS_KERNEL:
   19.34 -        ed->arch.user_regs.gs_base_kernel = base;
   19.35          if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
   19.36              ret = -EFAULT;
   19.37 +        else
   19.38 +            ed->arch.guest_context.gs_base_kernel = base;
   19.39          break;
   19.40  
   19.41      case SEGBASE_GS_USER_SEL:
    20.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Apr 28 15:12:37 2005 +0000
    20.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Apr 28 18:04:45 2005 +0000
    20.3 @@ -255,9 +255,9 @@ long do_set_callbacks(unsigned long even
    20.4  {
    20.5      struct exec_domain *d = current;
    20.6  
    20.7 -    d->arch.event_address    = event_address;
    20.8 -    d->arch.failsafe_address = failsafe_address;
    20.9 -    d->arch.syscall_address  = syscall_address;
   20.10 +    d->arch.guest_context.event_callback_eip    = event_address;
   20.11 +    d->arch.guest_context.failsafe_callback_eip = failsafe_address;
   20.12 +    d->arch.guest_context.syscall_callback_eip  = syscall_address;
   20.13  
   20.14      return 0;
   20.15  }
    21.1 --- a/xen/include/asm-x86/domain.h	Thu Apr 28 15:12:37 2005 +0000
    21.2 +++ b/xen/include/asm-x86/domain.h	Thu Apr 28 18:04:45 2005 +0000
    21.3 @@ -66,38 +66,12 @@ struct arch_domain
    21.4  
    21.5  struct arch_exec_domain
    21.6  {
    21.7 -    unsigned long      kernel_sp;
    21.8 -    unsigned long      kernel_ss;
    21.9 +    struct vcpu_guest_context guest_context;
   21.10  
   21.11      unsigned long      flags; /* TF_ */
   21.12  
   21.13 -    /* Hardware debugging registers */
   21.14 -    unsigned long      debugreg[8];  /* %%db0-7 debug registers */
   21.15 -
   21.16 -    /* floating point info */
   21.17 -    struct i387_state  i387;
   21.18 -
   21.19 -    /* general user-visible register state */
   21.20 -    struct cpu_user_regs user_regs;
   21.21 -
   21.22      void (*schedule_tail) (struct exec_domain *);
   21.23  
   21.24 -    /*
   21.25 -     * Return vectors pushed to us by guest OS.
   21.26 -     * The stack frame for events is exactly that of an x86 hardware interrupt.
   21.27 -     * The stack frame for a failsafe callback is augmented with saved values
   21.28 -     * for segment registers %ds, %es, %fs and %gs:
   21.29 -     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
   21.30 -     */
   21.31 -
   21.32 -    unsigned long event_selector;    /* entry CS  (x86/32 only) */
   21.33 -    unsigned long event_address;     /* entry EIP */
   21.34 -
   21.35 -    unsigned long failsafe_selector; /* entry CS  (x86/32 only) */
   21.36 -    unsigned long failsafe_address;  /* entry EIP */
   21.37 -
   21.38 -    unsigned long syscall_address;   /* entry EIP (x86/64 only) */
   21.39 -
   21.40      /* Bounce information for propagating an exception to guest OS. */
   21.41      struct trap_bounce trap_bounce;
   21.42  
   21.43 @@ -108,10 +82,8 @@ struct arch_exec_domain
   21.44  
   21.45      /* Trap info. */
   21.46  #ifdef ARCH_HAS_FAST_TRAP
   21.47 -    int                fast_trap_idx;
   21.48      struct desc_struct fast_trap_desc;
   21.49  #endif
   21.50 -    trap_info_t        traps[256];
   21.51  
   21.52      /* Virtual Machine Extensions */
   21.53      struct arch_vmx_struct arch_vmx;
   21.54 @@ -143,7 +115,7 @@ struct arch_exec_domain
   21.55      unsigned long guest_cr2;
   21.56  
   21.57      /* Current LDT details. */
   21.58 -    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
   21.59 +    unsigned long shadow_ldt_mapcnt;
   21.60      /* Next entry is passed to LGDT on domain switch. */
   21.61      char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
   21.62  } __cacheline_aligned;
    22.1 --- a/xen/include/asm-x86/ldt.h	Thu Apr 28 15:12:37 2005 +0000
    22.2 +++ b/xen/include/asm-x86/ldt.h	Thu Apr 28 18:04:45 2005 +0000
    22.3 @@ -10,7 +10,7 @@ static inline void load_LDT(struct exec_
    22.4      struct desc_struct *desc;
    22.5      unsigned long ents;
    22.6  
    22.7 -    if ( (ents = ed->arch.ldt_ents) == 0 )
    22.8 +    if ( (ents = ed->arch.guest_context.ldt_ents) == 0 )
    22.9      {
   22.10          __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
   22.11      }
    23.1 --- a/xen/include/asm-x86/processor.h	Thu Apr 28 15:12:37 2005 +0000
    23.2 +++ b/xen/include/asm-x86/processor.h	Thu Apr 28 18:04:45 2005 +0000
    23.3 @@ -110,7 +110,7 @@
    23.4  #define TRAP_deferred_nmi     31
    23.5  
    23.6  /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
    23.7 -/* NB. Same as ECF_IN_SYSCALL. No bits in common with any other TRAP_* defn. */
    23.8 +/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_ defn. */
    23.9  #define TRAP_syscall         256
   23.10  
   23.11  /*
   23.12 @@ -332,10 +332,6 @@ static inline void clear_in_cr4 (unsigne
   23.13  #define IOBMP_BYTES             8192
   23.14  #define IOBMP_INVALID_OFFSET    0x8000
   23.15  
   23.16 -struct i387_state {
   23.17 -    u8 state[512]; /* big enough for FXSAVE */
   23.18 -} __attribute__ ((aligned (16)));
   23.19 -
   23.20  struct tss_struct {
   23.21      unsigned short	back_link,__blh;
   23.22  #ifdef __x86_64__
   23.23 @@ -384,16 +380,18 @@ extern struct tss_struct init_tss[NR_CPU
   23.24  #ifdef ARCH_HAS_FAST_TRAP
   23.25  
   23.26  #define SET_DEFAULT_FAST_TRAP(_p) \
   23.27 -    (_p)->fast_trap_idx = 0x20;   \
   23.28 +    (_p)->guest_context.fast_trap_idx = 0x20;   \
   23.29      (_p)->fast_trap_desc.a = 0;   \
   23.30      (_p)->fast_trap_desc.b = 0;
   23.31  
   23.32  #define CLEAR_FAST_TRAP(_p) \
   23.33 -    (memset(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
   23.34 -     0, 8))
   23.35 +    (memset(idt_tables[smp_processor_id()] + \
   23.36 +            (_p)->guest_context.fast_trap_idx, \
   23.37 +            0, 8))
   23.38  
   23.39  #define SET_FAST_TRAP(_p)   \
   23.40 -    (memcpy(idt_tables[smp_processor_id()] + (_p)->fast_trap_idx, \
   23.41 +    (memcpy(idt_tables[smp_processor_id()] + \
   23.42 +            (_p)->guest_context.fast_trap_idx, \
   23.43              &((_p)->fast_trap_desc), 8))
   23.44  
   23.45  long set_fast_trap(struct exec_domain *p, int idx);
    24.1 --- a/xen/include/asm-x86/x86_64/current.h	Thu Apr 28 15:12:37 2005 +0000
    24.2 +++ b/xen/include/asm-x86/x86_64/current.h	Thu Apr 28 18:04:45 2005 +0000
    24.3 @@ -34,15 +34,15 @@ static inline struct cpu_user_regs *get_
    24.4  
    24.5  /*
    24.6   * Get the bottom-of-stack, as stored in the per-CPU TSS. This is actually
    24.7 - * 64 bytes before the real bottom of the stack to allow space for:
    24.8 - *  domain pointer, DS, ES, FS, GS, FS_BASE, GS_BASE_OS, GS_BASE_APP
    24.9 + * 40 bytes before the real bottom of the stack to allow space for:
   24.10 + *  domain pointer, DS, ES, FS, GS
   24.11   */
   24.12  static inline unsigned long get_stack_bottom(void)
   24.13  {
   24.14      unsigned long p;
   24.15      __asm__( "andq %%rsp,%0; addq %2,%0"
   24.16  	    : "=r" (p)
   24.17 -	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-64) );
   24.18 +	    : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-40) );
   24.19      return p;
   24.20  }
   24.21  
    25.1 --- a/xen/include/public/arch-x86_32.h	Thu Apr 28 15:12:37 2005 +0000
    25.2 +++ b/xen/include/public/arch-x86_32.h	Thu Apr 28 18:04:45 2005 +0000
    25.3 @@ -121,21 +121,22 @@ typedef struct cpu_user_regs {
    25.4  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
    25.5  
    25.6  /*
    25.7 - * The following is all CPU context. Note that the i387_ctxt block is filled 
    25.8 + * The following is all CPU context. Note that the fpu_ctxt block is filled 
    25.9   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   25.10   */
   25.11  typedef struct vcpu_guest_context {
   25.12 -#define ECF_I387_VALID (1<<0)
   25.13 -#define ECF_VMX_GUEST  (1<<1)
   25.14 -#define ECF_IN_KERNEL  (1<<2)
   25.15 -    unsigned long flags;
   25.16 +#define VGCF_I387_VALID (1<<0)
   25.17 +#define VGCF_VMX_GUEST  (1<<1)
   25.18 +#define VGCF_IN_KERNEL  (1<<2)
   25.19 +    unsigned long flags;                    /* VGCF_* flags                 */
   25.20      cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   25.21 -    char          fpu_ctxt[256];            /* User-level FPU registers     */
   25.22 +    struct { char x[512]; } fpu_ctxt        /* User-level FPU registers     */
   25.23 +    __attribute__((__aligned__(16)));       /* (needs 16-byte alignment)    */
   25.24      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   25.25      unsigned int  fast_trap_idx;            /* "Fast trap" vector offset    */
   25.26      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
   25.27      unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
   25.28 -    unsigned long kernel_ss, kernel_esp;  /* Virtual TSS (only SS1/ESP1)  */
   25.29 +    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
   25.30      unsigned long pt_base;                  /* CR3 (pagetable base)         */
   25.31      unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
   25.32      unsigned long event_callback_cs;        /* CS:EIP of event callback     */
   25.33 @@ -143,15 +144,15 @@ typedef struct vcpu_guest_context {
   25.34      unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
   25.35      unsigned long failsafe_callback_eip;
   25.36      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   25.37 -} PACKED vcpu_guest_context_t;
   25.38 +} vcpu_guest_context_t;
   25.39  
   25.40  typedef struct {
   25.41      /* MFN of a table of MFNs that make up p2m table */
   25.42      u64 pfn_to_mfn_frame_list;
   25.43 -} PACKED arch_shared_info_t;
   25.44 +} arch_shared_info_t;
   25.45  
   25.46  typedef struct {
   25.47 -} PACKED arch_vcpu_info_t;
   25.48 +} arch_vcpu_info_t;
   25.49  
   25.50  #define ARCH_HAS_FAST_TRAP
   25.51  
    26.1 --- a/xen/include/public/arch-x86_64.h	Thu Apr 28 15:12:37 2005 +0000
    26.2 +++ b/xen/include/public/arch-x86_64.h	Thu Apr 28 18:04:45 2005 +0000
    26.3 @@ -101,7 +101,7 @@
    26.4   * int HYPERVISOR_switch_to_user(void)
    26.5   * All arguments are on the kernel stack, in the following format.
    26.6   * Never returns if successful. Current kernel context is lost.
    26.7 - * If flags contains ECF_IN_SYSCALL:
    26.8 + * If flags contains VGCF_IN_SYSCALL:
    26.9   *   Restore RAX, RIP, RFLAGS, RSP. 
   26.10   *   Discard R11, RCX, CS, SS.
   26.11   * Otherwise:
   26.12 @@ -109,7 +109,7 @@
   26.13   * All other registers are saved on hypercall entry and restored to user.
   26.14   */
   26.15  /* Guest exited in SYSCALL context? Return to guest with SYSRET? */
   26.16 -#define ECF_IN_SYSCALL (1<<8)
   26.17 +#define VGCF_IN_SYSCALL (1<<8)
   26.18  struct switch_to_user {
   26.19      /* Top of stack (%rsp at point of hypercall). */
   26.20      u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
   26.21 @@ -167,45 +167,47 @@ typedef struct cpu_user_regs {
   26.22      u64 ss;
   26.23      u64 es;
   26.24      u64 ds;
   26.25 -    u64 fs;      /* Non-zero => takes precedence over fs_base.     */
   26.26 -    u64 gs;      /* Non-zero => takes precedence over gs_base_app. */
   26.27 -    u64 fs_base;
   26.28 -    u64 gs_base_kernel;
   26.29 -    u64 gs_base_user;
   26.30 +    u64 fs;      /* Non-zero => takes precedence over fs_base.      */
   26.31 +    u64 gs;      /* Non-zero => takes precedence over gs_base_user. */
   26.32  } cpu_user_regs_t;
   26.33  
   26.34  typedef u64 tsc_timestamp_t; /* RDTSC timestamp */
   26.35  
   26.36  /*
   26.37 - * The following is all CPU context. Note that the i387_ctxt block is filled 
   26.38 + * The following is all CPU context. Note that the fpu_ctxt block is filled 
   26.39   * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
   26.40   */
   26.41  typedef struct vcpu_guest_context {
   26.42 -#define ECF_I387_VALID (1<<0)
   26.43 -#define ECF_VMX_GUEST  (1<<1)
   26.44 -#define ECF_IN_KERNEL  (1<<2)
   26.45 -    unsigned long flags;
   26.46 +#define VGCF_I387_VALID (1<<0)
   26.47 +#define VGCF_VMX_GUEST  (1<<1)
   26.48 +#define VGCF_IN_KERNEL  (1<<2)
   26.49 +    unsigned long flags;                    /* VGCF_* flags                 */
   26.50      cpu_user_regs_t user_regs;              /* User-level CPU registers     */
   26.51 -    char          fpu_ctxt[512];            /* User-level FPU registers     */
   26.52 +    struct { char x[512]; } fpu_ctxt        /* User-level FPU registers     */
   26.53 +    __attribute__((__aligned__(16)));       /* (needs 16-byte alignment)    */
   26.54      trap_info_t   trap_ctxt[256];           /* Virtual IDT                  */
   26.55      unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
   26.56      unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
   26.57 -    unsigned long kernel_ss, kernel_esp;  /* Virtual TSS (only SS1/ESP1)  */
   26.58 +    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
   26.59      unsigned long pt_base;                  /* CR3 (pagetable base)         */
   26.60      unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
   26.61      unsigned long event_callback_eip;
   26.62      unsigned long failsafe_callback_eip;
   26.63      unsigned long syscall_callback_eip;
   26.64      unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
   26.65 -} PACKED vcpu_guest_context_t;
   26.66 +    /* Segment base addresses. */
   26.67 +    u64           fs_base;
   26.68 +    u64           gs_base_kernel;
   26.69 +    u64           gs_base_user;
   26.70 +} vcpu_guest_context_t;
   26.71  
   26.72  typedef struct {
   26.73      /* MFN of a table of MFNs that make up p2m table */
   26.74      u64 pfn_to_mfn_frame_list;
   26.75 -} PACKED arch_shared_info_t;
   26.76 +} arch_shared_info_t;
   26.77  
   26.78  typedef struct {
   26.79 -} PACKED arch_vcpu_info_t;
   26.80 +} arch_vcpu_info_t;
   26.81  
   26.82  #endif /* !__ASSEMBLY__ */
   26.83