ia64/xen-unstable

changeset 15934:ec3b23d8d544

hvm: Always keep a canonical copy of RIP/RSP/RFLAGS in
guest_cpu_user_regs(). This reduces complexity at little or no performance
cost (except on very old Intel P4 hardware, where VMREAD/VMWRITE are
extremely expensive).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Sep 19 14:25:44 2007 +0100 (2007-09-19)
parents 202153d094d8
children 4bc37263e69f
files xen/arch/x86/domain.c xen/arch/x86/domctl.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S xen/arch/x86/mm/shadow/multi.c xen/arch/x86/oprofile/op_model_athlon.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/svm/emulate.h xen/include/asm-x86/hvm/vcpu.h
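
In outline, the patch drops the store_cpu_guest_regs/load_cpu_guest_regs hooks and lets handlers operate directly on guest_cpu_user_regs(); the VMCS/VMCB copy of RIP/RSP/RFLAGS is refreshed only on the vmexit/vmentry path. A minimal before/after sketch, condensed from the hunks below (illustrative only, not literal source):

    /* Before: RIP/RSP/RFLAGS lived only in the VMCS/VMCB, so any handler
     * touching them had to copy in and out via per-vendor hooks. */
    hvm_store_cpu_guest_regs(v, regs);  /* VMX: regs->eip = __vmread(GUEST_RIP); ... */
    regs->eip += inst_len;
    hvm_load_cpu_guest_regs(v, regs);   /* VMX: __vmwrite(GUEST_RIP, regs->eip); ... */

    /* After: guest_cpu_user_regs() is always current, so handlers use it
     * directly (this is what the new SVM/VMX __update_guest_eip() helpers do). */
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    regs->eip += inst_len;
    regs->eflags &= ~X86_EFLAGS_RF;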
line diff
     1.1 --- a/xen/arch/x86/domain.c	Wed Sep 19 12:12:49 2007 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Wed Sep 19 14:25:44 2007 +0100
     1.3 @@ -631,11 +631,11 @@ int arch_set_info_guest(
     1.4          memcpy(&v->arch.guest_context, c.nat, sizeof(*c.nat));
     1.5  #ifdef CONFIG_COMPAT
     1.6      else
     1.7 -    {
     1.8          XLAT_vcpu_guest_context(&v->arch.guest_context, c.cmp);
     1.9 -    }
    1.10  #endif
    1.11  
    1.12 +    v->arch.guest_context.user_regs.eflags |= 2;
    1.13 +
    1.14      /* Only CR0.TS is modifiable by guest or admin. */
    1.15      v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
    1.16      v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
    1.17 @@ -651,10 +651,6 @@ int arch_set_info_guest(
    1.18          /* Ensure real hardware interrupts are enabled. */
    1.19          v->arch.guest_context.user_regs.eflags |= EF_IE;
    1.20      }
    1.21 -    else
    1.22 -    {
    1.23 -        hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
    1.24 -    }
    1.25  
    1.26      if ( v->is_initialised )
    1.27          goto out;
     2.1 --- a/xen/arch/x86/domctl.c	Wed Sep 19 12:12:49 2007 +0100
     2.2 +++ b/xen/arch/x86/domctl.c	Wed Sep 19 14:25:44 2007 +0100
     2.3 @@ -556,7 +556,6 @@ void arch_get_info_guest(struct vcpu *v,
     2.4      {
     2.5          if ( !is_pv_32on64_domain(v->domain) )
     2.6          {
     2.7 -            hvm_store_cpu_guest_regs(v, &c.nat->user_regs);
     2.8              memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
     2.9              c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
    2.10              c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
    2.11 @@ -566,11 +565,6 @@ void arch_get_info_guest(struct vcpu *v,
    2.12  #ifdef CONFIG_COMPAT
    2.13          else
    2.14          {
    2.15 -            struct cpu_user_regs user_regs;
    2.16 -            unsigned i;
    2.17 -
    2.18 -            hvm_store_cpu_guest_regs(v, &user_regs);
    2.19 -            XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
    2.20              memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg));
    2.21              c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
    2.22              c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Sep 19 12:12:49 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Sep 19 14:25:44 2007 +0100
     3.3 @@ -283,8 +283,10 @@ static int hvm_save_cpu_ctxt(struct doma
     3.4          ctxt.rbp = vc->user_regs.ebp;
     3.5          ctxt.rsi = vc->user_regs.esi;
     3.6          ctxt.rdi = vc->user_regs.edi;
     3.7 -        /* %rsp handled by arch-specific call above */
     3.8 -#ifdef __x86_64__        
     3.9 +        ctxt.rsp = vc->user_regs.esp;
    3.10 +        ctxt.rip = vc->user_regs.eip;
    3.11 +        ctxt.rflags = vc->user_regs.eflags;
    3.12 +#ifdef __x86_64__
    3.13          ctxt.r8  = vc->user_regs.r8;
    3.14          ctxt.r9  = vc->user_regs.r9;
    3.15          ctxt.r10 = vc->user_regs.r10;
    3.16 @@ -347,6 +349,8 @@ static int hvm_load_cpu_ctxt(struct doma
    3.17      vc->user_regs.esi = ctxt.rsi;
    3.18      vc->user_regs.edi = ctxt.rdi;
    3.19      vc->user_regs.esp = ctxt.rsp;
    3.20 +    vc->user_regs.eip = ctxt.rip;
    3.21 +    vc->user_regs.eflags = ctxt.rflags | 2;
    3.22  #ifdef __x86_64__
    3.23      vc->user_regs.r8  = ctxt.r8; 
    3.24      vc->user_regs.r9  = ctxt.r9; 
    3.25 @@ -973,8 +977,6 @@ void hvm_task_switch(
    3.26          goto out;
    3.27      }
    3.28  
    3.29 -    hvm_store_cpu_guest_regs(v, regs);
    3.30 -
    3.31      ptss = hvm_map(prev_tr.base, sizeof(tss));
    3.32      if ( ptss == NULL )
    3.33          goto out;
    3.34 @@ -1081,8 +1083,6 @@ void hvm_task_switch(
    3.35              hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
    3.36      }
    3.37  
    3.38 -    hvm_load_cpu_guest_regs(v, regs);
    3.39 -
    3.40   out:
    3.41      hvm_unmap(optss_desc);
    3.42      hvm_unmap(nptss_desc);
    3.43 @@ -1322,7 +1322,6 @@ int hvm_do_hypercall(struct cpu_user_reg
    3.44  #endif
    3.45      case 4:
    3.46      case 2:
    3.47 -        hvm_store_cpu_guest_regs(current, regs);
    3.48          if ( unlikely(ring_3(regs)) )
    3.49          {
    3.50      default:
     4.1 --- a/xen/arch/x86/hvm/io.c	Wed Sep 19 12:12:49 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/io.c	Wed Sep 19 14:25:44 2007 +0100
     4.3 @@ -858,7 +858,6 @@ void hvm_io_assist(void)
     4.4  
     4.5      /* Copy register changes back into current guest state. */
     4.6      regs->eflags &= ~X86_EFLAGS_RF;
     4.7 -    hvm_load_cpu_guest_regs(v, regs);
     4.8      memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
     4.9  
    4.10   out:
     5.1 --- a/xen/arch/x86/hvm/platform.c	Wed Sep 19 12:12:49 2007 +0100
     5.2 +++ b/xen/arch/x86/hvm/platform.c	Wed Sep 19 14:25:44 2007 +0100
     5.3 @@ -1032,7 +1032,6 @@ void handle_mmio(unsigned long gpa)
     5.4  
     5.5      /* Copy current guest state into io instruction state structure. */
     5.6      memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
     5.7 -    hvm_store_cpu_guest_regs(v, regs);
     5.8  
     5.9      df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
    5.10  
     6.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Wed Sep 19 12:12:49 2007 +0100
     6.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Wed Sep 19 14:25:44 2007 +0100
     6.3 @@ -59,8 +59,8 @@ extern int inst_copy_from_guest(unsigned
     6.4  #define DECODE_SIB_BASE(prefix, sib) DECODE_MODRM_RM(prefix, sib)
     6.5  
     6.6  
     6.7 -static inline unsigned long DECODE_GPR_VALUE(struct vmcb_struct *vmcb, 
     6.8 -        struct cpu_user_regs *regs, u8 gpr_rm)
     6.9 +static inline unsigned long DECODE_GPR_VALUE(
    6.10 +    struct cpu_user_regs *regs, u8 gpr_rm)
    6.11  {
    6.12      unsigned long value;
    6.13      switch (gpr_rm) 
    6.14 @@ -78,7 +78,7 @@ static inline unsigned long DECODE_GPR_V
    6.15          value = regs->ebx;
    6.16          break;
    6.17      case 0x4:
    6.18 -        value = (unsigned long)vmcb->rsp;
    6.19 +        value = regs->esp;
    6.20      case 0x5:
    6.21          value = regs->ebp;
    6.22          break;
    6.23 @@ -172,7 +172,7 @@ unsigned long get_effective_addr_modrm64
    6.24          }
    6.25          else
    6.26          {
    6.27 -            effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
    6.28 +            effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
    6.29          }
    6.30          break;
    6.31  
    6.32 @@ -202,12 +202,12 @@ unsigned long get_effective_addr_modrm64
    6.33  #if __x86_64__
    6.34          /* 64-bit mode */
    6.35          if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
    6.36 -            return vmcb->rip + inst_len + *size + disp;
    6.37 +            return regs->eip + inst_len + *size + disp;
    6.38  #endif
    6.39          return disp;
    6.40  
    6.41      default:
    6.42 -        effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
    6.43 +        effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
    6.44  
    6.45      }
    6.46  
    6.47 @@ -251,7 +251,7 @@ unsigned long get_effective_addr_sib(str
    6.48      sib_idx = DECODE_SIB_INDEX(prefix, sib);
    6.49      sib_base = DECODE_SIB_BASE(prefix, sib);
    6.50  
    6.51 -    base = DECODE_GPR_VALUE(vmcb, regs, sib_base);
    6.52 +    base = DECODE_GPR_VALUE(regs, sib_base);
    6.53  
    6.54      if ((unsigned long)-1 == base)
    6.55      {
    6.56 @@ -293,7 +293,7 @@ unsigned long get_effective_addr_sib(str
    6.57      if (4 == sib_idx)
    6.58          return base;
    6.59  
    6.60 -    effective_addr = DECODE_GPR_VALUE(vmcb, regs, sib_idx);
    6.61 +    effective_addr = DECODE_GPR_VALUE(regs, sib_idx);
    6.62  
    6.63      effective_addr <<= sib_scale;
    6.64  
    6.65 @@ -326,7 +326,8 @@ unsigned long svm_rip2pointer(struct vcp
    6.66       * no matter what kind of addressing is used.
    6.67       */
    6.68      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    6.69 -    unsigned long p = vmcb->cs.base + vmcb->rip;
    6.70 +    unsigned long p = vmcb->cs.base + guest_cpu_user_regs()->eip;
    6.71 +    ASSERT(v == current);
    6.72      if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
    6.73          return (u32)p; /* mask to 32 bits */
    6.74      /* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
     7.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Sep 19 12:12:49 2007 +0100
     7.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Sep 19 14:25:44 2007 +0100
     7.3 @@ -72,6 +72,14 @@ static void *root_vmcb[NR_CPUS] __read_m
     7.4  /* hardware assisted paging bits */
     7.5  extern int opt_hap_enabled;
     7.6  
     7.7 +static void inline __update_guest_eip(
     7.8 +    struct cpu_user_regs *regs, int inst_len) 
     7.9 +{
    7.10 +    ASSERT(inst_len > 0);
    7.11 +    regs->eip += inst_len;
    7.12 +    regs->eflags &= ~X86_EFLAGS_RF;
    7.13 +}
    7.14 +
    7.15  static void svm_inject_exception(
    7.16      struct vcpu *v, int trap, int ev, int error_code)
    7.17  {
    7.18 @@ -108,16 +116,6 @@ static int svm_lme_is_set(struct vcpu *v
    7.19  #endif
    7.20  }
    7.21  
    7.22 -static void svm_store_cpu_guest_regs(
    7.23 -    struct vcpu *v, struct cpu_user_regs *regs)
    7.24 -{
    7.25 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    7.26 -
    7.27 -    regs->esp    = vmcb->rsp;
    7.28 -    regs->eflags = vmcb->rflags;
    7.29 -    regs->eip    = vmcb->rip;
    7.30 -}
    7.31 -
    7.32  static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
    7.33  {
    7.34      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    7.35 @@ -233,30 +231,11 @@ int svm_vmcb_save(struct vcpu *v, struct
    7.36  {
    7.37      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    7.38  
    7.39 -    c->rip = vmcb->rip;
    7.40 -
    7.41 -#ifdef HVM_DEBUG_SUSPEND
    7.42 -    printk("%s: eip=0x%"PRIx64".\n", 
    7.43 -           __func__,
    7.44 -           inst_len, c->eip);
    7.45 -#endif
    7.46 -
    7.47 -    c->rsp = vmcb->rsp;
    7.48 -    c->rflags = vmcb->rflags;
    7.49 -
    7.50      c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
    7.51      c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
    7.52      c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
    7.53      c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
    7.54  
    7.55 -#ifdef HVM_DEBUG_SUSPEND
    7.56 -    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
    7.57 -           __func__,
    7.58 -           c->cr3,
    7.59 -           c->cr0,
    7.60 -           c->cr4);
    7.61 -#endif
    7.62 -
    7.63      c->idtr_limit = vmcb->idtr.limit;
    7.64      c->idtr_base  = vmcb->idtr.base;
    7.65  
    7.66 @@ -355,10 +334,6 @@ int svm_vmcb_restore(struct vcpu *v, str
    7.67          v->arch.guest_table = pagetable_from_pfn(mfn);
    7.68      }
    7.69  
    7.70 -    vmcb->rip    = c->rip;
    7.71 -    vmcb->rsp    = c->rsp;
    7.72 -    vmcb->rflags = c->rflags;
    7.73 -
    7.74      v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
    7.75      v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
    7.76      v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
    7.77 @@ -518,7 +493,8 @@ static int svm_interrupts_enabled(struct
    7.78          return !vmcb->interrupt_shadow;
    7.79  
    7.80      ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
    7.81 -    return !irq_masked(vmcb->rflags) && !vmcb->interrupt_shadow; 
    7.82 +    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
    7.83 +            !vmcb->interrupt_shadow);
    7.84  }
    7.85  
    7.86  static int svm_guest_x86_mode(struct vcpu *v)
    7.87 @@ -527,7 +503,7 @@ static int svm_guest_x86_mode(struct vcp
    7.88  
    7.89      if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
    7.90          return 0;
    7.91 -    if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
    7.92 +    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
    7.93          return 1;
    7.94      if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
    7.95          return 8;
    7.96 @@ -785,7 +761,6 @@ static void svm_init_ap_context(
    7.97       */
    7.98      svm_reset_to_realmode(v, regs);  
    7.99      /* Adjust the vmcb's hidden register state. */
   7.100 -    vmcb->rip = 0;
   7.101      vmcb->cs.sel = cs_sel;
   7.102      vmcb->cs.base = (cs_sel << 4);
   7.103  }
   7.104 @@ -810,15 +785,6 @@ static void svm_init_hypercall_page(stru
   7.105      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
   7.106  }
   7.107  
   7.108 -static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
   7.109 -{
   7.110 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.111 -
   7.112 -    vmcb->rsp      = regs->esp;   
   7.113 -    vmcb->rflags   = regs->eflags | 2UL;
   7.114 -    vmcb->rip      = regs->eip;
   7.115 -}
   7.116 -
   7.117  static void svm_ctxt_switch_from(struct vcpu *v)
   7.118  {
   7.119      int cpu = smp_processor_id();
   7.120 @@ -950,8 +916,6 @@ static struct hvm_function_table svm_fun
   7.121      .domain_destroy       = svm_domain_destroy,
   7.122      .vcpu_initialise      = svm_vcpu_initialise,
   7.123      .vcpu_destroy         = svm_vcpu_destroy,
   7.124 -    .store_cpu_guest_regs = svm_store_cpu_guest_regs,
   7.125 -    .load_cpu_guest_regs  = svm_load_cpu_guest_regs,
   7.126      .save_cpu_ctxt        = svm_save_vmcb_ctxt,
   7.127      .load_cpu_ctxt        = svm_load_vmcb_ctxt,
   7.128      .interrupts_enabled   = svm_interrupts_enabled,
   7.129 @@ -1144,7 +1108,7 @@ static void svm_vmexit_do_cpuid(struct v
   7.130  
   7.131      inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
   7.132      ASSERT(inst_len > 0);
   7.133 -    __update_guest_eip(vmcb, inst_len);
   7.134 +    __update_guest_eip(regs, inst_len);
   7.135  }
   7.136  
   7.137  static unsigned long *get_reg_p(
   7.138 @@ -1176,7 +1140,7 @@ static unsigned long *get_reg_p(
   7.139          reg_p = (unsigned long *)&regs->ebp;
   7.140          break;
   7.141      case SVM_REG_ESP:
   7.142 -        reg_p = (unsigned long *)&vmcb->rsp;
   7.143 +        reg_p = (unsigned long *)&regs->esp;
   7.144          break;
   7.145  #ifdef __x86_64__
   7.146      case SVM_REG_R8:
   7.147 @@ -1348,7 +1312,7 @@ static int svm_get_io_address(
   7.148       * than one byte (+ maybe rep-prefix), we have some prefix so we need 
   7.149       * to figure out what it is...
   7.150       */
   7.151 -    isize = vmcb->exitinfo2 - vmcb->rip;
   7.152 +    isize = vmcb->exitinfo2 - regs->eip;
   7.153  
   7.154      if (info.fields.rep)
   7.155          isize --;
   7.156 @@ -1501,7 +1465,6 @@ static void svm_io_instruction(struct vc
   7.157  
   7.158      /* Copy current guest state into io instruction state structure. */
   7.159      memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
   7.160 -    svm_store_cpu_guest_regs(v, regs);
   7.161  
   7.162      info.bytes = vmcb->exitinfo1;
   7.163  
   7.164 @@ -1524,7 +1487,7 @@ static void svm_io_instruction(struct vc
   7.165      HVM_DBG_LOG(DBG_LEVEL_IO, 
   7.166                  "svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
   7.167                  "exit_qualification = %"PRIx64,
   7.168 -                port, vmcb->cs.sel, vmcb->rip, info.bytes);
   7.169 +                port, vmcb->cs.sel, (uint64_t)regs->eip, info.bytes);
   7.170  
   7.171      /* string instruction */
   7.172      if (info.fields.str)
   7.173 @@ -1775,7 +1738,7 @@ static void svm_cr_access(
   7.174      if (index > 0 && (buffer[index-1] & 0xF0) == 0x40)
   7.175          prefix = buffer[index-1];
   7.176  
   7.177 -    HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
   7.178 +    HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long)regs->eip);
   7.179  
   7.180      switch ( match )
   7.181  
   7.182 @@ -1870,7 +1833,7 @@ static void svm_cr_access(
   7.183      ASSERT(inst_len);
   7.184  
   7.185      if ( result )
   7.186 -        __update_guest_eip(vmcb, inst_len);
   7.187 +        __update_guest_eip(regs, inst_len);
   7.188  }
   7.189  
   7.190  static void svm_do_msr_access(
   7.191 @@ -1993,14 +1956,15 @@ static void svm_do_msr_access(
   7.192          inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
   7.193      }
   7.194  
   7.195 -    __update_guest_eip(vmcb, inst_len);
   7.196 +    __update_guest_eip(regs, inst_len);
   7.197  }
   7.198  
   7.199 -static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
   7.200 +static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
   7.201 +                              struct cpu_user_regs *regs)
   7.202  {
   7.203      enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
   7.204  
   7.205 -    __update_guest_eip(vmcb, 1);
   7.206 +    __update_guest_eip(regs, 1);
   7.207  
   7.208      /* Check for interrupt not handled or new interrupt. */
   7.209      if ( vmcb->eventinj.fields.v ||
   7.210 @@ -2011,13 +1975,12 @@ static void svm_vmexit_do_hlt(struct vmc
   7.211      }
   7.212  
   7.213      HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
   7.214 -    hvm_hlt(vmcb->rflags);
   7.215 +    hvm_hlt(regs->eflags);
   7.216  }
   7.217  
   7.218 -static void svm_vmexit_do_invd(struct vcpu *v)
   7.219 +static void svm_vmexit_do_invd(struct cpu_user_regs *regs)
   7.220  {
   7.221 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.222 -    int  inst_len;
   7.223 +    int inst_len;
   7.224      
   7.225      /* Invalidate the cache - we can't really do that safely - maybe we should 
   7.226       * WBINVD, but I think it's just fine to completely ignore it - we should 
   7.227 @@ -2029,8 +1992,8 @@ static void svm_vmexit_do_invd(struct vc
   7.228       */
   7.229      gdprintk(XENLOG_WARNING, "INVD instruction intercepted - ignored\n");
   7.230      
   7.231 -    inst_len = __get_instruction_length(v, INSTR_INVD, NULL);
   7.232 -    __update_guest_eip(vmcb, inst_len);
   7.233 +    inst_len = __get_instruction_length(current, INSTR_INVD, NULL);
   7.234 +    __update_guest_eip(regs, inst_len);
   7.235  }    
   7.236          
   7.237  void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
   7.238 @@ -2039,7 +2002,6 @@ void svm_handle_invlpg(const short invlp
   7.239      u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
   7.240      unsigned long g_vaddr;
   7.241      int inst_len;
   7.242 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.243  
   7.244      /* 
   7.245       * Unknown how many bytes the invlpg instruction will take.  Use the
   7.246 @@ -2056,7 +2018,7 @@ void svm_handle_invlpg(const short invlp
   7.247      {
   7.248          inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
   7.249          ASSERT(inst_len > 0);
   7.250 -        __update_guest_eip(vmcb, inst_len);
   7.251 +        __update_guest_eip(regs, inst_len);
   7.252  
   7.253          /* 
   7.254           * The address is implicit on this instruction. At the moment, we don't
   7.255 @@ -2083,7 +2045,7 @@ void svm_handle_invlpg(const short invlp
   7.256                                               &opcode[inst_len], &length);
   7.257  
   7.258          inst_len += length;
   7.259 -        __update_guest_eip (vmcb, inst_len);
   7.260 +        __update_guest_eip(regs, inst_len);
   7.261      }
   7.262  
   7.263      HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
   7.264 @@ -2106,6 +2068,8 @@ static int svm_reset_to_realmode(struct 
   7.265  
   7.266      memset(regs, 0, sizeof(struct cpu_user_regs));
   7.267  
   7.268 +    regs->eflags = 2;
   7.269 +
   7.270      v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
   7.271      svm_update_guest_cr(v, 0);
   7.272  
   7.273 @@ -2118,7 +2082,7 @@ static int svm_reset_to_realmode(struct 
   7.274      vmcb->efer = EFER_SVME;
   7.275  
   7.276      /* This will jump to ROMBIOS */
   7.277 -    vmcb->rip = 0xFFF0;
   7.278 +    regs->eip = 0xFFF0;
   7.279  
   7.280      /* Set up the segment registers and all their hidden states. */
   7.281      vmcb->cs.sel = 0xF000;
   7.282 @@ -2171,16 +2135,12 @@ static int svm_reset_to_realmode(struct 
   7.283      vmcb->idtr.limit = 0x3ff;
   7.284      vmcb->idtr.base = 0x00;
   7.285  
   7.286 -    vmcb->rax = 0;
   7.287 -    vmcb->rsp = 0;
   7.288 -
   7.289      return 0;
   7.290  }
   7.291  
   7.292  asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
   7.293  {
   7.294      unsigned int exit_reason;
   7.295 -    unsigned long eip;
   7.296      struct vcpu *v = current;
   7.297      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.298      eventinj_t eventinj;
   7.299 @@ -2198,7 +2158,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.300  
   7.301      exit_reason = vmcb->exitcode;
   7.302  
   7.303 -    HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
   7.304 +    HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
   7.305  
   7.306      if ( unlikely(exit_reason == VMEXIT_INVALID) )
   7.307      {
   7.308 @@ -2207,7 +2167,6 @@ asmlinkage void svm_vmexit_handler(struc
   7.309      }
   7.310  
   7.311      perfc_incra(svmexits, exit_reason);
   7.312 -    eip = vmcb->rip;
   7.313  
   7.314      /* Event delivery caused this intercept? Queue for redelivery. */
   7.315      eventinj = vmcb->exitintinfo;
   7.316 @@ -2244,7 +2203,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.317              goto exit_and_crash;
   7.318          /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
   7.319          inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
   7.320 -        __update_guest_eip(vmcb, inst_len);
   7.321 +        __update_guest_eip(regs, inst_len);
   7.322          domain_pause_for_debugger();
   7.323          break;
   7.324  
   7.325 @@ -2275,7 +2234,6 @@ asmlinkage void svm_vmexit_handler(struc
   7.326  
   7.327      case VMEXIT_EXCEPTION_MC:
   7.328          HVMTRACE_0D(MCE, v);
   7.329 -        svm_store_cpu_guest_regs(v, regs);
   7.330          do_machine_check(regs);
   7.331          break;
   7.332  
   7.333 @@ -2285,7 +2243,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.334          break;
   7.335  
   7.336      case VMEXIT_INVD:
   7.337 -        svm_vmexit_do_invd(v);
   7.338 +        svm_vmexit_do_invd(regs);
   7.339          break;
   7.340  
   7.341      case VMEXIT_TASK_SWITCH: {
   7.342 @@ -2308,7 +2266,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.343          break;
   7.344  
   7.345      case VMEXIT_HLT:
   7.346 -        svm_vmexit_do_hlt(vmcb);
   7.347 +        svm_vmexit_do_hlt(vmcb, regs);
   7.348          break;
   7.349  
   7.350      case VMEXIT_INVLPG:
   7.351 @@ -2326,7 +2284,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.352          rc = hvm_do_hypercall(regs);
   7.353          if ( rc != HVM_HCALL_preempted )
   7.354          {
   7.355 -            __update_guest_eip(vmcb, inst_len);
   7.356 +            __update_guest_eip(regs, inst_len);
   7.357              if ( rc == HVM_HCALL_invalidate )
   7.358                  send_invalidate_req();
   7.359          }
     8.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Wed Sep 19 12:12:49 2007 +0100
     8.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Wed Sep 19 14:25:44 2007 +0100
     8.3 @@ -58,6 +58,12 @@ svm_trace_done:
     8.4          movl VCPU_svm_vmcb(%ebx),%ecx
     8.5          movl UREGS_eax(%esp),%eax
     8.6          movl %eax,VMCB_rax(%ecx)
     8.7 +        movl UREGS_eip(%esp),%eax
     8.8 +        movl %eax,VMCB_rip(%ecx)
     8.9 +        movl UREGS_esp(%esp),%eax
    8.10 +        movl %eax,VMCB_rsp(%ecx)
    8.11 +        movl UREGS_eflags(%esp),%eax
    8.12 +        movl %eax,VMCB_rflags(%ecx)
    8.13  
    8.14          movl VCPU_svm_vmcb_pa(%ebx),%eax
    8.15          popl %ebx
    8.16 @@ -81,6 +87,12 @@ svm_trace_done:
    8.17          movl VCPU_svm_vmcb(%ebx),%ecx
    8.18          movl VMCB_rax(%ecx),%eax
    8.19          movl %eax,UREGS_eax(%esp)
    8.20 +        movl VMCB_rip(%ecx),%eax
    8.21 +        movl %eax,UREGS_eip(%esp)
    8.22 +        movl VMCB_rsp(%ecx),%eax
    8.23 +        movl %eax,UREGS_esp(%esp)
    8.24 +        movl VMCB_rflags(%ecx),%eax
    8.25 +        movl %eax,UREGS_eflags(%esp)
    8.26  
    8.27          STGI
    8.28  .globl svm_stgi_label;
     9.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Wed Sep 19 12:12:49 2007 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Wed Sep 19 14:25:44 2007 +0100
     9.3 @@ -59,6 +59,12 @@ svm_trace_done:
     9.4          movq VCPU_svm_vmcb(%rbx),%rcx
     9.5          movq UREGS_rax(%rsp),%rax
     9.6          movq %rax,VMCB_rax(%rcx)
     9.7 +        movq UREGS_rip(%rsp),%rax
     9.8 +        movq %rax,VMCB_rip(%rcx)
     9.9 +        movq UREGS_rsp(%rsp),%rax
    9.10 +        movq %rax,VMCB_rsp(%rcx)
    9.11 +        movq UREGS_eflags(%rsp),%rax
    9.12 +        movq %rax,VMCB_rflags(%rcx)
    9.13  
    9.14          movq VCPU_svm_vmcb_pa(%rbx),%rax
    9.15          popq %r15
    9.16 @@ -100,6 +106,12 @@ svm_trace_done:
    9.17          movq VCPU_svm_vmcb(%rbx),%rcx
    9.18          movq VMCB_rax(%rcx),%rax
    9.19          movq %rax,UREGS_rax(%rsp)
    9.20 +        movq VMCB_rip(%rcx),%rax
    9.21 +        movq %rax,UREGS_rip(%rsp)
    9.22 +        movq VMCB_rsp(%rcx),%rax
    9.23 +        movq %rax,UREGS_rsp(%rsp)
    9.24 +        movq VMCB_rflags(%rcx),%rax
    9.25 +        movq %rax,UREGS_eflags(%rsp)
    9.26  
    9.27          STGI
    9.28  .globl svm_stgi_label;
    10.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Sep 19 12:12:49 2007 +0100
    10.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Sep 19 14:25:44 2007 +0100
    10.3 @@ -437,11 +437,9 @@ static int vmx_guest_x86_mode(struct vcp
    10.4  {
    10.5      unsigned int cs_ar_bytes;
    10.6  
    10.7 -    ASSERT(v == current);
    10.8 -
    10.9      if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
   10.10          return 0;
   10.11 -    if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
   10.12 +    if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
   10.13          return 1;
   10.14      cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   10.15      if ( hvm_long_mode_enabled(v) &&
   10.16 @@ -485,10 +483,6 @@ void vmx_vmcs_save(struct vcpu *v, struc
   10.17  
   10.18      vmx_vmcs_enter(v);
   10.19  
   10.20 -    c->rip = __vmread(GUEST_RIP);
   10.21 -    c->rsp = __vmread(GUEST_RSP);
   10.22 -    c->rflags = __vmread(GUEST_RFLAGS);
   10.23 -
   10.24      c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
   10.25      c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
   10.26      c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
   10.27 @@ -496,11 +490,6 @@ void vmx_vmcs_save(struct vcpu *v, struc
   10.28  
   10.29      c->msr_efer = v->arch.hvm_vcpu.guest_efer;
   10.30  
   10.31 -#ifdef HVM_DEBUG_SUSPEND
   10.32 -    printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   10.33 -           __func__, c->cr3, c->cr0, c->cr4);
   10.34 -#endif
   10.35 -
   10.36      c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
   10.37      c->idtr_base = __vmread(GUEST_IDTR_BASE);
   10.38  
   10.39 @@ -594,10 +583,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
   10.40  
   10.41      vmx_vmcs_enter(v);
   10.42  
   10.43 -    __vmwrite(GUEST_RIP, c->rip);
   10.44 -    __vmwrite(GUEST_RSP, c->rsp);
   10.45 -    __vmwrite(GUEST_RFLAGS, c->rflags);
   10.46 -
   10.47      v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
   10.48      v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
   10.49      v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
   10.50 @@ -793,30 +778,6 @@ static void vmx_ctxt_switch_to(struct vc
   10.51      vmx_restore_dr(v);
   10.52  }
   10.53  
   10.54 -static void vmx_store_cpu_guest_regs(
   10.55 -    struct vcpu *v, struct cpu_user_regs *regs)
   10.56 -{
   10.57 -    vmx_vmcs_enter(v);
   10.58 -
   10.59 -    regs->eflags = __vmread(GUEST_RFLAGS);
   10.60 -    regs->eip = __vmread(GUEST_RIP);
   10.61 -    regs->esp = __vmread(GUEST_RSP);
   10.62 -
   10.63 -    vmx_vmcs_exit(v);
   10.64 -}
   10.65 -
   10.66 -static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
   10.67 -{
   10.68 -    vmx_vmcs_enter(v);
   10.69 -
   10.70 -    /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
   10.71 -    __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
   10.72 -    __vmwrite(GUEST_RIP, regs->eip);
   10.73 -    __vmwrite(GUEST_RSP, regs->esp);
   10.74 -
   10.75 -    vmx_vmcs_exit(v);
   10.76 -}
   10.77 -
   10.78  static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
   10.79  {
   10.80      unsigned long base = 0;
   10.81 @@ -1061,9 +1022,7 @@ static void vmx_init_hypercall_page(stru
   10.82  
   10.83  static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   10.84  {
   10.85 -    unsigned long intr_shadow, eflags;
   10.86 -
   10.87 -    ASSERT(v == current);
   10.88 +    unsigned long intr_shadow;
   10.89  
   10.90      intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
   10.91  
   10.92 @@ -1073,8 +1032,7 @@ static int vmx_interrupts_enabled(struct
   10.93                                  VMX_INTR_SHADOW_NMI));
   10.94  
   10.95      ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
   10.96 -    eflags = __vmread(GUEST_RFLAGS);
   10.97 -    return (!irq_masked(eflags) &&
   10.98 +    return (!irq_masked(guest_cpu_user_regs()->eflags) &&
   10.99              !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
  10.100  }
  10.101  
  10.102 @@ -1193,8 +1151,6 @@ static struct hvm_function_table vmx_fun
  10.103      .domain_destroy       = vmx_domain_destroy,
  10.104      .vcpu_initialise      = vmx_vcpu_initialise,
  10.105      .vcpu_destroy         = vmx_vcpu_destroy,
  10.106 -    .store_cpu_guest_regs = vmx_store_cpu_guest_regs,
  10.107 -    .load_cpu_guest_regs  = vmx_load_cpu_guest_regs,
  10.108      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
  10.109      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
  10.110      .interrupts_enabled   = vmx_interrupts_enabled,
  10.111 @@ -1284,14 +1240,11 @@ static int __get_instruction_length(void
  10.112  
  10.113  static void __update_guest_eip(unsigned long inst_len)
  10.114  {
  10.115 +    struct cpu_user_regs *regs = guest_cpu_user_regs();
  10.116      unsigned long x;
  10.117  
  10.118 -    x = __vmread(GUEST_RIP);
  10.119 -    __vmwrite(GUEST_RIP, x + inst_len);
  10.120 -
  10.121 -    x = __vmread(GUEST_RFLAGS);
  10.122 -    if ( x & X86_EFLAGS_RF )
  10.123 -        __vmwrite(GUEST_RFLAGS, x & ~X86_EFLAGS_RF);
  10.124 +    regs->eip += inst_len;
  10.125 +    regs->eflags &= ~X86_EFLAGS_RF;
  10.126  
  10.127      x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
  10.128      if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
  10.129 @@ -1435,16 +1388,10 @@ static void vmx_dr_access(unsigned long 
  10.130   */
  10.131  static void vmx_do_invlpg(unsigned long va)
  10.132  {
  10.133 -    unsigned long eip;
  10.134      struct vcpu *v = current;
  10.135  
  10.136      HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
  10.137  
  10.138 -    eip = __vmread(GUEST_RIP);
  10.139 -
  10.140 -    HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
  10.141 -                eip, va);
  10.142 -
  10.143      /*
  10.144       * We do the safest things first, then try to update the shadow
  10.145       * copying from guest
  10.146 @@ -1852,7 +1799,6 @@ static void vmx_io_instruction(unsigned 
  10.147  
  10.148      /* Copy current guest state into io instruction state structure. */
  10.149      memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
  10.150 -    vmx_store_cpu_guest_regs(current, regs);
  10.151  
  10.152      HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
  10.153                  "exit_qualification = %lx",
  10.154 @@ -1891,12 +1837,12 @@ static void vmx_io_instruction(unsigned 
  10.155  
  10.156  static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
  10.157  {
  10.158 -    /* NB. Skip transition instruction. */
  10.159 -    c->eip = __vmread(GUEST_RIP);
  10.160 +    struct cpu_user_regs *regs = guest_cpu_user_regs();
  10.161 +
  10.162 +    c->eip  = regs->eip;
  10.163      c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
  10.164 -
  10.165 -    c->esp = __vmread(GUEST_RSP);
  10.166 -    c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
  10.167 +    c->esp = regs->esp;
  10.168 +    c->eflags = regs->eflags & ~X86_EFLAGS_RF;
  10.169  
  10.170      c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
  10.171      c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
  10.172 @@ -1951,6 +1897,7 @@ static void vmx_world_save(struct vcpu *
  10.173  
  10.174  static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
  10.175  {
  10.176 +    struct cpu_user_regs *regs = guest_cpu_user_regs();
  10.177      unsigned long mfn = 0;
  10.178      p2m_type_t p2mt;
  10.179  
  10.180 @@ -1969,9 +1916,9 @@ static int vmx_world_restore(struct vcpu
  10.181  
  10.182      v->arch.guest_table = pagetable_from_pfn(mfn);
  10.183  
  10.184 -    __vmwrite(GUEST_RIP, c->eip);
  10.185 -    __vmwrite(GUEST_RSP, c->esp);
  10.186 -    __vmwrite(GUEST_RFLAGS, c->eflags);
  10.187 +    regs->eip = c->eip;
  10.188 +    regs->esp = c->esp;
  10.189 +    regs->eflags = c->eflags | 2;
  10.190  
  10.191      v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
  10.192      v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
  10.193 @@ -2121,7 +2068,6 @@ static int vmx_assist(struct vcpu *v, in
  10.194  static int vmx_set_cr0(unsigned long value)
  10.195  {
  10.196      struct vcpu *v = current;
  10.197 -    unsigned long eip;
  10.198      int rc = hvm_set_cr0(value);
  10.199  
  10.200      if ( rc == 0 )
  10.201 @@ -2142,24 +2088,12 @@ static int vmx_set_cr0(unsigned long val
  10.202      if ( !(value & X86_CR0_PE) )
  10.203      {
  10.204          if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
  10.205 -        {
  10.206 -            eip = __vmread(GUEST_RIP);
  10.207 -            HVM_DBG_LOG(DBG_LEVEL_1,
  10.208 -                        "Transfering control to vmxassist %%eip 0x%lx", eip);
  10.209              return 0; /* do not update eip! */
  10.210 -        }
  10.211      }
  10.212      else if ( v->arch.hvm_vmx.vmxassist_enabled )
  10.213      {
  10.214 -        eip = __vmread(GUEST_RIP);
  10.215 -        HVM_DBG_LOG(DBG_LEVEL_1,
  10.216 -                    "Enabling CR0.PE at %%eip 0x%lx", eip);
  10.217          if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
  10.218 -        {
  10.219 -            HVM_DBG_LOG(DBG_LEVEL_1,
  10.220 -                        "Restoring to %%eip 0x%lx", eip);
  10.221              return 0; /* do not update eip! */
  10.222 -        }
  10.223      }
  10.224  
  10.225      return 1;
  10.226 @@ -2204,10 +2138,8 @@ static int mov_to_cr(int gp, int cr, str
  10.227      CASE_GET_REG(EBP, ebp);
  10.228      CASE_GET_REG(ESI, esi);
  10.229      CASE_GET_REG(EDI, edi);
  10.230 +    CASE_GET_REG(ESP, esp);
  10.231      CASE_EXTEND_GET_REG;
  10.232 -    case REG_ESP:
  10.233 -        value = __vmread(GUEST_RSP);
  10.234 -        break;
  10.235      default:
  10.236          gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
  10.237          goto exit_and_crash;
  10.238 @@ -2276,11 +2208,8 @@ static void mov_from_cr(int cr, int gp, 
  10.239      CASE_SET_REG(EBP, ebp);
  10.240      CASE_SET_REG(ESI, esi);
  10.241      CASE_SET_REG(EDI, edi);
  10.242 +    CASE_SET_REG(ESP, esp);
  10.243      CASE_EXTEND_SET_REG;
  10.244 -    case REG_ESP:
  10.245 -        __vmwrite(GUEST_RSP, value);
  10.246 -        regs->esp = value;
  10.247 -        break;
  10.248      default:
  10.249          printk("invalid gp: %d\n", gp);
  10.250          domain_crash(v->domain);
  10.251 @@ -2521,12 +2450,10 @@ gp_fault:
  10.252      return 0;
  10.253  }
  10.254  
  10.255 -static void vmx_do_hlt(void)
  10.256 +static void vmx_do_hlt(struct cpu_user_regs *regs)
  10.257  {
  10.258 -    unsigned long rflags;
  10.259      HVMTRACE_0D(HLT, current);
  10.260 -    rflags = __vmread(GUEST_RFLAGS);
  10.261 -    hvm_hlt(rflags);
  10.262 +    hvm_hlt(regs->eflags);
  10.263  }
  10.264  
  10.265  static void vmx_do_extint(struct cpu_user_regs *regs)
  10.266 @@ -2601,7 +2528,6 @@ static void vmx_failed_vmentry(unsigned 
  10.267      case EXIT_REASON_MACHINE_CHECK:
  10.268          printk("caused by machine check.\n");
  10.269          HVMTRACE_0D(MCE, current);
  10.270 -        vmx_store_cpu_guest_regs(current, regs);
  10.271          do_machine_check(regs);
  10.272          break;
  10.273      default:
  10.274 @@ -2624,7 +2550,7 @@ asmlinkage void vmx_vmexit_handler(struc
  10.275  
  10.276      exit_reason = __vmread(VM_EXIT_REASON);
  10.277  
  10.278 -    HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
  10.279 +    HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
  10.280  
  10.281      perfc_incra(vmexits, exit_reason);
  10.282  
  10.283 @@ -2723,12 +2649,10 @@ asmlinkage void vmx_vmexit_handler(struc
  10.284                   (X86_EVENTTYPE_NMI << 8) )
  10.285                  goto exit_and_crash;
  10.286              HVMTRACE_0D(NMI, v);
  10.287 -            vmx_store_cpu_guest_regs(v, regs);
  10.288              do_nmi(regs); /* Real NMI, vector 2: normal processing. */
  10.289              break;
  10.290          case TRAP_machine_check:
  10.291              HVMTRACE_0D(MCE, v);
  10.292 -            vmx_store_cpu_guest_regs(v, regs);
  10.293              do_machine_check(regs);
  10.294              break;
  10.295          default:
  10.296 @@ -2775,7 +2699,7 @@ asmlinkage void vmx_vmexit_handler(struc
  10.297      case EXIT_REASON_HLT:
  10.298          inst_len = __get_instruction_length(); /* Safe: HLT */
  10.299          __update_guest_eip(inst_len);
  10.300 -        vmx_do_hlt();
  10.301 +        vmx_do_hlt(regs);
  10.302          break;
  10.303      case EXIT_REASON_INVLPG:
  10.304      {
    11.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Sep 19 12:12:49 2007 +0100
    11.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Wed Sep 19 14:25:44 2007 +0100
    11.3 @@ -23,6 +23,16 @@
    11.4  #include <asm/page.h>
    11.5  #include <public/xen.h>
    11.6  
    11.7 +#define VMRESUME     .byte 0x0f,0x01,0xc3
    11.8 +#define VMLAUNCH     .byte 0x0f,0x01,0xc2
    11.9 +#define VMREAD(off)  .byte 0x0f,0x78,0x44,0x24,off
   11.10 +#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
   11.11 +
   11.12 +/* VMCS field encodings */
   11.13 +#define GUEST_RSP    0x681c
   11.14 +#define GUEST_RIP    0x681e
   11.15 +#define GUEST_RFLAGS 0x6820
   11.16 +
   11.17  #define GET_CURRENT(reg)         \
   11.18          movl $STACK_SIZE-4, reg; \
   11.19          orl  %esp, reg;          \
   11.20 @@ -51,6 +61,14 @@
   11.21          ALIGN
   11.22  ENTRY(vmx_asm_vmexit_handler)
   11.23          HVM_SAVE_ALL_NOSEGREGS
   11.24 +
   11.25 +        movl $GUEST_RIP,%eax
   11.26 +        VMREAD(UREGS_eip)
   11.27 +        movl $GUEST_RSP,%eax
   11.28 +        VMREAD(UREGS_esp)
   11.29 +        movl $GUEST_RFLAGS,%eax
   11.30 +        VMREAD(UREGS_eflags)
   11.31 +
   11.32          movl %esp,%eax
   11.33          push %eax
   11.34          call vmx_vmexit_handler
   11.35 @@ -78,13 +96,19 @@ ENTRY(vmx_asm_do_vmentry)
   11.36          movl %eax,%cr2
   11.37          call vmx_trace_vmentry
   11.38  
   11.39 +        movl $GUEST_RIP,%eax
   11.40 +        VMWRITE(UREGS_eip)
   11.41 +        movl $GUEST_RSP,%eax
   11.42 +        VMWRITE(UREGS_esp)
   11.43 +        movl $GUEST_RFLAGS,%eax
   11.44 +        VMWRITE(UREGS_eflags)
   11.45 +
   11.46          cmpl $0,VCPU_vmx_launched(%ebx)
   11.47          je   vmx_launch
   11.48  
   11.49  /*vmx_resume:*/
   11.50          HVM_RESTORE_ALL_NOSEGREGS
   11.51 -        /* VMRESUME */
   11.52 -        .byte 0x0f,0x01,0xc3
   11.53 +        VMRESUME
   11.54          pushf
   11.55          call vm_resume_fail
   11.56          ud2
   11.57 @@ -92,8 +116,7 @@ ENTRY(vmx_asm_do_vmentry)
   11.58  vmx_launch:
   11.59          movl $1,VCPU_vmx_launched(%ebx)
   11.60          HVM_RESTORE_ALL_NOSEGREGS
   11.61 -        /* VMLAUNCH */
   11.62 -        .byte 0x0f,0x01,0xc2
   11.63 +        VMLAUNCH
   11.64          pushf
   11.65          call vm_launch_fail
   11.66          ud2
    12.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Sep 19 12:12:49 2007 +0100
    12.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Wed Sep 19 14:25:44 2007 +0100
    12.3 @@ -23,6 +23,16 @@
    12.4  #include <asm/page.h>
    12.5  #include <public/xen.h>
    12.6  
    12.7 +#define VMRESUME     .byte 0x0f,0x01,0xc3
    12.8 +#define VMLAUNCH     .byte 0x0f,0x01,0xc2
    12.9 +#define VMREAD(off)  .byte 0x0f,0x78,0x44,0x24,off
   12.10 +#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
   12.11 +
   12.12 +/* VMCS field encodings */
   12.13 +#define GUEST_RSP    0x681c
   12.14 +#define GUEST_RIP    0x681e
   12.15 +#define GUEST_RFLAGS 0x6820
   12.16 +
   12.17  #define GET_CURRENT(reg)         \
   12.18          movq $STACK_SIZE-8, reg; \
   12.19          orq  %rsp, reg;          \
   12.20 @@ -66,6 +76,14 @@
   12.21          ALIGN
   12.22  ENTRY(vmx_asm_vmexit_handler)
   12.23          HVM_SAVE_ALL_NOSEGREGS
   12.24 +
   12.25 +        movl $GUEST_RIP,%eax
   12.26 +        VMREAD(UREGS_rip)
   12.27 +        movl $GUEST_RSP,%eax
   12.28 +        VMREAD(UREGS_rsp)
   12.29 +        movl $GUEST_RFLAGS,%eax
   12.30 +        VMREAD(UREGS_eflags)
   12.31 +
   12.32          movq %rsp,%rdi
   12.33          call vmx_vmexit_handler
   12.34          jmp vmx_asm_do_vmentry
   12.35 @@ -92,13 +110,19 @@ ENTRY(vmx_asm_do_vmentry)
   12.36          movq %rax,%cr2
   12.37          call vmx_trace_vmentry
   12.38  
   12.39 +        movl $GUEST_RIP,%eax
   12.40 +        VMWRITE(UREGS_rip)
   12.41 +        movl $GUEST_RSP,%eax
   12.42 +        VMWRITE(UREGS_rsp)
   12.43 +        movl $GUEST_RFLAGS,%eax
   12.44 +        VMWRITE(UREGS_eflags)
   12.45 +
   12.46          cmpl $0,VCPU_vmx_launched(%rbx)
   12.47          je   vmx_launch
   12.48  
   12.49  /*vmx_resume:*/
   12.50          HVM_RESTORE_ALL_NOSEGREGS
   12.51 -        /* VMRESUME */
   12.52 -        .byte 0x0f,0x01,0xc3
   12.53 +        VMRESUME
   12.54          pushfq
   12.55          call vm_resume_fail
   12.56          ud2
   12.57 @@ -106,8 +130,7 @@ ENTRY(vmx_asm_do_vmentry)
   12.58  vmx_launch:
   12.59          movl $1,VCPU_vmx_launched(%rbx)
   12.60          HVM_RESTORE_ALL_NOSEGREGS
   12.61 -        /* VMLAUNCH */
   12.62 -        .byte 0x0f,0x01,0xc2
   12.63 +        VMLAUNCH
   12.64          pushfq
   12.65          call vm_launch_fail
   12.66          ud2
    13.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Sep 19 12:12:49 2007 +0100
    13.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Wed Sep 19 14:25:44 2007 +0100
    13.3 @@ -2928,8 +2928,6 @@ static int sh_page_fault(struct vcpu *v,
    13.4              sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
    13.5              goto done;
    13.6          }
    13.7 -
    13.8 -        hvm_store_cpu_guest_regs(v, regs);
    13.9      }
   13.10  
   13.11      SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", 
   13.12 @@ -2993,10 +2991,6 @@ static int sh_page_fault(struct vcpu *v,
   13.13      }
   13.14  #endif /* PAE guest */
   13.15  
   13.16 -    /* Emulator has changed the user registers: write back */
   13.17 -    if ( is_hvm_domain(d) )
   13.18 -        hvm_load_cpu_guest_regs(v, regs);
   13.19 -
   13.20      SHADOW_PRINTK("emulated\n");
   13.21      return EXCRET_fault_fixed;
   13.22  
    14.1 --- a/xen/arch/x86/oprofile/op_model_athlon.c	Wed Sep 19 12:12:49 2007 +0100
    14.2 +++ b/xen/arch/x86/oprofile/op_model_athlon.c	Wed Sep 19 14:25:44 2007 +0100
    14.3 @@ -119,7 +119,6 @@ static int athlon_check_ctrs(unsigned in
    14.4  	    (regs->eip == (unsigned long)svm_stgi_label)) {
    14.5  		/* SVM guest was running when NMI occurred */
    14.6  		ASSERT(is_hvm_vcpu(v));
    14.7 -		hvm_store_cpu_guest_regs(v, guest_regs);
    14.8  		eip = guest_regs->eip;
    14.9  		mode = xenoprofile_get_mode(v, guest_regs);
   14.10  	} else {
    15.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Wed Sep 19 12:12:49 2007 +0100
    15.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Wed Sep 19 14:25:44 2007 +0100
    15.3 @@ -89,6 +89,9 @@ void __dummy__(void)
    15.4      BLANK();
    15.5  
    15.6      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    15.7 +    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    15.8 +    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    15.9 +    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
   15.10      BLANK();
   15.11  
   15.12      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    16.1 --- a/xen/arch/x86/x86_32/traps.c	Wed Sep 19 12:12:49 2007 +0100
    16.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Sep 19 14:25:44 2007 +0100
    16.3 @@ -47,7 +47,6 @@ void show_registers(struct cpu_user_regs
    16.4      {
    16.5          struct segment_register sreg;
    16.6          context = "hvm";
    16.7 -        hvm_store_cpu_guest_regs(v, &fault_regs);
    16.8          fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
    16.9          fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
   16.10          fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
    17.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Wed Sep 19 12:12:49 2007 +0100
    17.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Wed Sep 19 14:25:44 2007 +0100
    17.3 @@ -95,6 +95,9 @@ void __dummy__(void)
    17.4      BLANK();
    17.5  
    17.6      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    17.7 +    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    17.8 +    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    17.9 +    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
   17.10      BLANK();
   17.11  
   17.12      OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    18.1 --- a/xen/arch/x86/x86_64/traps.c	Wed Sep 19 12:12:49 2007 +0100
    18.2 +++ b/xen/arch/x86/x86_64/traps.c	Wed Sep 19 14:25:44 2007 +0100
    18.3 @@ -50,7 +50,6 @@ void show_registers(struct cpu_user_regs
    18.4      {
    18.5          struct segment_register sreg;
    18.6          context = "hvm";
    18.7 -        hvm_store_cpu_guest_regs(v, &fault_regs);
    18.8          fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
    18.9          fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
   18.10          fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
    19.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Sep 19 12:12:49 2007 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Sep 19 14:25:44 2007 +0100
    19.3 @@ -21,6 +21,7 @@
    19.4  #ifndef __ASM_X86_HVM_HVM_H__
    19.5  #define __ASM_X86_HVM_HVM_H__
    19.6  
    19.7 +#include <asm/current.h>
    19.8  #include <asm/x86_emulate.h>
    19.9  #include <public/domctl.h>
   19.10  #include <public/hvm/save.h>
   19.11 @@ -79,16 +80,6 @@ struct hvm_function_table {
   19.12      int  (*vcpu_initialise)(struct vcpu *v);
   19.13      void (*vcpu_destroy)(struct vcpu *v);
   19.14  
   19.15 -    /*
   19.16 -     * Store and load guest state:
   19.17 -     * 1) load/store guest register state,
   19.18 -     * 2) modify guest state (e.g., set debug flags).
   19.19 -     */
   19.20 -    void (*store_cpu_guest_regs)(
   19.21 -        struct vcpu *v, struct cpu_user_regs *r);
   19.22 -    void (*load_cpu_guest_regs)(
   19.23 -        struct vcpu *v, struct cpu_user_regs *r);
   19.24 -
   19.25      /* save and load hvm guest cpu context for save/restore */
   19.26      void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
   19.27      int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
   19.28 @@ -166,19 +157,6 @@ void hvm_vcpu_reset(struct vcpu *vcpu);
   19.29  
   19.30  void hvm_send_assist_req(struct vcpu *v);
   19.31  
   19.32 -static inline void
   19.33 -hvm_store_cpu_guest_regs(
   19.34 -    struct vcpu *v, struct cpu_user_regs *r)
   19.35 -{
   19.36 -    hvm_funcs.store_cpu_guest_regs(v, r);
   19.37 -}
   19.38 -
   19.39 -static inline void
   19.40 -hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
   19.41 -{
   19.42 -    hvm_funcs.load_cpu_guest_regs(v, r);
   19.43 -}
   19.44 -
   19.45  void hvm_set_guest_time(struct vcpu *v, u64 gtime);
   19.46  u64 hvm_get_guest_time(struct vcpu *v);
   19.47  
   19.48 @@ -199,12 +177,14 @@ u64 hvm_get_guest_time(struct vcpu *v);
   19.49  static inline int
   19.50  hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   19.51  {
   19.52 +    ASSERT(v == current);
   19.53      return hvm_funcs.interrupts_enabled(v, type);
   19.54  }
   19.55  
   19.56  static inline int
   19.57  hvm_guest_x86_mode(struct vcpu *v)
   19.58  {
   19.59 +    ASSERT(v == current);
   19.60      return hvm_funcs.guest_x86_mode(v);
   19.61  }
   19.62  
    20.1 --- a/xen/include/asm-x86/hvm/svm/emulate.h	Wed Sep 19 12:12:49 2007 +0100
    20.2 +++ b/xen/include/asm-x86/hvm/svm/emulate.h	Wed Sep 19 14:25:44 2007 +0100
    20.3 @@ -131,16 +131,6 @@ static inline int skip_prefix_bytes(u8 *
    20.4      return index;
    20.5  }
    20.6  
    20.7 -
    20.8 -
    20.9 -static void inline __update_guest_eip(
   20.10 -    struct vmcb_struct *vmcb, int inst_len) 
   20.11 -{
   20.12 -    ASSERT(inst_len > 0);
   20.13 -    vmcb->rip += inst_len;
   20.14 -    vmcb->rflags &= ~X86_EFLAGS_RF;
   20.15 -}
   20.16 -
   20.17  #endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */
   20.18  
   20.19  /*
    21.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Wed Sep 19 12:12:49 2007 +0100
    21.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Wed Sep 19 14:25:44 2007 +0100
    21.3 @@ -66,7 +66,7 @@ struct hvm_vcpu {
    21.4  
    21.5  #define ARCH_HVM_IO_WAIT         1   /* Waiting for I/O completion */
    21.6  
    21.7 -#define HVM_CONTEXT_STACK_BYTES  (offsetof(struct cpu_user_regs, error_code))
    21.8 +#define HVM_CONTEXT_STACK_BYTES  (offsetof(struct cpu_user_regs, ss))
    21.9  
   21.10  #endif /* __ASM_X86_HVM_VCPU_H__ */
   21.11
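
The synchronisation itself is now confined to the exits.S stubs. Roughly, as a C rendering of the new assembly (illustrative only; the real VMX code uses the hand-encoded VMREAD/VMWRITE macros above, and SVM moves the VMCB fields via the VMCB_rip/rsp/rflags offsets):

    struct cpu_user_regs *regs = guest_cpu_user_regs();

    /* Guest -> Xen, on every vmexit. */
    regs->eip    = vmread(GUEST_RIP);     /* SVM: vmcb->rip    */
    regs->esp    = vmread(GUEST_RSP);     /* SVM: vmcb->rsp    */
    regs->eflags = vmread(GUEST_RFLAGS);  /* SVM: vmcb->rflags */

    /* Xen -> guest, just before vmentry. */
    vmwrite(GUEST_RIP,    regs->eip);     /* SVM: vmcb->rip    = regs->eip;    */
    vmwrite(GUEST_RSP,    regs->esp);     /* SVM: vmcb->rsp    = regs->esp;    */
    vmwrite(GUEST_RFLAGS, regs->eflags);  /* SVM: vmcb->rflags = regs->eflags; */

Because eip, eflags and esp now travel in that stack frame, HVM_CONTEXT_STACK_BYTES is widened from offsetof(struct cpu_user_regs, error_code) to offsetof(struct cpu_user_regs, ss), so the memcpy()s in handle_mmio(), svm_io_instruction() and hvm_io_assist() also carry those three fields.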