xenbits.xensource.com Git - xen.git/commitdiff
hvm amd: Fix 32bit guest VM save/restore issues associated with SYSENTER MSRs
author Keir Fraser <keir@xen.org>
Sun, 6 Feb 2011 17:39:10 +0000 (17:39 +0000)
committer Keir Fraser <keir@xen.org>
Sun, 6 Feb 2011 17:39:10 +0000 (17:39 +0000)
This patch turns on SYSENTER MSR interception for 32bit guest VMs on
AMD CPUs. With it, hvm_svm.guest_sysenter_xx fields always contain the
canonical version of SYSENTER MSRs and are used in guest save/restore.
The data fields in VMCB save area are updated as necessary.

Reported-by: James Harper <james.harper@bendigoit.com.au>
Signed-off-by: Wei Huang <wei.huang2@amd.com>
xen-unstable changeset:   22873:186162762071
xen-unstable date:        Sun Feb 06 17:03:09 2011 +0000

xen/arch/x86/hvm/svm/svm.c

index a77639f064d23a3b6f5ec33d25b8aa8ace094745..f175216a288ab53517b571ef79ec235f5d30132a 100644 (file)
@@ -251,10 +251,11 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     hvm_update_guest_cr(v, 2);
     hvm_update_guest_cr(v, 4);
 
-    v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
-    v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
-    v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
-
+    /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
+    vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
+    vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp;
+    vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip;
+    
     if ( paging_mode_hap(v->domain) )
     {
         vmcb->np_enable = 1;
@@ -449,14 +450,6 @@ static void svm_update_guest_efer(struct vcpu *v)
     vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
     if ( lma )
         vmcb->efer |= EFER_LME;
-
-    /*
-     * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with
-     * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate.
-     */
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
-    svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
@@ -1099,6 +1092,21 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
     u32 ecx = regs->ecx;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    int sync = 0;
+
+    switch ( ecx )
+    {
+    case MSR_IA32_SYSENTER_CS:
+    case MSR_IA32_SYSENTER_ESP:
+    case MSR_IA32_SYSENTER_EIP:
+        sync = 1;
+        break;
+    default:
+        break;
+    }
+
+    if ( sync )
+        svm_sync_vmcb(v);    
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
@@ -1110,13 +1118,13 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
         goto gpf;
 
     case MSR_IA32_SYSENTER_CS:
-        v->arch.hvm_svm.guest_sysenter_cs = msr_content;
+        vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content;
         break;
     case MSR_IA32_SYSENTER_ESP:
-        v->arch.hvm_svm.guest_sysenter_esp = msr_content;
+        vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content;
         break;
     case MSR_IA32_SYSENTER_EIP:
-        v->arch.hvm_svm.guest_sysenter_eip = msr_content;
+        vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content;
         break;
 
     case MSR_IA32_DEBUGCTLMSR:
@@ -1164,6 +1172,9 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
         break;
     }
 
+    if ( sync )
+        svm_vmload(vmcb);
+
     return X86EMUL_OKAY;
 
  gpf: