nestedsvm: fix lazy fpu switching causing a triple fault
author     Christoph Egger <Christoph.Egger@amd.com>
           Fri, 28 Oct 2011 16:23:51 +0000 (17:23 +0100)
committer  Christoph Egger <Christoph.Egger@amd.com>
           Fri, 28 Oct 2011 16:23:51 +0000 (17:23 +0100)
Fix FPU switching uncovered with Hyper-V: always do the FPU switching
on the n1 vmcb rather than on the currently active vmcb.  The FPU state
between the n1 and n2 vmcb is already synced in the vmrun and vmexit
emulation.

This fixes the situation where the n2 vmcb had the #NM intercept bit
set but the virtual vmcb did not, so the l1 guest was wrongly assumed
to be able to handle the #NM intercept.

The result was three #GPs with error code 0x11 in the l1 guest.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Committed-by: Keir Fraser <keir@xen.org>
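
Background: lazy FPU switching intercepts #NM (device-not-available,
vector 7) while the guest's FPU state is not loaded and drops the
intercept once it has been restored.  The standalone sketch below only
illustrates how that intercept bit is toggled; it is a simplified
stand-in, not the hypervisor code, which follows in the diff.

    #include <stdint.h>
    #include <stdio.h>

    #define TRAP_no_device 7   /* #NM, device-not-available, as in Xen */

    int main(void)
    {
        /* One bit per exception vector, mirroring the VMCB exception
         * intercept field.  Start with #NM intercepted because the
         * guest's FPU state is not loaded yet. */
        uint32_t intercepts = 1U << TRAP_no_device;

        /* svm_fpu_enter(): FPU state restored, stop trapping #NM so the
         * guest can use the FPU without a vmexit. */
        intercepts &= ~(1U << TRAP_no_device);
        printf("after fpu_enter: %#x\n", intercepts);

        /* svm_fpu_leave(): FPU state saved lazily; re-arm the #NM trap
         * (together with setting CR0.TS) for the guest's next FPU use. */
        intercepts |= 1U << TRAP_no_device;
        printf("after fpu_leave: %#x\n", intercepts);
        return 0;
    }

After the change below, these toggles are always applied to the n1 vmcb
(nv_n1vmcx) instead of whichever vmcb is currently active.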
xen/arch/x86/hvm/svm/svm.c

index 479ad17103dd35e3f04db588ea3ce3566541b655..fc3803a0db0dcb8af387b77f377714445b05fd24 100644
@@ -349,16 +349,17 @@ static int svm_load_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
 
 static void svm_fpu_enter(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
 
     vcpu_restore_fpu_lazy(v);
     vmcb_set_exception_intercepts(
-        vmcb, vmcb_get_exception_intercepts(vmcb) & ~(1U << TRAP_no_device));
+        n1vmcb,
+        vmcb_get_exception_intercepts(n1vmcb) & ~(1U << TRAP_no_device));
 }
 
 static void svm_fpu_leave(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
 
     ASSERT(!v->fpu_dirtied);
     ASSERT(read_cr0() & X86_CR0_TS);
@@ -372,9 +373,9 @@ static void svm_fpu_leave(struct vcpu *v)
     if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
     {
         vmcb_set_exception_intercepts(
-            vmcb,
-            vmcb_get_exception_intercepts(vmcb) | (1U << TRAP_no_device));
-        vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) | X86_CR0_TS);
+            n1vmcb,
+            vmcb_get_exception_intercepts(n1vmcb) | (1U << TRAP_no_device));
+        vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) | X86_CR0_TS);
     }
 }
 
@@ -1191,15 +1192,17 @@ static void svm_fpu_dirty_intercept(void)
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct vmcb_struct *n1vmcb = vcpu_nestedhvm(v).nv_n1vmcx;
 
     svm_fpu_enter(v);
 
-    if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) ) {
-       /* Check if guest must make FPU ready for the nested guest */
+    if ( vmcb != n1vmcb )
+    {
+       /* Check if l1 guest must make FPU ready for the l2 guest */
        if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS )
            hvm_inject_exception(TRAP_no_device, HVM_DELIVER_NO_ERROR_CODE, 0);
        else
-           vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS);
+           vmcb_set_cr0(n1vmcb, vmcb_get_cr0(n1vmcb) & ~X86_CR0_TS);
        return;
     }
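
The n1/n2 split matters because, on a vmexit taken while the l2 guest
runs, the host must decide whether the intercept is its own (lazy FPU
handling) or the l1 guest's (requested in the virtual vmcb).  The
fragment below is a hypothetical sketch of that attribution check; the
helper name and parameter are illustrative only and do not come from
this patch.

    #include <stdbool.h>
    #include <stdint.h>

    #define TRAP_no_device 7

    /* True if the l1 guest asked for the #NM intercept in its virtual
     * vmcb, in which case the exit must be reflected to it; otherwise
     * the host handles the lazy FPU restore itself. */
    bool l1_owns_nm_exit(uint32_t virtual_vmcb_intercepts)
    {
        return (virtual_vmcb_intercepts & (1U << TRAP_no_device)) != 0;
    }

Before this fix, the host toggled #NM on whichever vmcb was active, so
the n2 vmcb could carry a host-owned #NM intercept that the virtual
vmcb never set; the exit was then mis-attributed to the l1 guest, which
had no handler for it, producing the #GP cascade described above.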