x86/xsave: fix nonlazy state handling
author     Liu Jinsong <jinsong.liu@intel.com>
           Mon, 25 Nov 2013 10:19:04 +0000 (11:19 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Mon, 25 Nov 2013 10:19:04 +0000 (11:19 +0100)
Nonlazy xstates need to be xsaved each time vcpu_save_fpu() runs:
accesses to nonlazy xstates do not trigger a #NM exception, so this
state must be restored whenever a vcpu is scheduled in and saved
whenever it is scheduled out.

Currently this bug affects the AMD LWP feature, and it will later
affect the Intel MPX feature as well. With this fix both LWP and MPX
work fine.
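
For context, the lazy/nonlazy split partitions the XCR0 feature bits; the
mask names used in the diff below come from xen/include/asm-x86/xstate.h.
The following is an illustrative paraphrase of those definitions as of this
era, not verbatim source:

    /* Illustrative paraphrase of xen/include/asm-x86/xstate.h (circa Xen 4.4). */
    #define XSTATE_FP      (1ULL << 0)
    #define XSTATE_SSE     (1ULL << 1)
    #define XSTATE_YMM     (1ULL << 2)   /* AVX */
    #define XSTATE_LWP     (1ULL << 62)  /* AMD Lightweight Profiling */
    #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
    #define XSTATE_ALL     (~0ULL)
    /* Components not guarded by CR0.TS/#NM; at this point only LWP. */
    #define XSTATE_NONLAZY XSTATE_LWP
    #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)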

Signed-off-by: Liu Jinsong <jinsong.liu@intel.com>
Furthermore, during restore we also need to set nonlazy_xstate_used
according to the incoming accumulated XCR0.

Also adjust the changes to i387.c such that there won't be a pointless
clts()/stts() pair.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
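
The net effect on the save path can be summarized as follows (a decision
table derived from the i387.c hunks below; the 0-mask row is unreachable
from vcpu_save_fpu() after this change):

    /*
     * vcpu_xsave_mask() decision table (derived from the diff below):
     *
     *   fpu_dirtied  nonlazy_xstate_used   mask
     *   -----------  -------------------   --------------
     *        0                0            0   (vcpu_save_fpu() returns early,
     *                                           avoiding a pointless
     *                                           clts()/stts() pair)
     *        0                1            XSTATE_NONLAZY
     *        1                0            XSTATE_LAZY
     *        1                1            XSTATE_ALL
     */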
xen/arch/x86/domctl.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/i387.c

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f7e4586cab72c8fc0bbfbb0cee93e4da93ebab99..864421864e62c30e5a8ecd440e4b87e27964c4e4 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1146,6 +1146,8 @@ long arch_do_domctl(
             {
                 v->arch.xcr0 = _xcr0;
                 v->arch.xcr0_accum = _xcr0_accum;
+                if ( _xcr0_accum & XSTATE_NONLAZY )
+                    v->arch.nonlazy_xstate_used = 1;
                 memcpy(v->arch.xsave_area, _xsave_area,
                        evc->size - 2 * sizeof(uint64_t));
             }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b76f041eb06192e5f90e2a8a9eab25f577ddce3b..0094c62cb3ea41f893b672620e44d0082c1f57a9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1119,6 +1119,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 
     v->arch.xcr0 = ctxt->xcr0;
     v->arch.xcr0_accum = ctxt->xcr0_accum;
+    if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
+        v->arch.nonlazy_xstate_used = 1;
     memcpy(v->arch.xsave_area, &ctxt->save_area,
            desc->length - offsetof(struct hvm_hw_cpu_xsave, save_area));
 
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 76492747015af44b6d1722237d50684d56560ff5..bd72138878200c83ea1aeb27e7b52b98db537dd7 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -133,11 +133,22 @@ static inline void fpu_frstor(struct vcpu *v)
 /*******************************/
 /*      FPU Save Functions     */
 /*******************************/
+
+static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
+{
+    if ( v->fpu_dirtied )
+        return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
+
+    return v->arch.nonlazy_xstate_used ? XSTATE_NONLAZY : 0;
+}
+
 /* Save x87 extended state */
 static inline void fpu_xsave(struct vcpu *v)
 {
     bool_t ok;
+    uint64_t mask = vcpu_xsave_mask(v);
 
+    ASSERT(mask);
     ASSERT(v->arch.xsave_area);
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself,
@@ -145,7 +156,7 @@ static inline void fpu_xsave(struct vcpu *v)
      */
     ok = set_xcr0(v->arch.xcr0_accum | XSTATE_FP_SSE);
     ASSERT(ok);
-    xsave(v, v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY);
+    xsave(v, mask);
     ok = set_xcr0(v->arch.xcr0 ?: XSTATE_FP_SSE);
     ASSERT(ok);
 }
@@ -257,7 +268,7 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
  */
 void vcpu_save_fpu(struct vcpu *v)
 {
-    if ( !v->fpu_dirtied )
+    if ( !v->fpu_dirtied && !v->arch.nonlazy_xstate_used )
         return;
 
     ASSERT(!is_idle_vcpu(v));
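
For completeness, the scheduled-in counterpart is already eager and is not
touched by this patch; a hedged paraphrase of vcpu_restore_fpu_eager() from
the i387.c of this era (details approximate):

    /* Paraphrase of vcpu_restore_fpu_eager() (xen/arch/x86/i387.c, circa
     * Xen 4.4), called on the context-switch-in path; details approximate. */
    void vcpu_restore_fpu_eager(struct vcpu *v)
    {
        ASSERT(!is_idle_vcpu(v));

        /* Restore nonlazy state: CR0.TS/#NM does not track these components. */
        if ( v->arch.nonlazy_xstate_used )
        {
            clts();                          /* avoid #NM on the xrstor itself */
            fpu_xrstor(v, XSTATE_NONLAZY);
            stts();
        }
    }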