             if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
                 BUG();
         }
-        vcpu_restore_fpu_eager(n);
+        vcpu_restore_fpu_nonlazy(n, 0);
         n->arch.ctxt_switch_to(n);
     }
 /* VCPU FPU Functions */
 /*******************************/
 /* Restore FPU state whenever VCPU is scheduled in. */
-void vcpu_restore_fpu_eager(struct vcpu *v)
+void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool_t need_stts)
 {
     if ( v->arch.fully_eager_fpu )
     {
         v->fpu_dirtied = 1;
         /* Xen doesn't need TS set, but the guest might. */
-        if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
-            stts();
+        need_stts = is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS);
     }
     /* Restore the nonlazy extended state which is not tracked by the CR0.TS bit. */
     else if ( v->arch.nonlazy_xstate_used )
     {
         /* Avoid recursion */
         clts();
         fpu_xrstor(v, XSTATE_NONLAZY);
-        stts();
+        need_stts = 1;
     }
+
+    if ( need_stts )
+        stts();
 }

 /*
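For readability, here is how the function reads once the hunk above is
applied (a merged view of the context and '+' lines only; code elided
by the hunk is likewise not shown):

void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool_t need_stts)
{
    if ( v->arch.fully_eager_fpu )
    {
        v->fpu_dirtied = 1;
        /* Xen doesn't need TS set, but the guest might. */
        need_stts = is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS);
    }
    /* Restore the nonlazy extended state which is not tracked by the CR0.TS bit. */
    else if ( v->arch.nonlazy_xstate_used )
    {
        /* Avoid recursion */
        clts();
        fpu_xrstor(v, XSTATE_NONLAZY);
        need_stts = 1;
    }

    /* Both branches now funnel through a single CR0.TS update point. */
    if ( need_stts )
        stts();
}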
     irq_exit();
     efi_rs_on_cpu = NR_CPUS;
     spin_unlock(&efi_rs_lock);
-    vcpu_restore_fpu_eager(curr);
+    vcpu_restore_fpu_nonlazy(curr, 1);
 }

 bool_t efi_rs_using_pgtables(void)
     } r[8];
 };

-void vcpu_restore_fpu_eager(struct vcpu *v);
+void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool_t need_stts);
 void vcpu_restore_fpu_lazy(struct vcpu *v);
 void vcpu_save_fpu(struct vcpu *v);
 void save_fpu_enable(void);
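A hypothetical caller's-eye sketch (not part of the patch) of the two
conventions the hunks above establish for the new need_stts parameter;
going by the surrounding context lines, the call sites appear to be
__context_switch and efi_rs_leave:

/* Context-switch path: pass 0 and let the function itself decide
 * whether the incoming PV guest wants CR0.TS set. */
vcpu_restore_fpu_nonlazy(n, 0);

/* EFI runtime-services exit path: pass 1 so stts() still runs in the
 * cases that previously relied on the old behaviour, unless the
 * fully-eager branch recomputes the need. */
vcpu_restore_fpu_nonlazy(curr, 1);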