switch ( cr )
{
case 0:
+        /* TS cleared? Then initialise FPU now. */
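+        /* setup_fpu() loads state onto the physical FPU: running vcpu only. */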
+        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
+             (vmcb->cr0 & X86_CR0_TS) )
+        {
+            setup_fpu(v);
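+            /* FPU is now usable by the guest: stop intercepting #NM. */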
+            vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
+        }
+
vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
if ( !paging_mode_hap(v->domain) )
vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
}
}
-static int svm_set_cr0(unsigned long value)
-{
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int rc = hvm_set_cr0(value);
-
-    if ( rc == 0 )
-        return 0;
-
-    /* TS cleared? Then initialise FPU now. */
-    if ( !(value & X86_CR0_TS) )
-    {
-        setup_fpu(v);
-        vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
-    }
-
-    return 1;
-}
-
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
unsigned long value = 0;
switch ( cr )
{
case 0:
-        return svm_set_cr0(value);
+        return hvm_set_cr0(value);
case 3:
return hvm_set_cr3(value);
case 4:
gpreg = decode_src_reg(prefix, buffer[index+2]);
value = get_reg(gpreg, regs, vmcb) & 0xF;
value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
-        result = svm_set_cr0(value);
+        result = hvm_set_cr0(value);
HVMTRACE_1D(LMSW, current, value);
break;
switch ( cr )
{
case 0:
+        /* TS cleared? Then initialise FPU now. */
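+        /*
+         * hw_cr[0] holds the CR0 value currently programmed into hardware,
+         * so TS set there but clear in the new guest_cr[0] marks a 1->0
+         * transition of TS.
+         */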
+        if ( (v == current) && !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) &&
+             (v->arch.hvm_vcpu.hw_cr[0] & X86_CR0_TS) )
+        {
+            setup_fpu(v);
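+            /* FPU usable again: clear the #NM intercept in the bitmap. */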
+            __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+        }
+
v->arch.hvm_vcpu.hw_cr[0] =
v->arch.hvm_vcpu.guest_cr[0] |
X86_CR0_PE | X86_CR0_NE | X86_CR0_PG | X86_CR0_WP;
if ( rc == 0 )
return 0;
-    /* TS cleared? Then initialise FPU now. */
-    if ( !(value & X86_CR0_TS) )
-    {
-        setup_fpu(v);
-        __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-    }
-
/*
* VMX does not implement real-mode virtualization. We emulate
* real-mode by performing a world switch to VMXAssist whenever