SET(wbinvd),
SET(invlpg),
.get_fpu = emul_test_get_fpu,
+ .put_fpu = emul_test_put_fpu,
.cpuid = emul_test_cpuid,
};
#undef SET
.read_cr = emul_test_read_cr,
.read_msr = read_msr,
.get_fpu = emul_test_get_fpu,
+ .put_fpu = emul_test_put_fpu,
};
int main(int argc, char **argv)
return X86EMUL_OKAY;
}
+void emul_test_put_fpu(
+ struct x86_emulate_ctxt *ctxt,
+ enum x86_emulate_fpu_type backout)
+{
+ /* TBD */
+}
+
#include "x86_emulate/x86_emulate.c"
void *exception_callback_arg,
enum x86_emulate_fpu_type type,
struct x86_emulate_ctxt *ctxt);
+
+void emul_test_put_fpu(
+ struct x86_emulate_ctxt *ctxt,
+ enum x86_emulate_fpu_type backout);
#include <xen/paging.h>
#include <xen/trace.h>
#include <asm/event.h>
+#include <asm/i387.h>
#include <asm/xstate.h>
#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
if ( !curr->fpu_dirtied )
hvm_funcs.fpu_dirty_intercept();
+ else if ( type == X86EMUL_FPU_fpu )
+ {
+ const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt =
+ curr->arch.fpu_ctxt;
+
+ /*
+ * Latch current register state so that we can back out changes
+ * if needed (namely when a memory write fails after register state
+ * has already been updated).
+ * NB: We don't really need the "enable" part of the called function
+ * (->fpu_dirtied set implies CR0.TS clear), but the additional
+ * overhead should be low enough to not warrant introduction of yet
+ * another slightly different function. However, we need to undo the
+ * ->fpu_dirtied clearing the function does as well as the possible
+ * masking of all exceptions by FNSTENV.
+ */
+ save_fpu_enable();
+ curr->fpu_dirtied = true;
+ if ( (fpu_ctxt->fcw & 0x3f) != 0x3f )
+ {
+ uint16_t fcw;
+
+ asm ( "fnstcw %0" : "=m" (fcw) );
+ if ( (fcw & 0x3f) == 0x3f )
+ asm ( "fldcw %0" :: "m" (fpu_ctxt->fcw) );
+ else
+ ASSERT(fcw == fpu_ctxt->fcw);
+ }
+ }
curr->arch.hvm_vcpu.fpu_exception_callback = exception_callback;
curr->arch.hvm_vcpu.fpu_exception_callback_arg = exception_callback_arg;
}
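
The FCW fix-up above is needed because FNSTENV, possibly executed inside save_fpu_enable(), masks all x87 exceptions as a side effect, while the guest may be running with some of them unmasked. A minimal user-space demonstration of that side effect (assuming x86 with GNU C inline assembly; not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t fcw = 0x37a;  /* unmask #I and #Z (clear FCW bits 0 and 2) */
        uint16_t after;
        char env[28];          /* 32-bit-format FNSTENV image */

        asm volatile ( "fninit\n\t"
                       "fldcw %2\n\t"
                       "fnstenv %1\n\t"  /* side effect: masks all exceptions */
                       "fnstcw %0"
                       : "=m" (after), "=m" (env) : "m" (fcw) );
        printf("FCW %#x -> %#x\n", fcw, after);  /* prints 0x37a -> 0x37f */
        return 0;
    }

This is also why the reload is conditional: when the guest runs with all exception mask bits (0x3f) already set, FNSTENV's masking is a no-op and the FLDCW can be skipped.
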
static void hvmemul_put_fpu(
- struct x86_emulate_ctxt *ctxt)
+ struct x86_emulate_ctxt *ctxt,
+ enum x86_emulate_fpu_type backout)
{
struct vcpu *curr = current;
+
curr->arch.hvm_vcpu.fpu_exception_callback = NULL;
+
+ if ( backout == X86EMUL_FPU_fpu )
+ {
+ /*
+ * To back out changes to the register file, simply adjust state such
+ * that upon next FPU insn use by the guest we'll reload the state
+ * saved (or freshly loaded) by hvmemul_get_fpu().
+ */
+ curr->fpu_dirtied = false;
+ stts();
+ hvm_funcs.fpu_leave(curr);
+ }
}
static int hvmemul_invlpg(
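
The backout itself is deliberately lazy: rather than restoring registers synchronously, hvmemul_put_fpu() re-arms the lazy-FPU machinery so the guest's next FPU instruction traps and reloads the state latched by hvmemul_get_fpu(). A condensed timeline (sketch only, not verbatim Xen code):

    /*
     * hvmemul_get_fpu():  save_fpu_enable();         latch regs in fpu_ctxt
     *                     curr->fpu_dirtied = true;
     *
     * <stub executes the FPU insn: hardware register file is updated>
     * <the emulated memory write of the result FAILS>
     *
     * hvmemul_put_fpu():  curr->fpu_dirtied = false;
     *                     stts();                    set CR0.TS
     *                     hvm_funcs.fpu_leave(curr);
     *
     * <next guest FPU insn traps (fpu_dirty intercept / #NM) and reloads
     *  fpu_ctxt, i.e. the pre-instruction state latched above>
     */
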
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.update_guest_vendor = svm_update_guest_vendor,
+ .fpu_leave = svm_fpu_leave,
.set_guest_pat = svm_set_guest_pat,
.get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
.update_guest_vendor = vmx_update_guest_vendor,
+ .fpu_leave = vmx_fpu_leave,
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
{
unsigned long cr0;
+ fail_if(type == X86EMUL_FPU_fpu && !ops->put_fpu);
fic->type = type;
fail_if(!ops->read_cr);
static void put_fpu(
struct fpu_insn_ctxt *fic,
+ bool failed_late,
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- if ( fic->type != X86EMUL_FPU_none && ops->put_fpu )
- ops->put_fpu(ctxt);
+ if ( unlikely(failed_late) && fic->type == X86EMUL_FPU_fpu )
+ ops->put_fpu(ctxt, X86EMUL_FPU_fpu);
+ else if ( fic->type != X86EMUL_FPU_none && ops->put_fpu )
+ ops->put_fpu(ctxt, X86EMUL_FPU_none);
fic->type = X86EMUL_FPU_none;
}
break;
case 0x9b: /* wait/fwait */
- fic.insn_bytes = 1;
host_and_vcpu_must_have(fpu);
get_fpu(X86EMUL_FPU_wait, &fic);
+ fic.insn_bytes = 1;
asm volatile ( "fwait" ::: "memory" );
check_fpu_exn(&fic);
break;
}
complete_insn: /* Commit shadow register state. */
- put_fpu(&fic, ctxt, ops);
+ put_fpu(&fic, false, ctxt, ops);
/* Zero the upper 32 bits of %rip if not in 64-bit mode. */
if ( !mode_64bit() )
ctxt->regs->eflags &= ~X86_EFLAGS_RF;
done:
- put_fpu(&fic, ctxt, ops);
+ put_fpu(&fic, fic.insn_bytes > 0 && dst.type == OP_MEM, ctxt, ops);
put_stub(stub);
return rc;
#undef state
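
At the done: label the backout predicate is fic.insn_bytes > 0 && dst.type == OP_MEM: insn_bytes is only set on FPU paths (and, per the fwait hunk above, only once get_fpu() has succeeded), while a memory destination at this point means the failure occurred after the stub had already updated the register file. Hardware keeps such instructions atomic, but emulation splits them into a register update followed by a guest memory write, and it is only the second half that can fail. A guest-side illustration (hypothetical label, AT&T syntax):

        fldpi                     /* push pi onto the x87 stack */
        fistpl  unmapped_addr     /* store + pop; the store faults */

    /*
     * On hardware the #PF is delivered with no x87 state change. Under
     * emulation ST(0) has already been popped by the time the write of
     * the result fails, so without the backout the guest's #PF handler
     * would retry the instruction against an already-popped stack.
     */
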
* put_fpu: Relinquish the FPU. Unhook from FPU/SIMD exception handlers.
* The handler, if installed, must be prepared to get called without
* the get_fpu one having got called before!
+ * @backout: Undo updates to the specified register file (can, besides
+ * X86EMUL_FPU_none, only be X86EMUL_FPU_fpu at present).
*/
void (*put_fpu)(
- struct x86_emulate_ctxt *ctxt);
+ struct x86_emulate_ctxt *ctxt,
+ enum x86_emulate_fpu_type backout);
/* invlpg: Invalidate paging structures which map addressed byte. */
int (*invlpg)(
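
For completeness, a minimal host-side hook pair honoring the extended contract might look like the sketch below (hypothetical my_* names; 64-bit x86, GNU C; exception-callback wiring, error handling, and CR0.TS management omitted). It latches the full register file on acquisition and rolls back only when asked to:

    static char latched[512] __attribute__((aligned(16)));

    static int my_get_fpu(
        void (*exception_callback)(void *, struct cpu_user_regs *),
        void *exception_callback_arg,
        enum x86_emulate_fpu_type type,
        struct x86_emulate_ctxt *ctxt)
    {
        if ( type == X86EMUL_FPU_fpu )
            asm volatile ( "fxsave %0" : "=m" (latched) );  /* latch state */
        return X86EMUL_OKAY;
    }

    static void my_put_fpu(
        struct x86_emulate_ctxt *ctxt,
        enum x86_emulate_fpu_type backout)
    {
        if ( backout == X86EMUL_FPU_fpu )
            /* Roll the register file back to the latched snapshot. */
            asm volatile ( "fxrstor %0" :: "m" (latched) );
    }
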
void (*update_guest_vendor)(struct vcpu *v);
+ void (*fpu_leave)(struct vcpu *v);
+
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);