return X86EMUL_UNHANDLEABLE;
/* This is a singleton operation: fail it with an exception. */
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector =
- (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault;
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
- hvmemul_ctxt->trap.error_code = 0;
- hvmemul_ctxt->trap.insn_len = 0;
+ x86_emul_hw_exception((seg == x86_seg_ss)
+ ? TRAP_stack_error
+ : TRAP_gp_fault, 0, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
}
return X86EMUL_OKAY;
}
-static int hvmemul_inject_hw_exception(
- uint8_t vector,
- int32_t error_code,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector = vector;
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
- hvmemul_ctxt->trap.error_code = error_code;
- hvmemul_ctxt->trap.insn_len = 0;
-
- return X86EMUL_OKAY;
-}
-
-static int hvmemul_inject_sw_interrupt(
- enum x86_swint_type type,
- uint8_t vector,
- uint8_t insn_len,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
- switch ( type )
- {
- case x86_swint_icebp:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
- break;
-
- case x86_swint_int3:
- case x86_swint_into:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_EXCEPTION;
- break;
-
- case x86_swint_int:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_INTERRUPT;
- break;
-
- default:
- return X86EMUL_UNHANDLEABLE;
- }
-
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector = vector;
- hvmemul_ctxt->trap.error_code = X86_EVENT_NO_EC;
- hvmemul_ctxt->trap.insn_len = insn_len;
-
- return X86EMUL_OKAY;
-}
-
static int hvmemul_get_fpu(
void (*exception_callback)(void *, struct cpu_user_regs *),
void *exception_callback_arg,
* hvmemul_virtual_to_linear() raises exceptions for type/limit
* violations, so squash them.
*/
- hvmemul_ctxt->exn_pending = 0;
- hvmemul_ctxt->trap = (struct x86_event){};
+ x86_emul_reset_event(ctxt);
rc = X86EMUL_OKAY;
}
rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
if ( rc != X86EMUL_OKAY )
- hvmemul_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
return rc;
}
.write_msr = hvmemul_write_msr,
.wbinvd = hvmemul_wbinvd,
.cpuid = hvmemul_cpuid,
- .inject_hw_exception = hvmemul_inject_hw_exception,
- .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
.invlpg = hvmemul_invlpg,
.write_msr = hvmemul_write_msr_discard,
.wbinvd = hvmemul_wbinvd_discard,
.cpuid = hvmemul_cpuid,
- .inject_hw_exception = hvmemul_inject_hw_exception,
- .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
.invlpg = hvmemul_invlpg,
hvm_dump_emulation_state(XENLOG_G_WARNING "MMCFG", &ctxt);
break;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
/* fallthrough */
default:
hvm_emulate_writeback(&ctxt);
hvm_inject_hw_exception(trapnr, errcode);
break;
case X86EMUL_EXCEPTION:
- if ( ctx.exn_pending )
- hvm_inject_event(&ctx.trap);
+ if ( ctx.ctxt.event_pending )
+ hvm_inject_event(&ctx.ctxt.event);
break;
}
hvmemul_ctxt->insn_buf_bytes = insn_bytes;
memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
}
-
- hvmemul_ctxt->exn_pending = 0;
}
void hvm_emulate_writeback(
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
break;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
/* fall through */
default:
hvm_emulate_writeback(&ctxt);
hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
return 0;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
break;
default:
break;
if ( rc == X86EMUL_EXCEPTION )
{
- if ( !hvmemul_ctxt->exn_pending )
+ if ( !hvmemul_ctxt->ctxt.event_pending )
{
unsigned long intr_info;
gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
goto fail;
}
- hvmemul_ctxt->trap.vector = (uint8_t)intr_info;
- hvmemul_ctxt->trap.insn_len = 0;
+ hvmemul_ctxt->ctxt.event.vector = (uint8_t)intr_info;
+ hvmemul_ctxt->ctxt.event.insn_len = 0;
}
if ( unlikely(curr->domain->debugger_attached) &&
- ((hvmemul_ctxt->trap.vector == TRAP_debug) ||
- (hvmemul_ctxt->trap.vector == TRAP_int3)) )
+ ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) ||
+ (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) )
{
domain_pause_for_debugger();
}
else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
- hvmemul_ctxt->trap.vector);
+ hvmemul_ctxt->ctxt.event.vector);
goto fail;
}
else
{
realmode_deliver_exception(
- hvmemul_ctxt->trap.vector,
- hvmemul_ctxt->trap.insn_len,
+ hvmemul_ctxt->ctxt.event.vector,
+ hvmemul_ctxt->ctxt.event.insn_len,
hvmemul_ctxt);
}
}
page_unlock(page);
put_page(page);
+ /*
+ * The previous lack of inject_{sw,hw}*() hooks caused exceptions raised
+ * by the emulator itself to become X86EMUL_UNHANDLEABLE. Such exceptions
+ * now set event_pending instead. Exceptions raised behind the back of
+ * the emulator don't yet set event_pending.
+ *
+ * For now, cause such cases to return to the X86EMUL_UNHANDLEABLE path,
+ * for no functional change from before. Future patches will fix this
+ * properly.
+ */
+ if ( rc == X86EMUL_EXCEPTION && ptwr_ctxt.ctxt.event_pending )
+ rc = X86EMUL_UNHANDLEABLE;
+
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
else
rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
+ /*
+ * The previous lack of inject_{sw,hw}*() hooks caused exceptions raised
+ * by the emulator itself to become X86EMUL_UNHANDLEABLE. Such exceptions
+ * now set event_pending instead. Exceptions raised behind the back of
+ * the emulator don't yet set event_pending.
+ *
+ * For now, cause such cases to return to the X86EMUL_UNHANDLEABLE path,
+ * for no functional change from before. Future patches will fix this
+ * properly.
+ */
+ if ( rc == X86EMUL_EXCEPTION && ctxt.event_pending )
+ rc = X86EMUL_UNHANDLEABLE;
+
if ( rc == X86EMUL_UNHANDLEABLE )
return 0;
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+ /*
+ * The previous lack of inject_{sw,hw}*() hooks caused exceptions raised
+ * by the emulator itself to become X86EMUL_UNHANDLEABLE. Such exceptions
+ * now set event_pending instead. Exceptions raised behind the back of
+ * the emulator don't yet set event_pending.
+ *
+ * For now, cause such cases to return to the X86EMUL_UNHANDLEABLE path,
+ * for no functional change from before. Future patches will fix this
+ * properly.
+ */
+ if ( r == X86EMUL_EXCEPTION && emul_ctxt.ctxt.event_pending )
+ r = X86EMUL_UNHANDLEABLE;
+
/*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
* would be a good unshadow hint. If we *do* decide to unshadow-on-fault
v->arch.paging.last_write_was_pt = 0;
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+ if ( r == X86EMUL_EXCEPTION && emul_ctxt.ctxt.event_pending )
+ r = X86EMUL_UNHANDLEABLE;
+
/*
* Only continue the search for the second half if there are no
* exceptions or pending actions. Otherwise, give up and re-enter
#define generate_exception_if(p, e, ec...) \
({ if ( (p) ) { \
- fail_if(ops->inject_hw_exception == NULL); \
- rc = ops->inject_hw_exception(e, mkec(e, ##ec, 0), ctxt) \
- ? : X86EMUL_EXCEPTION; \
+ x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \
+ rc = X86EMUL_EXCEPTION; \
goto done; \
} \
})
{
int rc, error_code, fault_type = EXC_GP;
- fail_if(ops->inject_sw_interrupt == NULL);
- fail_if(ops->inject_hw_exception == NULL);
-
/*
* Without hardware support, injecting software interrupts/exceptions is
* problematic.
}
}
- rc = ops->inject_sw_interrupt(type, vector, insn_len, ctxt);
+ x86_emul_software_event(type, vector, insn_len, ctxt);
+ rc = X86EMUL_OKAY;
done:
return rc;
/* Initialise output state in x86_emulate_ctxt */
ctxt->retire.raw = 0;
+ x86_emul_reset_event(ctxt);
op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
if ( op_bytes == 8 )
unsigned int *edx,
struct x86_emulate_ctxt *ctxt);
- /* inject_hw_exception */
- int (*inject_hw_exception)(
- uint8_t vector,
- int32_t error_code,
- struct x86_emulate_ctxt *ctxt);
-
- /* inject_sw_interrupt */
- int (*inject_sw_interrupt)(
- enum x86_swint_type type,
- uint8_t vector,
- uint8_t insn_len,
- struct x86_emulate_ctxt *ctxt);
-
/*
* get_fpu: Load emulated environment's FPU state onto processor.
* @exn_callback: On any FPU or SIMD exception, pass control to
bool singlestep:1; /* Singlestepping was active. */
};
} retire;
+
+ bool event_pending;
+ struct x86_event event;
};
/*
if ( rc == X86EMUL_EXCEPTION )
ASSERT(ctxt->regs->eip == orig_eip);
+ /*
+ * TODO: Make this true:
+ *
+ ASSERT(ctxt->event_pending == (rc == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without
+ * event_pending being set). In the meantime, use a slightly
+ * relaxed check...
+ */
+ if ( ctxt->event_pending )
+ ASSERT(rc == X86EMUL_EXCEPTION);
+
return rc;
}
#endif
+/*
+ * Record a pending hardware exception (@vector, @error_code) in @ctxt,
+ * for the caller of x86_emulate() to deliver.  At most one event may be
+ * pending at a time (asserted below).
+ *
+ * NOTE(review): event.insn_len is not written here; it is assumed to be
+ * zero from x86_emul_reset_event() at the start of emulation (the removed
+ * hvmemul_inject_hw_exception() set insn_len = 0 explicitly) -- confirm.
+ */
+static inline void x86_emul_hw_exception(
+    unsigned int vector, int error_code, struct x86_emulate_ctxt *ctxt)
+{
+    ASSERT(!ctxt->event_pending);
+
+    ctxt->event.vector = vector;
+    ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
+    ctxt->event.error_code = error_code;
+
+    ctxt->event_pending = true;
+}
+
+/*
+ * Record a pending software interrupt/exception in @ctxt.  Maps the
+ * software event @type onto the matching X86_EVENTTYPE_* and stores
+ * @vector and @insn_len for later delivery.  Software events carry no
+ * error code (X86_EVENT_NO_EC).
+ *
+ * NOTE(review): unlike the removed inject_sw_interrupt() hook, which
+ * returned X86EMUL_UNHANDLEABLE for an unknown type, this switch has no
+ * default case -- an out-of-range @type would leave event.type unwritten.
+ * Presumably callers validate @type beforehand; confirm, or consider an
+ * ASSERT in a default branch.
+ */
+static inline void x86_emul_software_event(
+    enum x86_swint_type type, uint8_t vector, uint8_t insn_len,
+    struct x86_emulate_ctxt *ctxt)
+{
+    ASSERT(!ctxt->event_pending);
+
+    switch ( type )
+    {
+    case x86_swint_icebp:
+        ctxt->event.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
+        break;
+
+    case x86_swint_int3:
+    case x86_swint_into:
+        ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
+        break;
+
+    case x86_swint_int:
+        ctxt->event.type = X86_EVENTTYPE_SW_INTERRUPT;
+        break;
+    }
+
+    ctxt->event.vector = vector;
+    ctxt->event.error_code = X86_EVENT_NO_EC;
+    ctxt->event.insn_len = insn_len;
+
+    ctxt->event_pending = true;
+}
+
+/*
+ * Clear any pending event state in @ctxt.  Called when initialising
+ * output state at the start of x86_emulate(), and by callers wishing to
+ * squash an event the emulator has already raised.
+ */
+static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
+{
+    ctxt->event_pending = false;
+    ctxt->event = (struct x86_event){};
+}
+
#endif /* __X86_EMULATE_H__ */
unsigned long seg_reg_accessed;
unsigned long seg_reg_dirty;
- bool_t exn_pending;
- struct x86_event trap;
-
uint32_t intr_shadow;
bool_t set_context;