} opc_tab[INSTR_MAX_COUNT] = {
    [INSTR_PAUSE]  = { X86EMUL_OPC_F3(0, 0x90) },
    [INSTR_INT3]   = { X86EMUL_OPC(   0, 0xcc) },
+    [INSTR_ICEBP]  = { X86EMUL_OPC(   0, 0xf1) },
    [INSTR_HLT]    = { X86EMUL_OPC(   0, 0xf4) },
    [INSTR_XSETBV] = { X86EMUL_OPC(0x0f, 0x01), MODRM(3, 2, 1) },
    [INSTR_VMRUN]  = { X86EMUL_OPC(0x0f, 0x01), MODRM(3, 3, 0) },
        svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
}
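
+/*
+ * Enable or disable the ICEBP (opcode 0xf1) intercept on every vCPU of the
+ * domain, so that INT1 hits can be reported to a monitor subscriber.
+ */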
+static void svm_set_icebp_interception(struct domain *d, bool enable)
+{
+    const struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+        uint32_t intercepts = vmcb_get_general2_intercepts(vmcb);
+
+        if ( enable )
+            intercepts |= GENERAL2_INTERCEPT_ICEBP;
+        else
+            intercepts &= ~GENERAL2_INTERCEPT_ICEBP;
+
+        vmcb_set_general2_intercepts(vmcb, intercepts);
+    }
+}
+
static void svm_save_dr(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    bool debug_state = (v->domain->debugger_attached ||
-                        v->domain->arch.monitor.software_breakpoint_enabled);
+                        v->domain->arch.monitor.software_breakpoint_enabled ||
+                        v->domain->arch.monitor.debug_exception_enabled);
    bool_t vcpu_guestmode = 0;
    struct vlapic *vlapic = vcpu_vlapic(v);
    return true;
}
-static void svm_propagate_intr(struct vcpu *v, unsigned long insn_len)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    struct x86_event event = {
-        .vector = vmcb->eventinj.fields.vector,
-        .type = vmcb->eventinj.fields.type,
-        .error_code = vmcb->exitinfo1,
-    };
-
-    event.insn_len = insn_len;
-    hvm_inject_event(&event);
-}
-
static struct hvm_function_table __initdata svm_function_table = {
    .name = "SVM",
    .cpu_up_prepare = svm_cpu_up_prepare,
    .msr_read_intercept = svm_msr_read_intercept,
    .msr_write_intercept = svm_msr_write_intercept,
    .enable_msr_interception = svm_enable_msr_interception,
+    .set_icebp_interception = svm_set_icebp_interception,
    .set_rdtsc_exiting = svm_set_rdtsc_exiting,
    .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
    .get_insn_bytes = svm_get_insn_bytes,
        HVMTRACE_0D(SMI);
        break;
+    case VMEXIT_ICEBP:
    case VMEXIT_EXCEPTION_DB:
        if ( !v->domain->debugger_attached )
-            hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+        {
+            int rc;
+            unsigned int trap_type;
+
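+            /*
+             * #DB is delivered with no instruction length, whereas ICEBP is
+             * an instruction (opcode 0xf1), so its length is passed on to
+             * the monitor and to any event re-injection.
+             */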
+            if ( likely(exit_reason != VMEXIT_ICEBP) )
+            {
+                trap_type = X86_EVENTTYPE_HW_EXCEPTION;
+                inst_len = 0;
+            }
+            else
+            {
+                trap_type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
+                inst_len = __get_instruction_length(v, INSTR_ICEBP);
+            }
+
+            rc = hvm_monitor_debug(regs->rip,
+                                   HVM_MONITOR_DEBUG_EXCEPTION,
+                                   trap_type, inst_len);
+            if ( rc < 0 )
+                goto unexpected_exit_type;
+            if ( !rc )
+                hvm_inject_exception(TRAP_debug,
+                                     trap_type, inst_len, X86_EVENT_NO_EC);
+        }
        else
            domain_pause_for_debugger();
        break;
            if ( rc < 0 )
                goto unexpected_exit_type;
            if ( !rc )
-                svm_propagate_intr(v, inst_len);
+                hvm_inject_exception(TRAP_int3,
+                                     X86_EVENTTYPE_SW_EXCEPTION,
+                                     inst_len, X86_EVENT_NO_EC);
        }
        break;
                                bool_t access_w, bool_t access_x);
    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
+    void (*set_icebp_interception)(struct domain *d, bool enable);
    bool_t (*is_singlestep_supported)(void);
    /* Alternate p2m */
void hvm_inject_event(const struct x86_event *event);
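
+/*
+ * Convenience wrapper: build an x86_event from the given vector, type,
+ * instruction length and error code, then inject it via hvm_inject_event().
+ */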
+static inline void hvm_inject_exception(
+    unsigned int vector, unsigned int type,
+    unsigned int insn_len, int error_code)
+{
+    struct x86_event event = {
+        .vector = vector,
+        .type = type,
+        .insn_len = insn_len,
+        .error_code = error_code,
+    };
+
+    hvm_inject_event(&event);
+}
+
static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    struct x86_event event = {
    return 0;
}
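
+/*
+ * Returns true if the hardware-specific hooks implement ICEBP interception
+ * (and the intercept state has been updated), false otherwise.
+ */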
+static inline bool hvm_set_icebp_interception(struct domain *d, bool enable)
+{
+    if ( hvm_funcs.set_icebp_interception )
+    {
+        hvm_funcs.set_icebp_interception(d, enable);
+        return true;
+    }
+
+    return false;
+}
+
static inline bool_t hvm_is_singlestep_supported(void)
{
    return (hvm_funcs.is_singlestep_supported &&