* violations, so squash them.
*/
hvmemul_ctxt->exn_pending = 0;
- hvmemul_ctxt->trap = (struct hvm_trap){};
+ hvmemul_ctxt->trap = (struct x86_event){};
rc = X86EMUL_OKAY;
}
break;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_trap(&ctxt.trap);
+ hvm_inject_event(&ctxt.trap);
/* fallthrough */
default:
hvm_emulate_writeback(&ctxt);
break;
case X86EMUL_EXCEPTION:
if ( ctx.exn_pending )
- hvm_inject_trap(&ctx.trap);
+ hvm_inject_event(&ctx.trap);
break;
}
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
- hvm_inject_trap(&v->arch.hvm_vcpu.inject_trap);
+ hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
v->arch.hvm_vcpu.inject_trap.vector = -1;
}
}
domain_shutdown(d, reason);
}
-void hvm_inject_trap(const struct hvm_trap *trap)
+void hvm_inject_event(const struct x86_event *event)
{
struct vcpu *curr = current;
if ( nestedhvm_enabled(curr->domain) &&
!nestedhvm_vmswitch_in_progress(curr) &&
nestedhvm_vcpu_in_guestmode(curr) &&
- nhvm_vmcx_guest_intercepts_trap(
- curr, trap->vector, trap->error_code) )
+ nhvm_vmcx_guest_intercepts_event(
+ curr, event->vector, event->error_code) )
{
enum nestedhvm_vmexits nsret;
- nsret = nhvm_vcpu_vmexit_trap(curr, trap);
+ nsret = nhvm_vcpu_vmexit_event(curr, event);
switch ( nsret )
{
}
}
- hvm_funcs.inject_trap(trap);
-}
-
-void hvm_inject_hw_exception(unsigned int trapnr, int errcode)
-{
- struct hvm_trap trap = {
- .vector = trapnr,
- .type = X86_EVENTTYPE_HW_EXCEPTION,
- .error_code = errcode };
- hvm_inject_trap(&trap);
-}
-
-void hvm_inject_page_fault(int errcode, unsigned long cr2)
-{
- struct hvm_trap trap = {
- .vector = TRAP_page_fault,
- .type = X86_EVENTTYPE_HW_EXCEPTION,
- .error_code = errcode,
- .cr2 = cr2 };
- hvm_inject_trap(&trap);
+ hvm_funcs.inject_event(event);
}
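
For illustration only (not part of the patch): with the single hvm_inject_event() entry point, a caller that needs a software-originated event, which the plain wrappers further down do not cover, builds the structure itself. A minimal sketch, assuming a hypothetical vector 0x80 and the 2-byte CD nn encoding of "int $n":

    struct x86_event swint = {
        .vector = 0x80,                          /* hypothetical vector */
        .type = X86_EVENTTYPE_SW_INTERRUPT,
        .error_code = HVM_DELIVER_NO_ERROR_CODE,
        .insn_len = 2,                           /* "int $n" is CD nn */
    };

    hvm_inject_event(&swint);

The insn_len matters because both vendor back-ends use it to advance past the trapping instruction (nextrip on SVM, VM_ENTRY_INSTRUCTION_LEN on VMX).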
int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
break;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_trap(&ctxt.trap);
+ hvm_inject_event(&ctxt.trap);
/* fall through */
default:
hvm_emulate_writeback(&ctxt);
return 0;
case X86EMUL_EXCEPTION:
if ( ctxt.exn_pending )
- hvm_inject_trap(&ctxt.trap);
+ hvm_inject_event(&ctxt.trap);
break;
default:
break;
}
int
-nsvm_vcpu_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap)
+nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *trap)
{
ASSERT(vcpu_nestedhvm(v).nv_vvmcx != NULL);
}
bool_t
-nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr, int errcode)
+nsvm_vmcb_guest_intercepts_event(
+ struct vcpu *v, unsigned int vector, int errcode)
{
return nsvm_vmcb_guest_intercepts_exitcode(v,
- guest_cpu_user_regs(), VMEXIT_EXCEPTION_DE + trapnr);
+ guest_cpu_user_regs(), VMEXIT_EXCEPTION_DE + vector);
}
static int
passive_domain_destroy(v);
}
-static void svm_inject_trap(const struct hvm_trap *trap)
+static void svm_inject_event(const struct x86_event *event)
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
- eventinj_t event = vmcb->eventinj;
- struct hvm_trap _trap = *trap;
+ eventinj_t eventinj = vmcb->eventinj;
+ struct x86_event _event = *event;
const struct cpu_user_regs *regs = guest_cpu_user_regs();
- switch ( _trap.vector )
+ switch ( _event.vector )
{
case TRAP_debug:
if ( regs->eflags & X86_EFLAGS_TF )
}
}
- if ( unlikely(event.fields.v) &&
- (event.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+ if ( unlikely(eventinj.fields.v) &&
+ (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
{
- _trap.vector = hvm_combine_hw_exceptions(
- event.fields.vector, _trap.vector);
- if ( _trap.vector == TRAP_double_fault )
- _trap.error_code = 0;
+ _event.vector = hvm_combine_hw_exceptions(
+ eventinj.fields.vector, _event.vector);
+ if ( _event.vector == TRAP_double_fault )
+ _event.error_code = 0;
}
- event.bytes = 0;
- event.fields.v = 1;
- event.fields.vector = _trap.vector;
+ eventinj.bytes = 0;
+ eventinj.fields.v = 1;
+ eventinj.fields.vector = _event.vector;
/* Refer to AMD Vol 2: System Programming, 15.20 Event Injection. */
- switch ( _trap.type )
+ switch ( _event.type )
{
case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */
/*
* moved eip forward if appropriate.
*/
if ( cpu_has_svm_nrips )
- vmcb->nextrip = regs->eip + _trap.insn_len;
- event.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+ vmcb->nextrip = regs->eip + _event.insn_len;
+ eventinj.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
break;
case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
*/
if ( cpu_has_svm_nrips )
vmcb->nextrip = regs->eip;
- event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+ eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
break;
case X86_EVENTTYPE_SW_EXCEPTION: /* int3, into */
* the correct faulting eip should a fault occur.
*/
if ( cpu_has_svm_nrips )
- vmcb->nextrip = regs->eip + _trap.insn_len;
- event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+ vmcb->nextrip = regs->eip + _event.insn_len;
+ eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
break;
default:
- event.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
- event.fields.ev = (_trap.error_code != HVM_DELIVER_NO_ERROR_CODE);
- event.fields.errorcode = _trap.error_code;
+ eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+ eventinj.fields.ev = (_event.error_code != HVM_DELIVER_NO_ERROR_CODE);
+ eventinj.fields.errorcode = _event.error_code;
break;
}
- vmcb->eventinj = event;
+ vmcb->eventinj = eventinj;
- if ( _trap.vector == TRAP_page_fault )
+ if ( _event.vector == TRAP_page_fault )
{
- curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
- vmcb_set_cr2(vmcb, _trap.cr2);
- HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code, TRC_PAR_LONG(_trap.cr2));
+ curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
+ vmcb_set_cr2(vmcb, _event.cr2);
+ HVMTRACE_LONG_2D(PF_INJECT, _event.error_code, TRC_PAR_LONG(_event.cr2));
}
else
{
- HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
+ HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
}
}
.set_guest_pat = svm_set_guest_pat,
.get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
- .inject_trap = svm_inject_trap,
+ .inject_event = svm_inject_event,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
.invlpg = svm_invlpg,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
.nhvm_vcpu_destroy = nsvm_vcpu_destroy,
.nhvm_vcpu_reset = nsvm_vcpu_reset,
- .nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap,
+ .nhvm_vcpu_vmexit_event = nsvm_vcpu_vmexit_event,
.nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3,
- .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
+ .nhvm_vmcx_guest_intercepts_event = nsvm_vmcb_guest_intercepts_event,
.nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
.nhvm_intr_blocked = nsvm_intr_blocked,
.nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
nvmx->intr.intr_info, nvmx->intr.error_code);
}
-static int nvmx_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap)
+static int nvmx_vmexit_event(struct vcpu *v, const struct x86_event *event)
{
- nvmx_enqueue_n2_exceptions(v, trap->vector, trap->error_code,
+ nvmx_enqueue_n2_exceptions(v, event->vector, event->error_code,
hvm_intsrc_none);
return NESTEDHVM_VMEXIT_DONE;
}
* - #DB is X86_EVENTTYPE_HW_EXCEPTION, except when generated by
* opcode 0xf1 (which is X86_EVENTTYPE_PRI_SW_EXCEPTION)
*/
-static void vmx_inject_trap(const struct hvm_trap *trap)
+static void vmx_inject_event(const struct x86_event *event)
{
unsigned long intr_info;
struct vcpu *curr = current;
- struct hvm_trap _trap = *trap;
+ struct x86_event _event = *event;
- switch ( _trap.vector | -(_trap.type == X86_EVENTTYPE_SW_INTERRUPT) )
+ switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
{
case TRAP_debug:
if ( guest_cpu_user_regs()->eflags & X86_EFLAGS_TF )
write_debugreg(6, read_debugreg(6) | DR_STEP);
}
if ( !nestedhvm_vcpu_in_guestmode(curr) ||
- !nvmx_intercepts_exception(curr, TRAP_debug, _trap.error_code) )
+ !nvmx_intercepts_exception(curr, TRAP_debug, _event.error_code) )
{
unsigned long val;
break;
case TRAP_page_fault:
- ASSERT(_trap.type == X86_EVENTTYPE_HW_EXCEPTION);
- curr->arch.hvm_vcpu.guest_cr[2] = _trap.cr2;
+ ASSERT(_event.type == X86_EVENTTYPE_HW_EXCEPTION);
+ curr->arch.hvm_vcpu.guest_cr[2] = _event.cr2;
break;
}
(MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) ==
X86_EVENTTYPE_HW_EXCEPTION) )
{
- _trap.vector = hvm_combine_hw_exceptions(
- (uint8_t)intr_info, _trap.vector);
- if ( _trap.vector == TRAP_double_fault )
- _trap.error_code = 0;
+ _event.vector = hvm_combine_hw_exceptions(
+ (uint8_t)intr_info, _event.vector);
+ if ( _event.vector == TRAP_double_fault )
+ _event.error_code = 0;
}
- if ( _trap.type >= X86_EVENTTYPE_SW_INTERRUPT )
- __vmwrite(VM_ENTRY_INSTRUCTION_LEN, _trap.insn_len);
+ if ( _event.type >= X86_EVENTTYPE_SW_INTERRUPT )
+ __vmwrite(VM_ENTRY_INSTRUCTION_LEN, _event.insn_len);
if ( nestedhvm_vcpu_in_guestmode(curr) &&
- nvmx_intercepts_exception(curr, _trap.vector, _trap.error_code) )
+ nvmx_intercepts_exception(curr, _event.vector, _event.error_code) )
{
nvmx_enqueue_n2_exceptions (curr,
INTR_INFO_VALID_MASK |
- MASK_INSR(_trap.type, INTR_INFO_INTR_TYPE_MASK) |
- MASK_INSR(_trap.vector, INTR_INFO_VECTOR_MASK),
- _trap.error_code, hvm_intsrc_none);
+ MASK_INSR(_event.type, INTR_INFO_INTR_TYPE_MASK) |
+ MASK_INSR(_event.vector, INTR_INFO_VECTOR_MASK),
+ _event.error_code, hvm_intsrc_none);
return;
}
else
- __vmx_inject_exception(_trap.vector, _trap.type, _trap.error_code);
+ __vmx_inject_exception(_event.vector, _event.type, _event.error_code);
- if ( (_trap.vector == TRAP_page_fault) &&
- (_trap.type == X86_EVENTTYPE_HW_EXCEPTION) )
- HVMTRACE_LONG_2D(PF_INJECT, _trap.error_code,
+ if ( (_event.vector == TRAP_page_fault) &&
+ (_event.type == X86_EVENTTYPE_HW_EXCEPTION) )
+ HVMTRACE_LONG_2D(PF_INJECT, _event.error_code,
TRC_PAR_LONG(curr->arch.hvm_vcpu.guest_cr[2]));
else
- HVMTRACE_2D(INJ_EXC, _trap.vector, _trap.error_code);
+ HVMTRACE_2D(INJ_EXC, _event.vector, _event.error_code);
}
static int vmx_event_pending(struct vcpu *v)
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
- .inject_trap = vmx_inject_trap,
+ .inject_event = vmx_inject_event,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
.invlpg = vmx_invlpg,
.nhvm_vcpu_reset = nvmx_vcpu_reset,
.nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base,
.nhvm_vmcx_hap_enabled = nvmx_ept_enabled,
- .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
- .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
+ .nhvm_vmcx_guest_intercepts_event = nvmx_intercepts_exception,
+ .nhvm_vcpu_vmexit_event = nvmx_vmexit_event,
.nhvm_intr_blocked = nvmx_intr_blocked,
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
*/
static void vmx_propagate_intr(unsigned long intr)
{
- struct hvm_trap trap = {
+ struct x86_event event = {
.vector = MASK_EXTR(intr, INTR_INFO_VECTOR_MASK),
.type = MASK_EXTR(intr, INTR_INFO_INTR_TYPE_MASK),
};
if ( intr & INTR_INFO_DELIVER_CODE_MASK )
{
__vmread(VM_EXIT_INTR_ERROR_CODE, &tmp);
- trap.error_code = tmp;
+ event.error_code = tmp;
}
else
- trap.error_code = HVM_DELIVER_NO_ERROR_CODE;
+ event.error_code = HVM_DELIVER_NO_ERROR_CODE;
- if ( trap.type >= X86_EVENTTYPE_SW_INTERRUPT )
+ if ( event.type >= X86_EVENTTYPE_SW_INTERRUPT )
{
__vmread(VM_EXIT_INSTRUCTION_LEN, &tmp);
- trap.insn_len = tmp;
+ event.insn_len = tmp;
}
else
- trap.insn_len = 0;
+ event.insn_len = 0;
- hvm_inject_trap(&trap);
+ hvm_inject_event(&event);
}
static void vmx_idtv_reinject(unsigned long idtv_info)
regs->eflags = eflags;
}
-bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
- int error_code)
+bool_t nvmx_intercepts_exception(
+ struct vcpu *v, unsigned int vector, int error_code)
{
u32 exception_bitmap, pfec_match=0, pfec_mask=0;
int r;
- ASSERT ( trap < 32 );
+ ASSERT(vector < 32);
exception_bitmap = get_vvmcs(v, EXCEPTION_BITMAP);
- r = exception_bitmap & (1 << trap) ? 1: 0;
+ r = exception_bitmap & (1 << vector) ? 1 : 0;
- if ( trap == TRAP_page_fault ) {
+ if ( vector == TRAP_page_fault )
+ {
pfec_match = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH);
pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK);
if ( (error_code & pfec_mask) != pfec_match )
BUILD_BUG_ON(x86_seg_ds != 3);
BUILD_BUG_ON(x86_seg_fs != 4);
BUILD_BUG_ON(x86_seg_gs != 5);
+
+ /*
+ * Check X86_EVENTTYPE_* against VMCB EVENTINJ and VMCS INTR_INFO type
+ * fields.
+ */
+ BUILD_BUG_ON(X86_EVENTTYPE_EXT_INTR != 0);
+ BUILD_BUG_ON(X86_EVENTTYPE_NMI != 2);
+ BUILD_BUG_ON(X86_EVENTTYPE_HW_EXCEPTION != 3);
+ BUILD_BUG_ON(X86_EVENTTYPE_SW_INTERRUPT != 4);
+ BUILD_BUG_ON(X86_EVENTTYPE_PRI_SW_EXCEPTION != 5);
+ BUILD_BUG_ON(X86_EVENTTYPE_SW_EXCEPTION != 6);
}
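
These compile-time checks matter because the enum values are used as raw hardware encodings: the injection paths write them straight into EVENTINJ[10:8] / INTR_INFO[10:8]. A hedged sketch of the VMX side, reusing the INTR_INFO_* masks and MASK_INSR() already used elsewhere in this patch:

    /* VM entry interruption info for a hardware #PF: valid bit set,
     * type = X86_EVENTTYPE_HW_EXCEPTION (3), vector = TRAP_page_fault (14). */
    unsigned long intr_info = INTR_INFO_VALID_MASK |
        MASK_INSR(X86_EVENTTYPE_HW_EXCEPTION, INTR_INFO_INTR_TYPE_MASK) |
        MASK_INSR(TRAP_page_fault, INTR_INFO_VECTOR_MASK);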
#ifdef __XEN__
x86_swint_emulate_all, /* Help needed with all software events */
};
+/*
+ * x86 event types. This enumeration is valid for:
+ * Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
+ * AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
+ */
+enum x86_event_type {
+ X86_EVENTTYPE_EXT_INTR, /* External interrupt */
+ X86_EVENTTYPE_NMI = 2, /* NMI */
+ X86_EVENTTYPE_HW_EXCEPTION, /* Hardware exception */
+ X86_EVENTTYPE_SW_INTERRUPT, /* Software interrupt (CD nn) */
+ X86_EVENTTYPE_PRI_SW_EXCEPTION, /* ICEBP (F1) */
+ X86_EVENTTYPE_SW_EXCEPTION, /* INT3 (CC), INTO (CE) */
+};
+
+struct x86_event {
+ int16_t vector;
+ uint8_t type; /* X86_EVENTTYPE_* */
+ uint8_t insn_len; /* Instruction length */
+ uint32_t error_code; /* HVM_DELIVER_NO_ERROR_CODE if n/a */
+ unsigned long cr2; /* Only for TRAP_page_fault h/w exception */
+};
+
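
A small illustrative helper (hypothetical, not part of the patch) showing why the enum ordering is load-bearing: the vendor injection code tests "type >= X86_EVENTTYPE_SW_INTERRUPT" to decide whether insn_len is meaningful, which relies on the three software-originated types being the last entries:

    /* Hypothetical helper mirroring the ">= SW_INTERRUPT" tests in the
     * SVM/VMX injection paths above. */
    static inline int x86_event_has_insn_len(const struct x86_event *ev)
    {
        return ev->type >= X86_EVENTTYPE_SW_INTERRUPT;
    }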
/*
* Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
* segment descriptor. It happens to match the format of an AMD SVM VMCB.
unsigned long seg_reg_dirty;
bool_t exn_pending;
- struct hvm_trap trap;
+ struct x86_event trap;
uint32_t intr_shadow;
#define HVM_HAP_SUPERPAGE_2MB 0x00000001
#define HVM_HAP_SUPERPAGE_1GB 0x00000002
-struct hvm_trap {
- int16_t vector;
- uint8_t type; /* X86_EVENTTYPE_* */
- uint8_t insn_len; /* Instruction length */
- uint32_t error_code; /* HVM_DELIVER_NO_ERROR_CODE if n/a */
- unsigned long cr2; /* Only for TRAP_page_fault h/w exception */
-};
-
/*
* The hardware virtual machine (HVM) interface abstracts away from the
* x86/x86_64 CPU virtualization assist specifics. Currently this interface
void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
- void (*inject_trap)(const struct hvm_trap *trap);
+ void (*inject_event)(const struct x86_event *event);
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*nhvm_vcpu_initialise)(struct vcpu *v);
void (*nhvm_vcpu_destroy)(struct vcpu *v);
int (*nhvm_vcpu_reset)(struct vcpu *v);
- int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, const struct hvm_trap *trap);
+ int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
- bool_t (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v,
- unsigned int trapnr,
- int errcode);
+ bool_t (*nhvm_vmcx_guest_intercepts_event)(
+ struct vcpu *v, unsigned int vector, int errcode);
bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);
-void hvm_inject_trap(const struct hvm_trap *trap);
-void hvm_inject_hw_exception(unsigned int trapnr, int errcode);
-void hvm_inject_page_fault(int errcode, unsigned long cr2);
+void hvm_inject_event(const struct x86_event *event);
+
+static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
+{
+ struct x86_event event = {
+ .vector = vector,
+ .type = X86_EVENTTYPE_HW_EXCEPTION,
+ .error_code = errcode,
+ };
+
+ hvm_inject_event(&event);
+}
+
+static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
+{
+ struct x86_event event = {
+ .vector = TRAP_page_fault,
+ .type = X86_EVENTTYPE_HW_EXCEPTION,
+ .error_code = errcode,
+ .cr2 = cr2,
+ };
+
+ hvm_inject_event(&event);
+}
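
Typical call sites after this change remain one-liners; an illustrative sketch with hypothetical condition and address variables:

    if ( fault_is_gp )                           /* hypothetical condition */
        hvm_inject_hw_exception(TRAP_gp_fault, 0);
    else
        hvm_inject_page_fault(PFEC_page_present | PFEC_write_access, gla);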
static inline int hvm_event_pending(struct vcpu *v)
{
(1U << TRAP_alignment_check) | \
(1U << TRAP_machine_check))
-/*
- * x86 event types. This enumeration is valid for:
- * Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
- * AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
- */
-#define X86_EVENTTYPE_EXT_INTR 0 /* external interrupt */
-#define X86_EVENTTYPE_NMI 2 /* NMI */
-#define X86_EVENTTYPE_HW_EXCEPTION 3 /* hardware exception */
-#define X86_EVENTTYPE_SW_INTERRUPT 4 /* software interrupt (CD nn) */
-#define X86_EVENTTYPE_PRI_SW_EXCEPTION 5 /* ICEBP (F1) */
-#define X86_EVENTTYPE_SW_EXCEPTION 6 /* INT3 (CC), INTO (CE) */
-
int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);
uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
-/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
- * 'trapnr' exception.
- */
+/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
+ * the vector of the given event.
+ */
-static inline int nhvm_vcpu_vmexit_trap(struct vcpu *v,
- const struct hvm_trap *trap)
+static inline int nhvm_vcpu_vmexit_event(
+ struct vcpu *v, const struct x86_event *event)
{
- return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trap);
+ return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
}
/* returns l1 guest's cr3 that points to the page table used to
}
-/* returns true, when l1 guest intercepts the specified trap */
+/* returns true, when l1 guest intercepts the specified event */
-static inline bool_t nhvm_vmcx_guest_intercepts_trap(struct vcpu *v,
- unsigned int trap,
- int errcode)
+static inline bool_t nhvm_vmcx_guest_intercepts_event(
+ struct vcpu *v, unsigned int vector, int errcode)
{
- return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap, errcode);
+ return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
}
/* returns true when l1 guest wants to use hap to run l2 guest */
int nsvm_vcpu_initialise(struct vcpu *v);
int nsvm_vcpu_reset(struct vcpu *v);
int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
-int nsvm_vcpu_vmexit_trap(struct vcpu *v, const struct hvm_trap *trap);
+int nsvm_vcpu_vmexit_event(struct vcpu *v, const struct x86_event *event);
uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
-bool_t nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr,
- int errcode);
+bool_t nsvm_vmcb_guest_intercepts_event(
+ struct vcpu *v, unsigned int vector, int errcode);
bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
void *fpu_exception_callback_arg;
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
- struct hvm_trap inject_trap;
+ struct x86_event inject_trap;
struct viridian_vcpu viridian;
};
int nvmx_vcpu_reset(struct vcpu *v);
uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
-bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap,
- int error_code);
+bool_t nvmx_intercepts_exception(
+ struct vcpu *v, unsigned int vector, int error_code);
void nvmx_domain_relinquish_resources(struct domain *d);
bool_t nvmx_ept_enabled(struct vcpu *v);