bool enable);
int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
bool enable);
+int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
+ bool sync);
/**
* This function enables / disables emulation for each REP for a
* REP-compatible instruction.
return do_domctl(xch, &domctl);
}
+int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
+                      bool sync)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_VMEXIT;
+    domctl.u.monitor_op.u.vmexit.sync = sync;
+
+    return do_domctl(xch, &domctl);
+}
+
/*
* Local variables:
* mode: C
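For context, a minimal toolstack-side sketch of how a monitor application might use the new call, first checking that the hypervisor advertises the event. The helper name is illustrative, error handling is minimal, and the usual xenctrl include paths are assumed:

/* Illustrative usage only; not part of this patch. */
#include <stdbool.h>
#include <stdint.h>
#include <xenctrl.h>

static int enable_vmexit_monitoring(xc_interface *xch, uint32_t domain_id,
                                    bool sync)
{
    uint32_t capabilities = 0;

    /* Bail out if the hypervisor does not report VMEXIT monitoring support. */
    if ( xc_monitor_get_capabilities(xch, domain_id, &capabilities) ||
         !(capabilities & (1U << XEN_DOMCTL_MONITOR_EVENT_VMEXIT)) )
        return -1;

    /* sync == true: each vCPU pauses at every VMEXIT until we respond. */
    return xc_monitor_vmexit(xch, domain_id, true, sync);
}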
return monitor_traps(curr, true, &req) >= 0;
}
+/*
+ * Returns 0 if the VMEXIT should be handled normally, a positive value if
+ * the vCPU has been paused waiting for the monitor's response (nothing more
+ * to do here), or a negative value on error.
+ */
+int hvm_monitor_vmexit(unsigned long exit_reason,
+                       unsigned long exit_qualification)
+{
+    struct vcpu *curr = current;
+    struct arch_domain *ad = &curr->domain->arch;
+    vm_event_request_t req = {};
+
+    if ( !ad->monitor.vmexit_enabled )
+        return 0;
+
+    req.reason = VM_EVENT_REASON_VMEXIT;
+    req.u.vmexit.reason = exit_reason;
+    req.u.vmexit.qualification = exit_qualification;
+
+    set_npt_base(curr, &req);
+
+    if ( ad->monitor.vmexit_sync )
+        hvm_maybe_deassert_evtchn_irq();
+
+    return monitor_traps(curr, !!ad->monitor.vmexit_sync, &req);
+}
+
/*
* Local variables:
* mode: C
}
__vmread(VM_EXIT_REASON, &exit_reason);
+ __vmread(EXIT_QUALIFICATION, &exit_qualification);
if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, exit_reason,
/* Now enable interrupts so it's safe to take locks. */
local_irq_enable();
+    if ( unlikely(hvm_monitor_vmexit(exit_reason, exit_qualification)) )
+        return;
+
/*
* If the guest has the ability to switch EPTP without an exit,
* figure out whether it has done so and update the altp2m data.
*/
unsigned int inguest_pagefault_disabled : 1;
unsigned int control_register_values : 1;
+ unsigned int vmexit_enabled : 1;
+ unsigned int vmexit_sync : 1;
struct monitor_msr_bitmap *msr_bitmap;
uint64_t write_ctrlreg_mask[4];
} monitor;
bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
uint16_t kind);
+int hvm_monitor_vmexit(unsigned long exit_reason,
+ unsigned long exit_qualification);
#endif /* __ASM_X86_HVM_MONITOR_H__ */
(1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
(1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
(1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT));
+ (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_VMEXIT));
if ( hvm_is_singlestep_supported() )
capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
break;
}
+    case XEN_DOMCTL_MONITOR_EVENT_VMEXIT:
+    {
+        bool old_status = ad->monitor.vmexit_enabled;
+
+        if ( unlikely(old_status == requested_status) )
+            return -EEXIST;
+
+        domain_pause(d);
+        ad->monitor.vmexit_enabled = requested_status;
+        ad->monitor.vmexit_sync = mop->u.vmexit.sync;
+        domain_unpause(d);
+        break;
+    }
+
default:
/*
* Should not be reached unless arch_monitor_get_capabilities() is
#include <xen/sched.h>
#include <xen/mem_access.h>
#include <asm/vm_event.h>
+#include <asm/mem_sharing.h>
/* Implicitly serialized by the domctl lock. */
int vm_event_init_domain(struct domain *d)
#define XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED 10
/* Enabled by default */
#define XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT 11
+#define XEN_DOMCTL_MONITOR_EVENT_VMEXIT 12
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
/* Pause vCPU until response */
uint8_t sync;
} debug_exception;
+
+        struct {
+            /* Pause vCPU until response; Xen does not process the VMEXIT */
+            uint8_t sync;
+        } vmexit;
} u;
};
#define VM_EVENT_REASON_DESCRIPTOR_ACCESS 13
/* Current instruction is not implemented by the emulator */
#define VM_EVENT_REASON_EMUL_UNIMPLEMENTED 14
+/* VMEXIT */
+#define VM_EVENT_REASON_VMEXIT 15
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
uint8_t data[16]; /* Has to be completely filled */
};
+struct vm_event_vmexit {
+ uint64_t reason;
+ uint64_t qualification;
+};
+
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
struct vm_event_debug software_breakpoint;
struct vm_event_debug debug_exception;
struct vm_event_cpuid cpuid;
+ struct vm_event_vmexit vmexit;
union {
struct vm_event_interrupt_x86 x86;
} interrupt;
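To close the loop, a consumer-side sketch in the style of tools/misc/xen-access.c, assuming an already set-up vm_event ring loop that dispatches on req.reason; the function name is hypothetical:

#include <inttypes.h>
#include <stdio.h>
#include <xenctrl.h>
#include <xen/vm_event.h>

/* Hypothetical handler invoked from an existing vm_event ring loop. */
static void handle_vmexit(const vm_event_request_t *req,
                          vm_event_response_t *rsp)
{
    printf("vcpu %" PRIu32 ": VMEXIT reason 0x%" PRIx64
           ", qualification 0x%" PRIx64 "\n",
           req->vcpu_id, req->u.vmexit.reason, req->u.vmexit.qualification);

    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason = req->reason;
    /* For a synchronous event this flag unpauses the vCPU when we reply. */
    rsp->flags = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;
}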