    v->arch.hvm.single_step = !v->arch.hvm.single_step;
}
+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
+{
+    ASSERT(atomic_read(&v->pause_count));
+
+    if ( !hvm_is_singlestep_supported() )
+        return;
+
+    if ( p2midx >= MAX_ALTP2M )
+        return;
+
+    v->arch.hvm.single_step = true;
+    v->arch.hvm.fast_single_step.enabled = true;
+    v->arch.hvm.fast_single_step.p2midx = p2midx;
+}
+
/*
 * Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
 * important, and preserved across vmentry/exit. Cook the values to make them
#include <asm/hvm/monitor.h>
#include <asm/altp2m.h>
#include <asm/monitor.h>
+#include <asm/p2m.h>
#include <asm/paging.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>
    case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
        if ( !ad->monitor.singlestep_enabled )
            return 0;
+        if ( curr->arch.hvm.fast_single_step.enabled )
+        {
+            /*
+             * Fast singlestep: switch to the requested altp2m view and
+             * disarm singlestepping without sending a vm_event.
+             */
+            p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);
+            curr->arch.hvm.single_step = false;
+            curr->arch.hvm.fast_single_step.enabled = false;
+            curr->arch.hvm.fast_single_step.p2midx = 0;
+            return 0;
+        }
        req.reason = VM_EVENT_REASON_SINGLESTEP;
        req.u.singlestep.gfn = gfn_of_rip(rip);
        sync = true;
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
                                vm_event_response_t *rsp)
{
-    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
+    if ( !(rsp->flags & (VM_EVENT_FLAG_TOGGLE_SINGLESTEP |
+                         VM_EVENT_FLAG_FAST_SINGLESTEP)) )
        return;
    if ( !is_hvm_domain(d) )
    ASSERT(atomic_read(&v->vm_event_pause_count));
-    hvm_toggle_singlestep(v);
+    if ( rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
+        hvm_toggle_singlestep(v);
+    else
+        hvm_fast_singlestep(v, rsp->u.fast_singlestep.p2midx);
}
void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
/* Caller should pause vcpu before calling this function */
void hvm_toggle_singlestep(struct vcpu *v);
+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                              struct npfec npfec);
    bool                flag_dr_dirty;
    bool                debug_state_latch;
    bool                single_step;
+    struct {
+        bool     enabled;
+        uint16_t p2midx;
+    } fast_single_step;
    struct hvm_vcpu_asid n1asid;
 * interrupt pending after resuming the VCPU.
 */
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
+/*
+ * Execute fast singlestepping on vm_event response.
+ * Requires the vCPU to be paused already (synchronous events only).
+ *
+ * The response must set fast_singlestep.p2midx to the altp2m view to which
+ * Xen will switch the vCPU on the occurrence of the first singlestep, after
+ * which singlestepping is automatically disabled.
+ */
+#define VM_EVENT_FLAG_FAST_SINGLESTEP (1 << 11)
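
For illustration, a sketch of how a monitor application might use the new flag
from userspace; the respond_fast_singlestep() helper, the view_idx parameter
and the include set are assumptions for this example and are not part of the
patch. The flag is typically paired with VM_EVENT_FLAG_ALTERNATE_P2M: the
response moves the vCPU to an unrestricted view for the faulting instruction,
and fast_singlestep.p2midx names the restricted view Xen switches back to on
the first singlestep, without delivering a singlestep event to the monitor.

/*
 * Illustrative sketch only -- not part of this patch.  Assumes a userspace
 * monitor built against the Xen public headers; respond_fast_singlestep(),
 * view_idx and the surrounding ring/event-channel plumbing are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <xenctrl.h>
#include <xen/vm_event.h>

static void respond_fast_singlestep(const vm_event_request_t *req,
                                    vm_event_response_t *rsp,
                                    uint16_t view_idx)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;
    /* Preserve the pause handshake so the vCPU is unpaused on response. */
    rsp->flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED;

    /* Let the faulting instruction run in the unrestricted default view. */
    rsp->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
    rsp->altp2m_idx = 0;

    /*
     * Have Xen switch back to the restricted view on the first singlestep
     * and disable singlestepping, without sending another vm_event.
     */
    rsp->flags |= VM_EVENT_FLAG_FAST_SINGLESTEP;
    rsp->u.fast_singlestep.p2midx = view_idx;
}

Compared with toggling singlestep via VM_EVENT_FLAG_TOGGLE_SINGLESTEP and
waiting for the resulting singlestep event, this saves one request/response
round trip per violation.
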
/*
 * Reasons for the vm event request
    uint64_t gfn;
};
+struct vm_event_fast_singlestep {
+    uint16_t p2midx;
+};
+
struct vm_event_debug {
    uint64_t gfn;
    uint32_t insn_length;
        struct vm_event_mov_to_msr            mov_to_msr;
        struct vm_event_desc_access           desc_access;
        struct vm_event_singlestep            singlestep;
+        struct vm_event_fast_singlestep       fast_singlestep;
        struct vm_event_debug                 software_breakpoint;
        struct vm_event_debug                 debug_exception;
        struct vm_event_cpuid                 cpuid;