From 5ab6654cf0f2a90b23d5ab714fa49ae038d21e17 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Thu, 12 Nov 2015 17:03:20 +0100
Subject: [PATCH] x86/event: correct debug event generation

RIP is not a linear address, and hence should not on its own be subject
to GVA -> GFN translation. Once at it, move all of the (perhaps
expensive) operations in the two functions into their main if()'s
bodies, and improve the error code passed to the translation function.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/event.c        | 51 +++++++++++++++++++++++++++++++++++++--------------
 xen/include/asm-x86/hvm/event.h |  4 ++--
 2 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 4097af02ae..73c8f1430b 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -144,36 +144,59 @@ void hvm_event_guest_request(void)
     }
 }
 
-int hvm_event_int3(unsigned long gla)
+int hvm_event_int3(unsigned long rip)
 {
     int rc = 0;
-    uint32_t pfec = PFEC_page_present;
     struct vcpu *curr = current;
-    vm_event_request_t req = {
-        .reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT,
-        .vcpu_id = curr->vcpu_id,
-        .u.software_breakpoint.gfn = paging_gva_to_gfn(curr, gla, &pfec)
-    };
 
     if ( curr->domain->arch.monitor.software_breakpoint_enabled )
+    {
+        struct segment_register sreg;
+        uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+        vm_event_request_t req = {
+            .reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT,
+            .vcpu_id = curr->vcpu_id,
+        };
+
+        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
+            pfec |= PFEC_user_mode;
+
+        hvm_get_segment_register(curr, x86_seg_cs, &sreg);
+        req.u.software_breakpoint.gfn = paging_gva_to_gfn(curr,
+                                                          sreg.base + rip,
+                                                          &pfec);
+
         rc = hvm_event_traps(1, &req);
+    }
 
     return rc;
 }
 
-int hvm_event_single_step(unsigned long gla)
+int hvm_event_single_step(unsigned long rip)
 {
     int rc = 0;
-    uint32_t pfec = PFEC_page_present;
     struct vcpu *curr = current;
-    vm_event_request_t req = {
-        .reason = VM_EVENT_REASON_SINGLESTEP,
-        .vcpu_id = curr->vcpu_id,
-        .u.singlestep.gfn = paging_gva_to_gfn(curr, gla, &pfec)
-    };
 
     if ( curr->domain->arch.monitor.singlestep_enabled )
+    {
+        struct segment_register sreg;
+        uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+        vm_event_request_t req = {
+            .reason = VM_EVENT_REASON_SINGLESTEP,
+            .vcpu_id = curr->vcpu_id,
+        };
+
+        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
+            pfec |= PFEC_user_mode;
+
+        hvm_get_segment_register(curr, x86_seg_cs, &sreg);
+        req.u.singlestep.gfn = paging_gva_to_gfn(curr, sreg.base + rip,
+                                                 &pfec);
+
         rc = hvm_event_traps(1, &req);
+    }
 
     return rc;
 }
diff --git a/xen/include/asm-x86/hvm/event.h b/xen/include/asm-x86/hvm/event.h
index e07f3291f1..11eb1feb28 100644
--- a/xen/include/asm-x86/hvm/event.h
+++ b/xen/include/asm-x86/hvm/event.h
@@ -28,8 +28,8 @@ bool_t hvm_event_cr(unsigned int index, unsigned long value,
     hvm_event_cr(VM_EVENT_X86_##what, new, old)
 void hvm_event_msr(unsigned int msr, uint64_t value);
 /* Called for current VCPU: returns -1 if no listener */
-int hvm_event_int3(unsigned long gla);
-int hvm_event_single_step(unsigned long gla);
+int hvm_event_int3(unsigned long rip);
+int hvm_event_single_step(unsigned long rip);
 void hvm_event_guest_request(void);
 
 #endif /* __ASM_X86_HVM_EVENT_H__ */
-- 
2.39.5
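
To make the "RIP is not a linear address" point concrete: RIP is an offset
into the code segment, so the guest-virtual (linear) address of the current
instruction is CS.base + RIP, and only that sum may be handed to a
GVA -> GFN translation. The standalone sketch below is not Xen code; the
CS.base and RIP values are made up, and identity paging is assumed purely to
keep the arithmetic visible. It shows how translating raw RIP lands on the
wrong guest frame whenever CS.base is non-zero, e.g. for a guest running
with non-flat segments:

#include <stdio.h>

int main(void)
{
    /* Hypothetical guest state with a non-flat code segment. */
    unsigned long cs_base = 0xf0000UL;     /* made-up CS.base */
    unsigned long rip     = 0x1234UL;      /* CS-relative instruction pointer */
    unsigned long gla     = cs_base + rip; /* the linear address executed */

    /* With identity paging, a frame number is just address >> 12. */
    printf("frame from raw RIP:       %#lx (wrong page)\n", rip >> 12);
    printf("frame from CS.base + RIP: %#lx (page really executed)\n",
           gla >> 12);
    return 0;
}

The PFEC adjustment in the patch follows the same logic: the access being
translated is an instruction fetch, hence PFEC_insn_fetch, and it counts as
a user-mode fetch when the current privilege level (read from SS.DPL here)
is 3, so the page walk applies the same permission checks the guest's own
fetch was subject to.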