x86/vm_event: consolidate hvm_event_fill_regs and p2m_vm_event_fill_regs
author     Tamas K Lengyel <tlengyel@novetta.com>
           Mon, 22 Feb 2016 16:24:15 +0000 (17:24 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Mon, 22 Feb 2016 16:24:15 +0000 (17:24 +0100)
Currently the registers saved in the request depend on which type of event
is filling in the registers. In this patch we consolidate the two versions
of the register-filling function so that a fixed set of registers is
returned irrespective of the underlying event.

Signed-off-by: Tamas K Lengyel <tlengyel@novetta.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
xen/arch/x86/hvm/event.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/vm_event.c
xen/include/asm-x86/vm_event.h
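
To illustrate the effect on consumers, here is a minimal, hypothetical sketch
(not part of this patch) of a monitor application decoding the now-uniform
register block. It assumes the public vm_event header is reachable as
<xen/vm_event.h> in a tools-style build (__XEN_TOOLS__ defined, as libxenctrl
users do); dump_event_regs() is an illustrative name, not a Xen API.

/*
 * Hedged consumer-side sketch: with the consolidated vm_event_fill_regs(),
 * the same x86 register fields are valid for both monitor and mem_access
 * requests, so one decoder suffices.
 */
#include <inttypes.h>
#include <stdio.h>
#include <xen/vm_event.h>

static void dump_event_regs(const vm_event_request_t *req)
{
    const struct vm_event_regs_x86 *r = &req->data.regs.x86;

    /* Filled identically regardless of which event type produced req. */
    printf("vcpu %" PRIu32 ": rip=%#" PRIx64 " rsp=%#" PRIx64
           " cr3=%#" PRIx64 "\n",
           req->vcpu_id, r->rip, r->rsp, r->cr3);
    printf("  cr0=%#" PRIx64 " cr4=%#" PRIx64 " efer=%#" PRIx64
           " cs_arbytes=%#" PRIx32 "\n",
           r->cr0, r->cr4, r->msr_efer, r->cs_arbytes);
}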

diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index cb9c15205586125d41ce111b5fd2d8815e4d45ba..d0b7d90847255219a035f7aa406e41f6efe04244 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
 #include <asm/hvm/event.h>
 #include <asm/monitor.h>
 #include <asm/altp2m.h>
+#include <asm/vm_event.h>
 #include <public/vm_event.h>
 
-static void hvm_event_fill_regs(vm_event_request_t *req)
-{
-    const struct cpu_user_regs *regs = guest_cpu_user_regs();
-    const struct vcpu *curr = current;
-
-    req->data.regs.x86.rax = regs->eax;
-    req->data.regs.x86.rcx = regs->ecx;
-    req->data.regs.x86.rdx = regs->edx;
-    req->data.regs.x86.rbx = regs->ebx;
-    req->data.regs.x86.rsp = regs->esp;
-    req->data.regs.x86.rbp = regs->ebp;
-    req->data.regs.x86.rsi = regs->esi;
-    req->data.regs.x86.rdi = regs->edi;
-
-    req->data.regs.x86.r8  = regs->r8;
-    req->data.regs.x86.r9  = regs->r9;
-    req->data.regs.x86.r10 = regs->r10;
-    req->data.regs.x86.r11 = regs->r11;
-    req->data.regs.x86.r12 = regs->r12;
-    req->data.regs.x86.r13 = regs->r13;
-    req->data.regs.x86.r14 = regs->r14;
-    req->data.regs.x86.r15 = regs->r15;
-
-    req->data.regs.x86.rflags = regs->eflags;
-    req->data.regs.x86.rip    = regs->eip;
-
-    req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
-    req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
-    req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
-    req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
-}
-
 static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
 {
     int rc;
@@ -92,7 +61,7 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
         req->altp2m_idx = vcpu_altp2m(curr).p2midx;
     }
 
-    hvm_event_fill_regs(req);
+    vm_event_fill_regs(req);
     vm_event_put_request(currd, &currd->vm_event->monitor, req);
 
     return 1;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index a25b18b4f3c94c1e9ee6a805d88b0d7419d087fc..b3fce1b7abf07d419215d8264ece6a538b9ac387 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1541,61 +1541,6 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
     }
 }
 
-static void p2m_vm_event_fill_regs(vm_event_request_t *req)
-{
-    const struct cpu_user_regs *regs = guest_cpu_user_regs();
-    struct segment_register seg;
-    struct hvm_hw_cpu ctxt;
-    struct vcpu *curr = current;
-
-    /* Architecture-specific vmcs/vmcb bits */
-    hvm_funcs.save_cpu_ctxt(curr, &ctxt);
-
-    req->data.regs.x86.rax = regs->eax;
-    req->data.regs.x86.rcx = regs->ecx;
-    req->data.regs.x86.rdx = regs->edx;
-    req->data.regs.x86.rbx = regs->ebx;
-    req->data.regs.x86.rsp = regs->esp;
-    req->data.regs.x86.rbp = regs->ebp;
-    req->data.regs.x86.rsi = regs->esi;
-    req->data.regs.x86.rdi = regs->edi;
-
-    req->data.regs.x86.r8  = regs->r8;
-    req->data.regs.x86.r9  = regs->r9;
-    req->data.regs.x86.r10 = regs->r10;
-    req->data.regs.x86.r11 = regs->r11;
-    req->data.regs.x86.r12 = regs->r12;
-    req->data.regs.x86.r13 = regs->r13;
-    req->data.regs.x86.r14 = regs->r14;
-    req->data.regs.x86.r15 = regs->r15;
-
-    req->data.regs.x86.rflags = regs->eflags;
-    req->data.regs.x86.rip    = regs->eip;
-
-    req->data.regs.x86.dr7 = curr->arch.debugreg[7];
-    req->data.regs.x86.cr0 = ctxt.cr0;
-    req->data.regs.x86.cr2 = ctxt.cr2;
-    req->data.regs.x86.cr3 = ctxt.cr3;
-    req->data.regs.x86.cr4 = ctxt.cr4;
-
-    req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
-    req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
-    req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
-
-    req->data.regs.x86.msr_efer = ctxt.msr_efer;
-    req->data.regs.x86.msr_star = ctxt.msr_star;
-    req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
-
-    hvm_get_segment_register(curr, x86_seg_fs, &seg);
-    req->data.regs.x86.fs_base = seg.base;
-
-    hvm_get_segment_register(curr, x86_seg_gs, &seg);
-    req->data.regs.x86.gs_base = seg.base;
-
-    hvm_get_segment_register(curr, x86_seg_cs, &seg);
-    req->data.regs.x86.cs_arbytes = seg.attr.bytes;
-}
-
 void p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp)
 {
@@ -1760,7 +1705,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
         req->vcpu_id = v->vcpu_id;
 
-        p2m_vm_event_fill_regs(req);
+        vm_event_fill_regs(req);
 
         if ( altp2m_active(v->domain) )
         {
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 08d678a9cf345044276f661e6079433139c7cb27..2a2abd71f0443519f671bd8441f6f69351f6b6cd 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -122,6 +122,63 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
     v->arch.user_regs.eip = rsp->data.regs.x86.rip;
 }
 
+void vm_event_fill_regs(vm_event_request_t *req)
+{
+    const struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct segment_register seg;
+    struct hvm_hw_cpu ctxt;
+    struct vcpu *curr = current;
+
+    ASSERT(is_hvm_vcpu(curr));
+
+    /* Architecture-specific vmcs/vmcb bits */
+    hvm_funcs.save_cpu_ctxt(curr, &ctxt);
+
+    req->data.regs.x86.rax = regs->eax;
+    req->data.regs.x86.rcx = regs->ecx;
+    req->data.regs.x86.rdx = regs->edx;
+    req->data.regs.x86.rbx = regs->ebx;
+    req->data.regs.x86.rsp = regs->esp;
+    req->data.regs.x86.rbp = regs->ebp;
+    req->data.regs.x86.rsi = regs->esi;
+    req->data.regs.x86.rdi = regs->edi;
+
+    req->data.regs.x86.r8  = regs->r8;
+    req->data.regs.x86.r9  = regs->r9;
+    req->data.regs.x86.r10 = regs->r10;
+    req->data.regs.x86.r11 = regs->r11;
+    req->data.regs.x86.r12 = regs->r12;
+    req->data.regs.x86.r13 = regs->r13;
+    req->data.regs.x86.r14 = regs->r14;
+    req->data.regs.x86.r15 = regs->r15;
+
+    req->data.regs.x86.rflags = regs->eflags;
+    req->data.regs.x86.rip    = regs->eip;
+
+    req->data.regs.x86.dr7 = curr->arch.debugreg[7];
+    req->data.regs.x86.cr0 = ctxt.cr0;
+    req->data.regs.x86.cr2 = ctxt.cr2;
+    req->data.regs.x86.cr3 = ctxt.cr3;
+    req->data.regs.x86.cr4 = ctxt.cr4;
+
+    req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
+    req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
+    req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
+
+    req->data.regs.x86.msr_efer = ctxt.msr_efer;
+    req->data.regs.x86.msr_star = ctxt.msr_star;
+    req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
+
+    hvm_get_segment_register(curr, x86_seg_fs, &seg);
+    req->data.regs.x86.fs_base = seg.base;
+
+    hvm_get_segment_register(curr, x86_seg_gs, &seg);
+    req->data.regs.x86.gs_base = seg.base;
+
+    hvm_get_segment_register(curr, x86_seg_cs, &seg);
+    req->data.regs.x86.cs_arbytes = seg.attr.bytes;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index fff8326444ced83311e5e21999fd5fa5b3cbf972..026f42e47753145200f497075eaba2e48724615a 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -42,4 +42,6 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
 
+void vm_event_fill_regs(vm_event_request_t *req);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */