x86/HVM: move vendor independent CPU save/restore logic to shared code
author    Jan Beulich <jbeulich@suse.com>
          Tue, 9 Oct 2018 14:25:35 +0000 (16:25 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 9 Oct 2018 14:25:35 +0000 (16:25 +0200)
A few pieces of the handling here are (no longer?) vendor specific, and
hence there's no point in replicating the code. Zero the full structure
before calling the save hook, eliminating the need for the hook
functions to zero individual fields.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
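
As an illustration of the pattern this change adopts, here is a minimal, self-contained C sketch (hypothetical structure and function names, not the actual Xen interfaces): the shared caller zeroes the full context, fills in the vendor-independent registers, and delegates only the remaining fields to the per-vendor hook; on restore, the shared caller applies CR2 once after the vendor hook runs.

#include <string.h>

/* Simplified stand-ins for hvm_hw_cpu and the per-vCPU HVM state. */
struct cpu_ctxt {
    unsigned long cr0, cr2, cr3, cr4;
    unsigned long msr_efer;
    unsigned long sysenter_cs, sysenter_esp, sysenter_eip;
    unsigned int  pending_event, error_code;
};

struct vcpu_state {
    unsigned long guest_cr[5];
    unsigned long guest_efer;
    /* Per-vendor hooks (SVM/VMX in the real code). */
    void (*save_hook)(const struct vcpu_state *v, struct cpu_ctxt *c);
    int  (*load_hook)(struct vcpu_state *v, const struct cpu_ctxt *c);
};

/* Placeholder for hvm_update_guest_cr(): propagate a CR into vendor state. */
static void update_guest_cr(struct vcpu_state *v, unsigned int cr)
{
    (void)v; (void)cr;
}

static void save_cpu_ctxt(const struct vcpu_state *v, struct cpu_ctxt *c)
{
    /* Zero everything up front, so hooks need not clear individual fields. */
    memset(c, 0, sizeof(*c));

    /* Vendor-independent state is filled by the shared caller. */
    c->cr0 = v->guest_cr[0];
    c->cr2 = v->guest_cr[2];
    c->cr3 = v->guest_cr[3];
    c->cr4 = v->guest_cr[4];
    c->msr_efer = v->guest_efer;

    /* Only truly vendor-specific fields are left to the hook. */
    if ( v->save_hook )
        v->save_hook(v, c);
}

static int load_cpu_ctxt(struct vcpu_state *v, const struct cpu_ctxt *c)
{
    /* The vendor hook restores what only it knows about. */
    if ( v->load_hook && v->load_hook(v, c) < 0 )
        return -1;

    /* CR2 is vendor independent: set it once here and propagate. */
    v->guest_cr[2] = c->cr2;
    update_guest_cr(v, 2);

    return 0;
}

Lifting this logic into the shared caller removes the duplicated CR/EFER copies and the per-field zeroing from the SVM and VMX save/restore functions, which is exactly what the hunks below do.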
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/vm_event.c

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6c1301df429371bd395cc48cc638d2cb4b716f54..9c105ff056e0d56047c414f5ac088a17e30f596e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -787,12 +787,17 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
         .r13 = v->arch.user_regs.r13,
         .r14 = v->arch.user_regs.r14,
         .r15 = v->arch.user_regs.r15,
+        .cr0 = v->arch.hvm.guest_cr[0],
+        .cr2 = v->arch.hvm.guest_cr[2],
+        .cr3 = v->arch.hvm.guest_cr[3],
+        .cr4 = v->arch.hvm.guest_cr[4],
         .dr0 = v->arch.debugreg[0],
         .dr1 = v->arch.debugreg[1],
         .dr2 = v->arch.debugreg[2],
         .dr3 = v->arch.debugreg[3],
         .dr6 = v->arch.debugreg[6],
         .dr7 = v->arch.debugreg[7],
+        .msr_efer = v->arch.hvm.guest_efer,
     };
 
     /*
@@ -1023,6 +1028,9 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
 
+    v->arch.hvm.guest_cr[2] = ctxt.cr2;
+    hvm_update_guest_cr(v, 2);
+
     if ( hvm_funcs.tsc_scaling.setup )
         hvm_funcs.tsc_scaling.setup(v);
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c98cfc2c138e4097fa4a8d44b4a50f1a2dc9b138..fa18cc07fd5b3f7437598ffe98b19a9eaf438469 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -272,17 +272,10 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
     c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
     c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
     c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
-    c->pending_event = 0;
-    c->error_code = 0;
     if ( vmcb->eventinj.fields.v &&
          hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
                                      vmcb->eventinj.fields.vector) )
@@ -341,11 +334,9 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     }
 
     v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[3] = c->cr3;
     v->arch.hvm.guest_cr[4] = c->cr4;
     svm_update_guest_cr(v, 0, 0);
-    svm_update_guest_cr(v, 2, 0);
     svm_update_guest_cr(v, 4, 0);
 
     /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
@@ -387,8 +378,6 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
     data->msr_star         = vmcb->star;
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
-    data->msr_efer         = v->arch.hvm.guest_efer;
-    data->msr_flags        = 0;
 }
 
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bf90e22a9a7ea05cb487cb49401c3a1ed648fc01..c85aa62ce7dc727685cc55301388acd7b2f6f44f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -646,19 +646,10 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
-    c->msr_efer = v->arch.hvm.guest_efer;
-
     __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
     __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
     __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
 
-    c->pending_event = 0;
-    c->error_code = 0;
     __vmread(VM_ENTRY_INTR_INFO, &ev);
     if ( (ev & INTR_INFO_VALID_MASK) &&
          hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
@@ -732,10 +723,8 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[4] = c->cr4;
     vmx_update_guest_cr(v, 0, 0);
-    vmx_update_guest_cr(v, 2, 0);
     vmx_update_guest_cr(v, 4, 0);
 
     v->arch.hvm.guest_efer = c->msr_efer;
@@ -770,7 +759,6 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     data->shadow_gs        = v->arch.hvm.vmx.shadow_gs;
-    data->msr_flags        = 0;
     data->msr_lstar        = v->arch.hvm.vmx.lstar;
     data->msr_star         = v->arch.hvm.vmx.star;
     data->msr_cstar        = v->arch.hvm.vmx.cstar;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index a2e470a65b1958c4ac8aa6ff686b63e0572b78c7..15de43c3e60f2c524a5669b198cc370aecd0af43 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -127,7 +127,7 @@ void vm_event_fill_regs(vm_event_request_t *req)
 #ifdef CONFIG_HVM
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register seg;
-    struct hvm_hw_cpu ctxt;
+    struct hvm_hw_cpu ctxt = {};
     struct vcpu *curr = current;
 
     ASSERT(is_hvm_vcpu(curr));
@@ -157,16 +157,16 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.rip    = regs->rip;
 
     req->data.regs.x86.dr7 = curr->arch.debugreg[7];
-    req->data.regs.x86.cr0 = ctxt.cr0;
-    req->data.regs.x86.cr2 = ctxt.cr2;
-    req->data.regs.x86.cr3 = ctxt.cr3;
-    req->data.regs.x86.cr4 = ctxt.cr4;
+    req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0];
+    req->data.regs.x86.cr2 = curr->arch.hvm.guest_cr[2];
+    req->data.regs.x86.cr3 = curr->arch.hvm.guest_cr[3];
+    req->data.regs.x86.cr4 = curr->arch.hvm.guest_cr[4];
 
     req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
     req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
     req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
 
-    req->data.regs.x86.msr_efer = ctxt.msr_efer;
+    req->data.regs.x86.msr_efer = curr->arch.hvm.guest_efer;
     req->data.regs.x86.msr_star = ctxt.msr_star;
     req->data.regs.x86.msr_lstar = ctxt.msr_lstar;