xenbits.xensource.com Git - xen.git/commitdiff
hvm: hvm_{load,store}_cpu_guest_regs() does not touch segment
author: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Wed, 19 Sep 2007 09:24:24 +0000 (10:24 +0100)
committer: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Wed, 19 Sep 2007 09:24:24 +0000 (10:24 +0100)
selectors. We have separate accessors for that now. It is now an
invariant that guest_cpu_user_regs()->{cs,ds,es,fs,gs,ss} are invalid
for an HVM guest.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/domctl.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/platform.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/oprofile/op_model_athlon.c
xen/arch/x86/x86_32/traps.c
xen/arch/x86/x86_64/traps.c
xen/include/asm-x86/hvm/hvm.h

index 24b579d466bad69c975d2c8e6aade1e7d919138b..f325a79b3c4ab5dad20d8747e41fcd07b38518e0 100644 (file)
@@ -555,18 +555,27 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
     if ( is_hvm_vcpu(v) )
     {
         if ( !is_pv_32on64_domain(v->domain) )
-            hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
+        {
+            hvm_store_cpu_guest_regs(v, &c.nat->user_regs);
+            memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
+            c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
+            c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
+            c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
+            c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
+        }
 #ifdef CONFIG_COMPAT
         else
         {
             struct cpu_user_regs user_regs;
-            typeof(c.nat->ctrlreg) ctrlreg;
             unsigned i;
 
-            hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
+            hvm_store_cpu_guest_regs(v, &user_regs);
             XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
-            for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
-                c.cmp->ctrlreg[i] = ctrlreg[i];
+            memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg));
+            c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
+            c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
+            c.cmp->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
+            c.cmp->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
         }
 #endif
     }
index 795c71a7275d021ec3fd859ecdc81a445c7c0336..ad930fb99b710acac55a3ac59eded22a6bdd674b 100644 (file)
@@ -973,7 +973,7 @@ void hvm_task_switch(
         goto out;
     }
 
-    hvm_store_cpu_guest_regs(v, regs, NULL);
+    hvm_store_cpu_guest_regs(v, regs);
 
     ptss = hvm_map(prev_tr.base, sizeof(tss));
     if ( ptss == NULL )
@@ -1322,7 +1322,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
 #endif
     case 4:
     case 2:
-        hvm_store_cpu_guest_regs(current, regs, NULL);
+        hvm_store_cpu_guest_regs(current, regs);
         if ( unlikely(ring_3(regs)) )
         {
     default:
index 3d69e9cca52507c826456db1907f0a196349f7ad..db66c9cbe43b25f80c5419f346bece675a23d18c 100644 (file)
@@ -1032,7 +1032,7 @@ void handle_mmio(unsigned long gpa)
 
     /* Copy current guest state into io instruction state structure. */
     memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
-    hvm_store_cpu_guest_regs(v, regs, NULL);
+    hvm_store_cpu_guest_regs(v, regs);
 
     df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
 
index 846f5074a8be99665e5202da6f8fa651d07c3ae3..b165c45dc35369d13b25a4af1222bd3675865bec 100644 (file)
@@ -109,27 +109,13 @@ static int svm_lme_is_set(struct vcpu *v)
 }
 
 static void svm_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
+    struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    if ( regs != NULL )
-    {
-        regs->ss     = vmcb->ss.sel;
-        regs->esp    = vmcb->rsp;
-        regs->eflags = vmcb->rflags;
-        regs->cs     = vmcb->cs.sel;
-        regs->eip    = vmcb->rip;
-    }
-
-    if ( crs != NULL )
-    {
-        /* Returning the guest's regs */
-        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
-        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
-        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
-        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
-    }
+    regs->esp    = vmcb->rsp;
+    regs->eflags = vmcb->rflags;
+    regs->eip    = vmcb->rip;
 }
 
 static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
@@ -702,7 +688,6 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     {
     case x86_seg_cs:
         memcpy(&vmcb->cs, reg, sizeof(*reg));
-        guest_cpu_user_regs()->cs = reg->sel;
         break;
     case x86_seg_ds:
         memcpy(&vmcb->ds, reg, sizeof(*reg));
@@ -722,7 +707,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         memcpy(&vmcb->ss, reg, sizeof(*reg));
-        guest_cpu_user_regs()->ss = reg->sel;
+        vmcb->cpl = vmcb->ss.attr.fields.dpl;
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -829,10 +814,8 @@ static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    vmcb->ss.sel   = regs->ss;
     vmcb->rsp      = regs->esp;   
     vmcb->rflags   = regs->eflags | 2UL;
-    vmcb->cs.sel   = regs->cs;
     vmcb->rip      = regs->eip;
 }
 
@@ -1518,7 +1501,7 @@ static void svm_io_instruction(struct vcpu *v)
 
     /* Copy current guest state into io instruction state structure. */
     memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
-    svm_store_cpu_guest_regs(v, regs, NULL);
+    svm_store_cpu_guest_regs(v, regs);
 
     info.bytes = vmcb->exitinfo1;
 
@@ -2292,7 +2275,7 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
 
     case VMEXIT_EXCEPTION_MC:
         HVMTRACE_0D(MCE, v);
-        svm_store_cpu_guest_regs(v, regs, NULL);
+        svm_store_cpu_guest_regs(v, regs);
         do_machine_check(regs);
         break;
 
index 3f6d7e0620911301aabcafc0f2801e98b1381888..eabb3691608add9f4d1cbb7cfa8e2b6d59173d35 100644 (file)
@@ -794,61 +794,25 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
 }
 
 static void vmx_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
+    struct vcpu *v, struct cpu_user_regs *regs)
 {
     vmx_vmcs_enter(v);
 
-    if ( regs != NULL )
-    {
-        regs->eflags = __vmread(GUEST_RFLAGS);
-        regs->ss = __vmread(GUEST_SS_SELECTOR);
-        regs->cs = __vmread(GUEST_CS_SELECTOR);
-        regs->eip = __vmread(GUEST_RIP);
-        regs->esp = __vmread(GUEST_RSP);
-    }
-
-    if ( crs != NULL )
-    {
-        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
-        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
-        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
-        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
-    }
+    regs->eflags = __vmread(GUEST_RFLAGS);
+    regs->eip = __vmread(GUEST_RIP);
+    regs->esp = __vmread(GUEST_RSP);
 
     vmx_vmcs_exit(v);
 }
 
 static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    unsigned long base;
-
     vmx_vmcs_enter(v);
 
-    __vmwrite(GUEST_SS_SELECTOR, regs->ss);
-    __vmwrite(GUEST_RSP, regs->esp);
-
     /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
     __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
-
-    if ( regs->eflags & EF_VM )
-    {
-        /*
-         * The VMX spec (section 4.3.1.2, Checks on Guest Segment
-         * Registers) says that virtual-8086 mode guests' segment
-         * base-address fields in the VMCS must be equal to their
-         * corresponding segment selector field shifted right by
-         * four bits upon vmentry.
-         */
-        base = __vmread(GUEST_CS_BASE);
-        if ( (regs->cs << 4) != base )
-            __vmwrite(GUEST_CS_BASE, regs->cs << 4);
-        base = __vmread(GUEST_SS_BASE);
-        if ( (regs->ss << 4) != base )
-            __vmwrite(GUEST_SS_BASE, regs->ss << 4);
-    }
-
-    __vmwrite(GUEST_CS_SELECTOR, regs->cs);
     __vmwrite(GUEST_RIP, regs->eip);
+    __vmwrite(GUEST_RSP, regs->esp);
 
     vmx_vmcs_exit(v);
 }
@@ -978,7 +942,6 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
         __vmwrite(GUEST_CS_LIMIT, reg->limit);
         __vmwrite(GUEST_CS_BASE, reg->base);
         __vmwrite(GUEST_CS_AR_BYTES, attr);
-        guest_cpu_user_regs()->cs = reg->sel;
         break;
     case x86_seg_ds:
         __vmwrite(GUEST_DS_SELECTOR, reg->sel);
@@ -1009,7 +972,6 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
         __vmwrite(GUEST_SS_LIMIT, reg->limit);
         __vmwrite(GUEST_SS_BASE, reg->base);
         __vmwrite(GUEST_SS_AR_BYTES, attr);
-        guest_cpu_user_regs()->ss = reg->sel;
         break;
     case x86_seg_tr:
         __vmwrite(GUEST_TR_SELECTOR, reg->sel);
@@ -1890,7 +1852,7 @@ static void vmx_io_instruction(unsigned long exit_qualification,
 
     /* Copy current guest state into io instruction state structure. */
     memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
-    vmx_store_cpu_guest_regs(current, regs, NULL);
+    vmx_store_cpu_guest_regs(current, regs);
 
     HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
                 "exit_qualification = %lx",
@@ -2639,7 +2601,7 @@ static void vmx_failed_vmentry(unsigned int exit_reason,
     case EXIT_REASON_MACHINE_CHECK:
         printk("caused by machine check.\n");
         HVMTRACE_0D(MCE, current);
-        vmx_store_cpu_guest_regs(current, regs, NULL);
+        vmx_store_cpu_guest_regs(current, regs);
         do_machine_check(regs);
         break;
     default:
@@ -2761,12 +2723,12 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
                  (X86_EVENTTYPE_NMI << 8) )
                 goto exit_and_crash;
             HVMTRACE_0D(NMI, v);
-            vmx_store_cpu_guest_regs(v, regs, NULL);
+            vmx_store_cpu_guest_regs(v, regs);
             do_nmi(regs); /* Real NMI, vector 2: normal processing. */
             break;
         case TRAP_machine_check:
             HVMTRACE_0D(MCE, v);
-            vmx_store_cpu_guest_regs(v, regs, NULL);
+            vmx_store_cpu_guest_regs(v, regs);
             do_machine_check(regs);
             break;
         default:
index fb0ef1949bc42ac8ccefcc8c7dec60200cebd12d..82c54c52075eb7a3fb4b2ebc7604686fd8cd3401 100644 (file)
@@ -2929,7 +2929,7 @@ static int sh_page_fault(struct vcpu *v,
             goto done;
         }
 
-        hvm_store_cpu_guest_regs(v, regs, NULL);
+        hvm_store_cpu_guest_regs(v, regs);
     }
 
     SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n", 
index adcc9209f7171234644fcbede7ab7519273afa93..d3c757a57ee5a34d1710d6f705ac299ae3a6c715 100644 (file)
@@ -119,7 +119,7 @@ static int athlon_check_ctrs(unsigned int const cpu,
            (regs->eip == (unsigned long)svm_stgi_label)) {
                /* SVM guest was running when NMI occurred */
                ASSERT(is_hvm_vcpu(v));
-               hvm_store_cpu_guest_regs(v, guest_regs, NULL);
+               hvm_store_cpu_guest_regs(v, guest_regs);
                eip = guest_regs->eip;
                mode = xenoprofile_get_mode(v, guest_regs);
        } else {
index 813283b2853eb8c719588cc16f1b59d564f35c3f..dcee52f65f3703e91db24469981441b2abdd72d3 100644 (file)
@@ -41,11 +41,29 @@ void show_registers(struct cpu_user_regs *regs)
     struct cpu_user_regs fault_regs = *regs;
     unsigned long fault_crs[8];
     const char *context;
+    struct vcpu *v = current;
 
-    if ( is_hvm_vcpu(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(v) && guest_mode(regs) )
     {
+        struct segment_register sreg;
         context = "hvm";
-        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
+        hvm_store_cpu_guest_regs(v, &fault_regs);
+        fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
+        hvm_get_segment_register(v, x86_seg_cs, &sreg);
+        fault_regs.cs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_ds, &sreg);
+        fault_regs.ds = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_es, &sreg);
+        fault_regs.es = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_fs, &sreg);
+        fault_regs.fs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_gs, &sreg);
+        fault_regs.gs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_ss, &sreg);
+        fault_regs.ss = sreg.sel;
     }
     else
     {
@@ -63,7 +81,7 @@ void show_registers(struct cpu_user_regs *regs)
         else
         {
             context = "guest";
-            fault_crs[2] = current->vcpu_info->arch.cr2;
+            fault_crs[2] = v->vcpu_info->arch.cr2;
         }
 
         fault_crs[0] = read_cr0();
index d1ceabcd6d345e8d51ba86c021e19c7339121b85..ed48005da3fe4f8d5c324db4e9974888f2b4c394 100644 (file)
@@ -44,18 +44,36 @@ void show_registers(struct cpu_user_regs *regs)
     struct cpu_user_regs fault_regs = *regs;
     unsigned long fault_crs[8];
     const char *context;
+    struct vcpu *v = current;
 
-    if ( is_hvm_vcpu(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(v) && guest_mode(regs) )
     {
+        struct segment_register sreg;
         context = "hvm";
-        hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
+        hvm_store_cpu_guest_regs(v, &fault_regs);
+        fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+        fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+        fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+        fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
+        hvm_get_segment_register(v, x86_seg_cs, &sreg);
+        fault_regs.cs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_ds, &sreg);
+        fault_regs.ds = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_es, &sreg);
+        fault_regs.es = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_fs, &sreg);
+        fault_regs.fs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_gs, &sreg);
+        fault_regs.gs = sreg.sel;
+        hvm_get_segment_register(v, x86_seg_ss, &sreg);
+        fault_regs.ss = sreg.sel;
     }
     else
     {
         if ( guest_mode(regs) )
         {
             context = "guest";
-            fault_crs[2] = arch_get_cr2(current);
+            fault_crs[2] = arch_get_cr2(v);
         }
         else
         {
index 1d195dabf264dc15a86487976cf62c306652556d..475e374ee683a8e576177d8697bb5dcb290d3acb 100644 (file)
@@ -85,7 +85,7 @@ struct hvm_function_table {
      * 2) modify guest state (e.g., set debug flags).
      */
     void (*store_cpu_guest_regs)(
-        struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
+        struct vcpu *v, struct cpu_user_regs *r);
     void (*load_cpu_guest_regs)(
         struct vcpu *v, struct cpu_user_regs *r);
 
@@ -168,9 +168,9 @@ void hvm_send_assist_req(struct vcpu *v);
 
 static inline void
 hvm_store_cpu_guest_regs(
-    struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
+    struct vcpu *v, struct cpu_user_regs *r)
 {
-    hvm_funcs.store_cpu_guest_regs(v, r, crs);
+    hvm_funcs.store_cpu_guest_regs(v, r);
 }
 
 static inline void