xenbits.xensource.com Git - xen.git/commitdiff
x86/cpuid: Move all leaf 7 handling into guest_cpuid()
author: Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 11 Jan 2017 11:59:02 +0000 (11:59 +0000)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 11 Jan 2017 11:59:02 +0000 (11:59 +0000)
All per-domain policy data concerning leaf 7 is accurate.  Handle it all in
guest_cpuid() by reading out of the raw array block, and introducing a dynamic
adjustment for OSPKE.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/cpuid.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/traps.c
xen/include/asm-x86/cpuid.h

index c6552b709796e6f8705bbadf546839ca4922f64d..faf745be59a32df1721700db059de24d7cf9cd81 100644 (file)
@@ -362,6 +362,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
      * First pass:
      * - Perform max_leaf/subleaf calculations.  Out-of-range leaves return
      *   all zeros, following the AMD model.
+     * - Fill in *res for leaves no longer handled on the legacy path.
      * - Dispatch the virtualised leaves to their respective handlers.
      */
     switch ( leaf )
@@ -375,12 +376,18 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
         case 0x7:
             if ( subleaf > p->feat.max_subleaf )
                 return;
+
+            BUG_ON(subleaf >= ARRAY_SIZE(p->feat.raw));
+            *res = p->feat.raw[subleaf];
             break;
 
         case XSTATE_CPUID:
             if ( subleaf > ARRAY_SIZE(p->xstate.raw) )
                 return;
-            break;
+
+            /* Fallthrough. */
+        default:
+            goto legacy;
         }
         break;
 
@@ -400,12 +407,47 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
     case 0x80000000 ... 0x80000000 + CPUID_GUEST_NR_EXTD - 1:
         if ( leaf > p->extd.max_leaf )
             return;
-        break;
+        goto legacy;
 
     default:
         return;
     }
 
+    /*
+     * Skip dynamic adjustments if we are in the wrong context.
+     *
+     * All dynamic adjustments depends on current register state, which will
+     * be stale if the vcpu is running elsewhere.  It is simpler, quicker, and
+     * more reliable for the caller to do nothing (consistently) than to hand
+     * back stale data which it can't use safely.
+     */
+    if ( v != current )
+        return;
+
+    /*
+     * Second pass:
+     * - Dynamic adjustments
+     */
+    switch ( leaf )
+    {
+    case 0x7:
+        switch ( subleaf )
+        {
+        case 0:
+            /* OSPKE clear in policy.  Fast-forward CR4 back in. */
+            if ( (is_pv_domain(d)
+                  ? v->arch.pv_vcpu.ctrlreg[4]
+                  : v->arch.hvm_vcpu.guest_cr[4]) & X86_CR4_PKE )
+                res->c |= cpufeat_mask(X86_FEATURE_OSPKE);
+            break;
+        }
+        break;
+    }
+
+    /* Done. */
+    return;
+
+ legacy:
     /* {hvm,pv}_cpuid() have this expectation. */
     ASSERT(v == current);
 
index e2af1795466d9205e753526beb38f856b32993da..472f10fb253fed2b69b5352730816c2475579cae 100644 (file)
@@ -3355,19 +3355,6 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 
         break;
 
-    case 0x7:
-        if ( count == 0 )
-        {
-            *ebx = p->feat._7b0;
-            *ecx = p->feat._7c0;
-            *edx = p->feat._7d0;
-
-            /* OSPKE clear in policy.  Fast-forward CR4 back in. */
-            if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE )
-                *ecx |= cpufeat_mask(X86_FEATURE_OSPKE);
-        }
-        break;
-
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
@@ -3544,6 +3531,10 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
         else
             *eax = 0;
         break;
+
+    case 0x7:
+        ASSERT_UNREACHABLE();
+        /* Now handled in guest_cpuid(). */
     }
 }
 
index 6e08efe3c2ce4c63fd9d04fda83bf912ab7ab1b1..47d313af927c562d600695edb5ddea420328d89d 100644 (file)
@@ -1179,30 +1179,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
         }
         break;
 
-    case 0x00000007:
-        if ( subleaf == 0 )
-        {
-            b = p->feat._7b0;
-            c = p->feat._7c0;
-            d = p->feat._7d0;
-
-            if ( !is_pvh_domain(currd) )
-            {
-                /*
-                 * Delete the PVH condition when HVMLite formally replaces PVH,
-                 * and HVM guests no longer enter a PV codepath.
-                 */
-
-                /* OSPKE clear in policy.  Fast-forward CR4 back in. */
-                if ( curr->arch.pv_vcpu.ctrlreg[4] & X86_CR4_PKE )
-                    c |= cpufeat_mask(X86_FEATURE_OSPKE);
-            }
-        }
-        else
-            b = c = d = 0;
-        a = 0;
-        break;
-
     case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
         if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
              !vpmu_enabled(curr) )
@@ -1304,6 +1280,10 @@ void pv_cpuid(struct cpu_user_regs *regs)
     unsupported:
         a = b = c = d = 0;
         break;
+
+    case 0x7:
+        ASSERT_UNREACHABLE();
+        /* Now handled in guest_cpuid(). */
     }
 
     regs->rax = a;
index 9354e3afe6fe11d21b5ca563e4e537beacae8142..5b1448ac0f565680aec7835bf38044b82c728e43 100644 (file)
@@ -81,12 +81,14 @@ struct cpuid_policy
      *   - {xcr0,xss}_{high,low}
      *
      * - Guest accurate:
+     *   - All of the feat union
      *   - max_{,sub}leaf
      *   - All FEATURESET_* words
      *
      * Per-domain objects:
      *
      * - Guest accurate:
+     *   - All of the feat union
      *   - max_{,sub}leaf
      *   - All FEATURESET_* words
      *