xenbits.xensource.com Git - xen.git/commitdiff
x86/cpuid: Dispatch cpuid_hypervisor_leaves() from guest_cpuid()
author: Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 11 Jan 2017 11:59:02 +0000 (11:59 +0000)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Wed, 11 Jan 2017 11:59:02 +0000 (11:59 +0000)
... rather than from the legacy path.  Update the API to match guest_cpuid(),
and remove its dependence on current.

Make use of guest_cpuid() unconditionally zeroing res to avoid repeated
re-zeroing.  To use a const struct domain, domain_cpuid() needs to be
const-corrected.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/cpuid.c
xen/arch/x86/domain.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/traps.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/processor.h

index 45522b452d87ddaf9bbbe094432e563904b1f3e2..ff0d4ec0af679ca46dcd5bddde14d436a9545670 100644 (file)
@@ -346,7 +346,15 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
     case 0x40000000 ... 0x400000ff:
         if ( is_viridian_domain(d) )
             return cpuid_viridian_leaves(v, leaf, subleaf, res);
-        break;
+
+        /*
+         * Fallthrough.
+         *
+         * Intel reserve up until 0x4fffffff for hypervisor use.  AMD reserve
+         * only until 0x400000ff, but we already use double that.
+         */
+    case 0x40000100 ... 0x400001ff:
+        return cpuid_hypervisor_leaves(v, leaf, subleaf, res);
     }
 
     /* {hvm,pv}_cpuid() have this expectation. */
index 7d33c41165871125e3d924d7d8f2b9133570e198..b554a9c595dcebf4055c7e15b5de1b31d7ab0ff7 100644 (file)
@@ -2623,7 +2623,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
 }
 
 void domain_cpuid(
-    struct domain *d,
+    const struct domain *d,
     unsigned int  input,
     unsigned int  sub_input,
     unsigned int  *eax,
index 2230db9a5dadc9f734ba4dd27021bd13f823d524..1b57dd8e09b3727cb8f511b9458a19ef048514cb 100644 (file)
@@ -3353,9 +3353,6 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
     if ( !edx )
         edx = &dummy;
 
-    if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
-        return;
-
     if ( input & 0x7fffffff )
     {
         /*
index 3acc2444336cc0b8e9d7a81c258eaecc36b41b79..5ce8936634da68bf7af119b4e2c45d344fd10a32 100644 (file)
@@ -902,20 +902,18 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
     return 0;
 }
 
-int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
-               uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
+                             uint32_t subleaf, struct cpuid_leaf *res)
 {
-    struct vcpu *curr = current;
-    struct domain *currd = curr->domain;
-    /* Optionally shift out of the way of Viridian architectural leaves. */
-    uint32_t base = is_viridian_domain(currd) ? 0x40000100 : 0x40000000;
+    const struct domain *d = v->domain;
+    uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+    uint32_t idx  = leaf - base;
     uint32_t limit, dummy;
 
-    idx -= base;
     if ( idx > XEN_CPUID_MAX_NUM_LEAVES )
-        return 0; /* Avoid unnecessary pass through domain_cpuid() */
+        return; /* Avoid unnecessary pass through domain_cpuid() */
 
-    domain_cpuid(currd, base, 0, &limit, &dummy, &dummy, &dummy);
+    domain_cpuid(d, base, 0, &limit, &dummy, &dummy, &dummy);
     if ( limit == 0 )
         /* Default number of leaves */
         limit = XEN_CPUID_MAX_NUM_LEAVES;
@@ -929,83 +927,70 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
             limit = XEN_CPUID_MAX_NUM_LEAVES;
     }
 
-    if ( idx > limit ) 
-        return 0;
+    if ( idx > limit )
+        return;
 
     switch ( idx )
     {
     case 0:
-        *eax = base + limit; /* Largest leaf */
-        *ebx = XEN_CPUID_SIGNATURE_EBX;
-        *ecx = XEN_CPUID_SIGNATURE_ECX;
-        *edx = XEN_CPUID_SIGNATURE_EDX;
+        res->a = base + limit; /* Largest leaf */
+        res->b = XEN_CPUID_SIGNATURE_EBX;
+        res->c = XEN_CPUID_SIGNATURE_ECX;
+        res->d = XEN_CPUID_SIGNATURE_EDX;
         break;
 
     case 1:
-        *eax = (xen_major_version() << 16) | xen_minor_version();
-        *ebx = 0;          /* Reserved */
-        *ecx = 0;          /* Reserved */
-        *edx = 0;          /* Reserved */
+        res->a = (xen_major_version() << 16) | xen_minor_version();
         break;
 
     case 2:
-        *eax = 1;          /* Number of hypercall-transfer pages */
-        *ebx = 0x40000000; /* MSR base address */
-        if ( is_viridian_domain(currd) )
-            *ebx = 0x40000200;
-        *ecx = 0;          /* Features 1 */
-        *edx = 0;          /* Features 2 */
-        if ( is_pv_domain(currd) )
-            *ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
+        res->a = 1;            /* Number of hypercall-transfer pages */
+                               /* MSR base address */
+        res->b = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
+        if ( is_pv_domain(d) ) /* Features */
+            res->c |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
         break;
 
     case 3: /* Time leaf. */
-        switch ( sub_idx )
+        switch ( subleaf )
         {
         case 0: /* features */
-            *eax = ((!!currd->arch.vtsc << 0) |
-                    (!!host_tsc_is_safe() << 1) |
-                    (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
-            *ebx = currd->arch.tsc_mode;
-            *ecx = currd->arch.tsc_khz;
-            *edx = currd->arch.incarnation;
+            res->a = ((d->arch.vtsc << 0) |
+                      (!!host_tsc_is_safe() << 1) |
+                      (!!boot_cpu_has(X86_FEATURE_RDTSCP) << 2));
+            res->b = d->arch.tsc_mode;
+            res->c = d->arch.tsc_khz;
+            res->d = d->arch.incarnation;
             break;
 
         case 1: /* scale and offset */
         {
             uint64_t offset;
 
-            if ( !currd->arch.vtsc )
-                offset = currd->arch.vtsc_offset;
+            if ( !d->arch.vtsc )
+                offset = d->arch.vtsc_offset;
             else
                 /* offset already applied to value returned by virtual rdtscp */
                 offset = 0;
-            *eax = (uint32_t)offset;
-            *ebx = (uint32_t)(offset >> 32);
-            *ecx = currd->arch.vtsc_to_ns.mul_frac;
-            *edx = (s8)currd->arch.vtsc_to_ns.shift;
+            res->a = offset;
+            res->b = offset >> 32;
+            res->c = d->arch.vtsc_to_ns.mul_frac;
+            res->d = (s8)d->arch.vtsc_to_ns.shift;
             break;
         }
 
         case 2: /* physical cpu_khz */
-            *eax = cpu_khz;
-            *ebx = *ecx = *edx = 0;
-            break;
-
-        default:
-            *eax = *ebx = *ecx = *edx = 0;
+            res->a = cpu_khz;
             break;
         }
         break;
 
     case 4: /* HVM hypervisor leaf. */
-        *eax = *ebx = *ecx = *edx = 0;
-
-        if ( !has_hvm_container_domain(currd) || sub_idx != 0 )
+        if ( !has_hvm_container_domain(d) || subleaf != 0 )
             break;
 
         if ( cpu_has_vmx_apic_reg_virt )
-            *eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
+            res->a |= XEN_HVM_CPUID_APIC_ACCESS_VIRT;
 
         /*
          * We want to claim that x2APIC is virtualized if APIC MSR accesses
@@ -1016,24 +1001,22 @@ int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
         if ( cpu_has_vmx_virtualize_x2apic_mode &&
              cpu_has_vmx_apic_reg_virt &&
              cpu_has_vmx_virtual_intr_delivery )
-            *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
+            res->a |= XEN_HVM_CPUID_X2APIC_VIRT;
 
         /*
          * Indicate that memory mapped from other domains (either grants or
          * foreign pages) has valid IOMMU entries.
          */
-        *eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
+        res->a |= XEN_HVM_CPUID_IOMMU_MAPPINGS;
 
         /* Indicate presence of vcpu id and set it in ebx */
-        *eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
-        *ebx = curr->vcpu_id;
+        res->a |= XEN_HVM_CPUID_VCPU_ID_PRESENT;
+        res->b = v->vcpu_id;
         break;
 
     default:
         BUG();
     }
-
-    return 1;
 }
 
 void pv_cpuid(struct cpu_user_regs *regs)
@@ -1047,9 +1030,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
     subleaf = c = regs->_ecx;
     d = regs->_edx;
 
-    if ( cpuid_hypervisor_leaves(leaf, subleaf, &a, &b, &c, &d) )
-        goto out;
-
     if ( leaf & 0x7fffffff )
     {
         /*
@@ -1381,7 +1361,6 @@ void pv_cpuid(struct cpu_user_regs *regs)
         break;
     }
 
- out:
     regs->rax = a;
     regs->rbx = b;
     regs->rcx = c;
index 780f31164b20821cf7cb010f2fd02dcf017a961d..896e78d9bfa7aa396fefea30bdeec254a3cdb5db 100644 (file)
@@ -617,7 +617,7 @@ unsigned long pv_guest_cr4_fixup(const struct vcpu *, unsigned long guest_cr4);
              X86_CR4_OSXSAVE | X86_CR4_SMEP |               \
              X86_CR4_FSGSBASE | X86_CR4_SMAP))
 
-void domain_cpuid(struct domain *d,
+void domain_cpuid(const struct domain *d,
                   unsigned int  input,
                   unsigned int  sub_input,
                   unsigned int  *eax,
index aff115b21f416da803deec58c9125b26a8c8569f..3e84164fa870b28cc8cfe59852dea2411854b99d 100644 (file)
@@ -610,8 +610,8 @@ struct stubs {
 DECLARE_PER_CPU(struct stubs, stubs);
 unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn);
 
-int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
-          uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
+                             uint32_t subleaf, struct cpuid_leaf *res);
 int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val);
 int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);