x86: Merge the system {cpuid,msr} policy objects
Author:     Andrew Cooper <andrew.cooper3@citrix.com>
AuthorDate: Wed, 29 Mar 2023 06:39:44 +0000 (07:39 +0100)
Commit:     Andrew Cooper <andrew.cooper3@citrix.com>
CommitDate: Tue, 8 Aug 2023 15:02:17 +0000 (16:02 +0100)
Right now, they're the same underlying type, containing disjoint information.

Introduce a new cpu-policy.{h,c} to be the new location for all policy
handling logic.  Place the combined objects in __ro_after_init, which is new
since the original logic was written.

As we're trying to phase out the use of struct old_cpu_policy entirely, rework
update_domain_cpu_policy() to not pointer-chase through system_policies[].

This in turn allows system_policies[] in sysctl.c to become static and reduced
in scope to XEN_SYSCTL_get_cpu_policy.

No practical change.  This undoes the transient doubling of storage space from
earlier patches.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
(cherry picked from commit 6bc33366795d14a21a3244d0f3b63f7dccea87ef)
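
For context on the first paragraph of the commit message: by this point in the series, struct cpuid_policy and struct msr_policy are assumed to be compatibility aliases of struct cpu_policy (the hunks below still spell both names against the merged objects, which only works if they alias). A rough sketch, not part of this patch, of what the merge amounts to:

    /* Alias mechanism assumed from earlier patches in the series. */
    #define cpuid_policy cpu_policy
    #define msr_policy   cpu_policy

    /* Before: two objects of the same underlying type, each populated with
     * half of the information (CPUID leaves vs. MSR values). */
    struct cpuid_policy raw_cpuid_policy;
    struct msr_policy   raw_msr_policy;

    /* After: a single object carrying both halves. */
    struct cpu_policy   raw_cpu_policy;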

xen/arch/x86/Makefile
xen/arch/x86/cpu-policy.c [new file with mode: 0644]
xen/arch/x86/cpu/common.c
xen/arch/x86/cpuid.c
xen/arch/x86/domctl.c
xen/arch/x86/include/asm/cpu-policy.h [new file with mode: 0644]
xen/arch/x86/include/asm/cpuid.h
xen/arch/x86/include/asm/msr.h
xen/arch/x86/msr.c
xen/arch/x86/sysctl.c

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 5accbe4c6746df91e894c968e0d1df24e80ad551..f213a6b56a4d7a790ec76a589e18bc61b9cc6d1f 100644
@@ -18,6 +18,7 @@ obj-y += bitops.o
 obj-bin-y += bzimage.init.o
 obj-bin-y += clear_page.o
 obj-bin-y += copy_page.o
+obj-y += cpu-policy.o
 obj-y += cpuid.o
 obj-$(CONFIG_PV) += compat.o
 obj-$(CONFIG_PV32) += x86_64/compat.o
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
new file mode 100644
index 0000000..663e9a0
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <xen/cache.h>
+#include <xen/kernel.h>
+
+#include <xen/lib/x86/cpu-policy.h>
+
+#include <asm/cpu-policy.h>
+
+struct cpu_policy __ro_after_init     raw_cpu_policy;
+struct cpu_policy __ro_after_init    host_cpu_policy;
+#ifdef CONFIG_PV
+struct cpu_policy __ro_after_init  pv_max_cpu_policy;
+struct cpu_policy __ro_after_init  pv_def_cpu_policy;
+#endif
+#ifdef CONFIG_HVM
+struct cpu_policy __ro_after_init hvm_max_cpu_policy;
+struct cpu_policy __ro_after_init hvm_def_cpu_policy;
+#endif
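
The removed declarations in cpuid.c and msr.c below used __read_mostly; __ro_after_init is the stronger annotation that became available after that code was written. A minimal sketch of the distinction, assuming __ro_after_init comes from <xen/cache.h> (hence the include above) and is a section attribute:

    /* Hypothetical illustration, not part of the patch. */
    #define __read_mostly   __section(".data.read_mostly")   /* grouped for cache locality,
                                                                 still writable at runtime */
    #define __ro_after_init __section(".data.ro_after_init") /* written during boot only,
                                                                treated as read-only after init */

    struct cpu_policy __ro_after_init host_cpu_policy; /* filled by calculate_host_policy(),
                                                          never legitimately written again */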
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 27f73d3bbe3164e1fa70ddb513d7f2fdf29e2f1b..665200db382fa77638d9faf48c117b563af428b4 100644
@@ -3,6 +3,8 @@
 #include <xen/delay.h>
 #include <xen/param.h>
 #include <xen/smp.h>
+
+#include <asm/cpu-policy.h>
 #include <asm/current.h>
 #include <asm/debugreg.h>
 #include <asm/processor.h>
@@ -138,7 +140,7 @@ bool __init probe_cpuid_faulting(void)
                return false;
 
        if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
-               raw_msr_policy.platform_info.cpuid_faulting =
+               raw_cpu_policy.platform_info.cpuid_faulting =
                        val & MSR_PLATFORM_INFO_CPUID_FAULTING;
 
        if (rc ||
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index acc2f606cea8eac7317f6d732a491cce8ee57663..1327dba30dd8d4b6e81f74cb95c9cdb2c02d4088 100644
@@ -4,6 +4,7 @@
 #include <xen/sched.h>
 #include <xen/nospec.h>
 #include <asm/amd.h>
+#include <asm/cpu-policy.h>
 #include <asm/cpuid.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/nestedhvm.h>
@@ -142,17 +143,6 @@ static void zero_leaves(struct cpuid_leaf *l,
     memset(&l[first], 0, sizeof(*l) * (last - first + 1));
 }
 
-struct cpuid_policy __read_mostly     raw_cpuid_policy,
-                    __read_mostly    host_cpuid_policy;
-#ifdef CONFIG_PV
-struct cpuid_policy __read_mostly  pv_max_cpuid_policy;
-struct cpuid_policy __read_mostly  pv_def_cpuid_policy;
-#endif
-#ifdef CONFIG_HVM
-struct cpuid_policy __read_mostly hvm_max_cpuid_policy;
-struct cpuid_policy __read_mostly hvm_def_cpuid_policy;
-#endif
-
 static void sanitise_featureset(uint32_t *fs)
 {
     /* for_each_set_bit() uses unsigned longs.  Extend with zeroes. */
@@ -344,7 +334,7 @@ static void recalculate_misc(struct cpuid_policy *p)
 
 static void __init calculate_raw_policy(void)
 {
-    struct cpuid_policy *p = &raw_cpuid_policy;
+    struct cpuid_policy *p = &raw_cpu_policy;
 
     x86_cpuid_policy_fill_native(p);
 
@@ -354,10 +344,10 @@ static void __init calculate_raw_policy(void)
 
 static void __init calculate_host_policy(void)
 {
-    struct cpuid_policy *p = &host_cpuid_policy;
+    struct cpuid_policy *p = &host_cpu_policy;
     unsigned int max_extd_leaf;
 
-    *p = raw_cpuid_policy;
+    *p = raw_cpu_policy;
 
     p->basic.max_leaf =
         min_t(uint32_t, p->basic.max_leaf,   ARRAY_SIZE(p->basic.raw) - 1);
@@ -449,17 +439,17 @@ static void __init guest_common_feature_adjustments(uint32_t *fs)
      * of IBRS by using the AMD feature bit.  An administrator may wish for
      * performance reasons to offer IBPB without IBRS.
      */
-    if ( host_cpuid_policy.feat.ibrsb )
+    if ( host_cpu_policy.feat.ibrsb )
         __set_bit(X86_FEATURE_IBPB, fs);
 }
 
 static void __init calculate_pv_max_policy(void)
 {
-    struct cpuid_policy *p = &pv_max_cpuid_policy;
+    struct cpuid_policy *p = &pv_max_cpu_policy;
     uint32_t pv_featureset[FSCAPINTS];
     unsigned int i;
 
-    *p = host_cpuid_policy;
+    *p = host_cpu_policy;
     cpuid_policy_to_featureset(p, pv_featureset);
 
     for ( i = 0; i < ARRAY_SIZE(pv_featureset); ++i )
@@ -486,11 +476,11 @@ static void __init calculate_pv_max_policy(void)
 
 static void __init calculate_pv_def_policy(void)
 {
-    struct cpuid_policy *p = &pv_def_cpuid_policy;
+    struct cpuid_policy *p = &pv_def_cpu_policy;
     uint32_t pv_featureset[FSCAPINTS];
     unsigned int i;
 
-    *p = pv_max_cpuid_policy;
+    *p = pv_max_cpu_policy;
     cpuid_policy_to_featureset(p, pv_featureset);
 
     for ( i = 0; i < ARRAY_SIZE(pv_featureset); ++i )
@@ -506,12 +496,12 @@ static void __init calculate_pv_def_policy(void)
 
 static void __init calculate_hvm_max_policy(void)
 {
-    struct cpuid_policy *p = &hvm_max_cpuid_policy;
+    struct cpuid_policy *p = &hvm_max_cpu_policy;
     uint32_t hvm_featureset[FSCAPINTS];
     unsigned int i;
     const uint32_t *hvm_featuremask;
 
-    *p = host_cpuid_policy;
+    *p = host_cpu_policy;
     cpuid_policy_to_featureset(p, hvm_featureset);
 
     hvm_featuremask = hvm_hap_supported() ?
@@ -539,7 +529,7 @@ static void __init calculate_hvm_max_policy(void)
      * HVM guests are able if running in protected mode.
      */
     if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
-         raw_cpuid_policy.basic.sep )
+         raw_cpu_policy.basic.sep )
         __set_bit(X86_FEATURE_SEP, hvm_featureset);
 
     /*
@@ -588,12 +578,12 @@ static void __init calculate_hvm_max_policy(void)
 
 static void __init calculate_hvm_def_policy(void)
 {
-    struct cpuid_policy *p = &hvm_def_cpuid_policy;
+    struct cpuid_policy *p = &hvm_def_cpu_policy;
     uint32_t hvm_featureset[FSCAPINTS];
     unsigned int i;
     const uint32_t *hvm_featuremask;
 
-    *p = hvm_max_cpuid_policy;
+    *p = hvm_max_cpu_policy;
     cpuid_policy_to_featureset(p, hvm_featureset);
 
     hvm_featuremask = hvm_hap_supported() ?
@@ -661,8 +651,8 @@ void recalculate_cpuid_policy(struct domain *d)
 {
     struct cpuid_policy *p = d->arch.cpuid;
     const struct cpuid_policy *max = is_pv_domain(d)
-        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_max_cpuid_policy : NULL)
-        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpuid_policy : NULL);
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_max_cpu_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpu_policy : NULL);
     uint32_t fs[FSCAPINTS], max_fs[FSCAPINTS];
     unsigned int i;
 
@@ -737,7 +727,7 @@ void recalculate_cpuid_policy(struct domain *d)
     /* Fold host's FDP_EXCP_ONLY and NO_FPU_SEL into guest's view. */
     fs[FEATURESET_7b0] &= ~(cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
                             cpufeat_mask(X86_FEATURE_NO_FPU_SEL));
-    fs[FEATURESET_7b0] |= (host_cpuid_policy.feat._7b0 &
+    fs[FEATURESET_7b0] |= (host_cpu_policy.feat._7b0 &
                            (cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
                             cpufeat_mask(X86_FEATURE_NO_FPU_SEL)));
 
@@ -788,8 +778,8 @@ void recalculate_cpuid_policy(struct domain *d)
 int init_domain_cpuid_policy(struct domain *d)
 {
     struct cpuid_policy *p = is_pv_domain(d)
-        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_def_cpuid_policy : NULL)
-        : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpuid_policy : NULL);
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_def_cpu_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpu_policy : NULL);
 
     if ( !p )
     {
@@ -1093,7 +1083,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
         if ( is_pv_domain(d) && is_hardware_domain(d) &&
              guest_kernel_mode(v, regs) && cpu_has_monitor &&
              regs->entry_vector == TRAP_gp_fault )
-            *res = raw_cpuid_policy.basic.raw[5];
+            *res = raw_cpu_policy.basic.raw[5];
         break;
 
     case 0x7:
@@ -1225,14 +1215,14 @@ static void __init __maybe_unused build_assertions(void)
     /* Find some more clever allocation scheme if this trips. */
     BUILD_BUG_ON(sizeof(struct cpuid_policy) > PAGE_SIZE);
 
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.basic) !=
-                 sizeof(raw_cpuid_policy.basic.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.feat) !=
-                 sizeof(raw_cpuid_policy.feat.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.xstate) !=
-                 sizeof(raw_cpuid_policy.xstate.raw));
-    BUILD_BUG_ON(sizeof(raw_cpuid_policy.extd) !=
-                 sizeof(raw_cpuid_policy.extd.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.basic) !=
+                 sizeof(raw_cpu_policy.basic.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.feat) !=
+                 sizeof(raw_cpu_policy.feat.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.xstate) !=
+                 sizeof(raw_cpu_policy.xstate.raw));
+    BUILD_BUG_ON(sizeof(raw_cpu_policy.extd) !=
+                 sizeof(raw_cpu_policy.extd.raw));
 }
 
 /*
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 175d473e412ac74507e6441b453e74bfc752c3fe..2689df813b391615cb8e152ed0fdbc953ceb0eba 100644
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/psr.h>
-#include <asm/cpuid.h>
+#include <asm/cpu-policy.h>
 
 static int update_domain_cpu_policy(struct domain *d,
                                     xen_domctl_cpu_policy_t *xdpc)
 {
     struct old_cpu_policy new = {};
-    const struct old_cpu_policy *sys = is_pv_domain(d)
-        ? &system_policies[XEN_SYSCTL_cpu_policy_pv_max]
-        : &system_policies[XEN_SYSCTL_cpu_policy_hvm_max];
+    struct cpu_policy *sys = is_pv_domain(d)
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_max_cpu_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpu_policy : NULL);
+    struct old_cpu_policy old_sys = { sys, sys };
     struct cpu_policy_errors err = INIT_CPU_POLICY_ERRORS;
     int ret = -ENOMEM;
 
+    if ( !sys )
+    {
+        ASSERT_UNREACHABLE();
+        return -EOPNOTSUPP;
+    }
+
     /* Start by copying the domain's existing policies. */
     if ( !(new.cpuid = xmemdup(d->arch.cpuid)) ||
          !(new.msr   = xmemdup(d->arch.msr)) )
@@ -65,7 +72,7 @@ static int update_domain_cpu_policy(struct domain *d,
     x86_cpuid_policy_clear_out_of_range_leaves(new.cpuid);
 
     /* Audit the combined dataset. */
-    ret = x86_cpu_policies_are_compatible(sys, &new, &err);
+    ret = x86_cpu_policies_are_compatible(&old_sys, &new, &err);
     if ( ret )
         goto out;
 
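A note on the update_domain_cpu_policy() rework above: x86_cpu_policies_are_compatible() still takes the transitional struct old_cpu_policy pair, so the merged system object is wrapped in a stack-local pair rather than fetched from system_policies[]. Roughly, with the bundling type's layout assumed from earlier patches in the series:

    /* Transitional bundling type (assumed definition). */
    struct old_cpu_policy {
        struct cpuid_policy *cpuid;
        struct msr_policy   *msr;
    };

    /* With both names aliasing struct cpu_policy, the one merged object can
     * fill both slots of the pair expected by the audit function. */
    struct old_cpu_policy old_sys = { sys, sys };
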
diff --git a/xen/arch/x86/include/asm/cpu-policy.h b/xen/arch/x86/include/asm/cpu-policy.h
new file mode 100644
index 0000000..eef14bb
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef X86_CPU_POLICY_H
+#define X86_CPU_POLICY_H
+
+struct cpu_policy;
+
+extern struct cpu_policy     raw_cpu_policy;
+extern struct cpu_policy    host_cpu_policy;
+extern struct cpu_policy  pv_max_cpu_policy;
+extern struct cpu_policy  pv_def_cpu_policy;
+extern struct cpu_policy hvm_max_cpu_policy;
+extern struct cpu_policy hvm_def_cpu_policy;
+
+#endif /* X86_CPU_POLICY_H */
diff --git a/xen/arch/x86/include/asm/cpuid.h b/xen/arch/x86/include/asm/cpuid.h
index d418e8100dde14bc142e35c1805b4152daad5c5f..ea0586277331f4b73ad1983187e9fe6c3dd616d9 100644
@@ -46,12 +46,6 @@ DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);
 /* Default masking MSR values, calculated at boot. */
 extern struct cpuidmasks cpuidmask_defaults;
 
-extern struct cpuid_policy raw_cpuid_policy, host_cpuid_policy,
-    pv_max_cpuid_policy, pv_def_cpuid_policy,
-    hvm_max_cpuid_policy, hvm_def_cpuid_policy;
-
-extern const struct old_cpu_policy system_policies[];
-
 /* Check that all previously present features are still available. */
 bool recheck_cpu_features(unsigned int cpu);
 
diff --git a/xen/arch/x86/include/asm/msr.h b/xen/arch/x86/include/asm/msr.h
index bb32bf19adc7425a315a4e9e23f85533b47b5208..8a4da50c500a2fcd57cf8744a62f8ddc5cd55acc 100644
@@ -280,13 +280,6 @@ static inline void wrmsr_tsc_aux(uint32_t val)
 
 uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);
 
-extern struct msr_policy     raw_msr_policy,
-                            host_msr_policy,
-                          pv_max_msr_policy,
-                          pv_def_msr_policy,
-                         hvm_max_msr_policy,
-                         hvm_def_msr_policy;
-
 /* Container object for per-vCPU MSRs */
 struct vcpu_msrs
 {
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index cf46b18aa64c153e6e07d87147f36d9a22d9c520..01f95603e29742362e6e206bad55b7093d919e2f 100644
@@ -25,6 +25,7 @@
 #include <xen/sched.h>
 
 #include <asm/amd.h>
+#include <asm/cpu-policy.h>
 #include <asm/debugreg.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/viridian.h>
 
 DEFINE_PER_CPU(uint32_t, tsc_aux);
 
-struct msr_policy __read_mostly     raw_msr_policy,
-                  __read_mostly    host_msr_policy;
-#ifdef CONFIG_PV
-struct msr_policy __read_mostly  pv_max_msr_policy;
-struct msr_policy __read_mostly  pv_def_msr_policy;
-#endif
-#ifdef CONFIG_HVM
-struct msr_policy __read_mostly hvm_max_msr_policy;
-struct msr_policy __read_mostly hvm_def_msr_policy;
-#endif
-
 static void __init calculate_raw_policy(void)
 {
-    struct msr_policy *mp = &raw_msr_policy;
+    struct msr_policy *mp = &raw_cpu_policy;
 
     /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
     /* Was already added by probe_cpuid_faulting() */
@@ -61,9 +51,9 @@ static void __init calculate_raw_policy(void)
 
 static void __init calculate_host_policy(void)
 {
-    struct msr_policy *mp = &host_msr_policy;
+    struct msr_policy *mp = &host_cpu_policy;
 
-    *mp = raw_msr_policy;
+    *mp = raw_cpu_policy;
 
     /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
     /* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
@@ -81,25 +71,25 @@ static void __init calculate_host_policy(void)
 
 static void __init calculate_pv_max_policy(void)
 {
-    struct msr_policy *mp = &pv_max_msr_policy;
+    struct msr_policy *mp = &pv_max_cpu_policy;
 
-    *mp = host_msr_policy;
+    *mp = host_cpu_policy;
 
     mp->arch_caps.raw = 0; /* Not supported yet. */
 }
 
 static void __init calculate_pv_def_policy(void)
 {
-    struct msr_policy *mp = &pv_def_msr_policy;
+    struct msr_policy *mp = &pv_def_cpu_policy;
 
-    *mp = pv_max_msr_policy;
+    *mp = pv_max_cpu_policy;
 }
 
 static void __init calculate_hvm_max_policy(void)
 {
-    struct msr_policy *mp = &hvm_max_msr_policy;
+    struct msr_policy *mp = &hvm_max_cpu_policy;
 
-    *mp = host_msr_policy;
+    *mp = host_cpu_policy;
 
     /* It's always possible to emulate CPUID faulting for HVM guests */
     mp->platform_info.cpuid_faulting = true;
@@ -109,9 +99,9 @@ static void __init calculate_hvm_max_policy(void)
 
 static void __init calculate_hvm_def_policy(void)
 {
-    struct msr_policy *mp = &hvm_def_msr_policy;
+    struct msr_policy *mp = &hvm_def_cpu_policy;
 
-    *mp = hvm_max_msr_policy;
+    *mp = hvm_max_cpu_policy;
 }
 
 void __init init_guest_msr_policy(void)
@@ -135,8 +125,8 @@ void __init init_guest_msr_policy(void)
 int init_domain_msr_policy(struct domain *d)
 {
     struct msr_policy *mp = is_pv_domain(d)
-        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_def_msr_policy : NULL)
-        : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_msr_policy : NULL);
+        ? (IS_ENABLED(CONFIG_PV)  ?  &pv_def_cpu_policy : NULL)
+        : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpu_policy : NULL);
 
     if ( !mp )
     {
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 838a9947bfe310bfa600d8591d0fd4cb7a69aa1b..c68242e5bcaff4841d680a69583fecc21a106b8f 100644
 #include <xen/cpu.h>
 #include <xsm/xsm.h>
 #include <asm/psr.h>
-#include <asm/cpuid.h>
-
-const struct old_cpu_policy system_policies[6] = {
-    [ XEN_SYSCTL_cpu_policy_raw ] = {
-        &raw_cpuid_policy,
-        &raw_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_host ] = {
-        &host_cpuid_policy,
-        &host_msr_policy,
-    },
-#ifdef CONFIG_PV
-    [ XEN_SYSCTL_cpu_policy_pv_max ] = {
-        &pv_max_cpuid_policy,
-        &pv_max_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_pv_default ] = {
-        &pv_def_cpuid_policy,
-        &pv_def_msr_policy,
-    },
-#endif
-#ifdef CONFIG_HVM
-    [ XEN_SYSCTL_cpu_policy_hvm_max ] = {
-        &hvm_max_cpuid_policy,
-        &hvm_max_msr_policy,
-    },
-    [ XEN_SYSCTL_cpu_policy_hvm_default ] = {
-        &hvm_def_cpuid_policy,
-        &hvm_def_msr_policy,
-    },
-#endif
-};
+#include <asm/cpu-policy.h>
 
 struct l3_cache_info {
     int ret;
@@ -327,19 +296,19 @@ long arch_do_sysctl(
 
     case XEN_SYSCTL_get_cpu_featureset:
     {
-        static const struct cpuid_policy *const policy_table[6] = {
-            [XEN_SYSCTL_cpu_featureset_raw]  = &raw_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_host] = &host_cpuid_policy,
+        static const struct cpu_policy *const policy_table[6] = {
+            [XEN_SYSCTL_cpu_featureset_raw]  = &raw_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_host] = &host_cpu_policy,
 #ifdef CONFIG_PV
-            [XEN_SYSCTL_cpu_featureset_pv]   = &pv_def_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_pv_max] = &pv_max_cpuid_policy,
+            [XEN_SYSCTL_cpu_featureset_pv]   = &pv_def_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_pv_max] = &pv_max_cpu_policy,
 #endif
 #ifdef CONFIG_HVM
-            [XEN_SYSCTL_cpu_featureset_hvm]  = &hvm_def_cpuid_policy,
-            [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpuid_policy,
+            [XEN_SYSCTL_cpu_featureset_hvm]  = &hvm_def_cpu_policy,
+            [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpu_policy,
 #endif
         };
-        const struct cpuid_policy *p = NULL;
+        const struct cpu_policy *p = NULL;
         uint32_t featureset[FSCAPINTS];
         unsigned int nr;
 
@@ -392,7 +361,19 @@ long arch_do_sysctl(
 
     case XEN_SYSCTL_get_cpu_policy:
     {
-        const struct old_cpu_policy *policy;
+        static const struct cpu_policy *const system_policies[6] = {
+            [XEN_SYSCTL_cpu_policy_raw]         = &raw_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_host]        = &host_cpu_policy,
+#ifdef CONFIG_PV
+            [XEN_SYSCTL_cpu_policy_pv_max]      = &pv_max_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_pv_default]  = &pv_def_cpu_policy,
+#endif
+#ifdef CONFIG_HVM
+            [XEN_SYSCTL_cpu_policy_hvm_max]     = &hvm_max_cpu_policy,
+            [XEN_SYSCTL_cpu_policy_hvm_default] = &hvm_def_cpu_policy,
+#endif
+        };
+        const struct cpu_policy *policy;
 
         /* Reserved field set, or bad policy index? */
         if ( sysctl->u.cpu_policy._rsvd ||
@@ -401,11 +382,11 @@ long arch_do_sysctl(
             ret = -EINVAL;
             break;
         }
-        policy = &system_policies[
+        policy = system_policies[
             array_index_nospec(sysctl->u.cpu_policy.index,
                                ARRAY_SIZE(system_policies))];
 
-        if ( !policy->cpuid || !policy->msr )
+        if ( !policy )
         {
             ret = -EOPNOTSUPP;
             break;
@@ -415,7 +396,7 @@ long arch_do_sysctl(
         if ( guest_handle_is_null(sysctl->u.cpu_policy.leaves) )
             sysctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
         else if ( (ret = x86_cpuid_copy_to_buffer(
-                       policy->cpuid,
+                       policy,
                        sysctl->u.cpu_policy.leaves,
                        &sysctl->u.cpu_policy.nr_leaves)) )
             break;
@@ -431,7 +412,7 @@ long arch_do_sysctl(
         if ( guest_handle_is_null(sysctl->u.cpu_policy.msrs) )
             sysctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES;
         else if ( (ret = x86_msr_copy_to_buffer(
-                       policy->msr,
+                       policy,
                        sysctl->u.cpu_policy.msrs,
                        &sysctl->u.cpu_policy.nr_msrs)) )
             break;