obj-bin-y += bzimage.init.o
obj-bin-y += clear_page.o
obj-bin-y += copy_page.o
+obj-y += cpu-policy.o
obj-y += cpuid.o
obj-$(CONFIG_PV) += compat.o x86_64/compat.o
obj-$(CONFIG_KEXEC) += crash.o
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions of the system-wide CPU policy objects.
+ *
+ * These merge the previously separate cpuid_policy and msr_policy
+ * objects into single cpu_policy objects (struct cpu_policy comes from
+ * xen/lib/x86/cpu-policy.h, and is interchangeable with the old
+ * cpuid/msr policy types during the transition -- confirm against that
+ * header).
+ */
+#include <xen/cache.h>
+#include <xen/kernel.h>
+
+#include <xen/lib/x86/cpu-policy.h>
+
+#include <asm/cpu-policy.h>
+
+/* Raw policy, filled from native CPUID/MSR values at boot. */
+struct cpu_policy __read_mostly raw_cpu_policy;
+/* Host policy, derived from raw with Xen's adjustments applied. */
+struct cpu_policy __read_mostly host_cpu_policy;
+#ifdef CONFIG_PV
+/* Max and default policies offered to PV guests. */
+struct cpu_policy __read_mostly pv_max_cpu_policy;
+struct cpu_policy __read_mostly pv_def_cpu_policy;
+#endif
+#ifdef CONFIG_HVM
+/* Max and default policies offered to HVM guests. */
+struct cpu_policy __read_mostly hvm_max_cpu_policy;
+struct cpu_policy __read_mostly hvm_def_cpu_policy;
+#endif
#include <xen/delay.h>
#include <xen/param.h>
#include <xen/smp.h>
+
+#include <asm/cpu-policy.h>
#include <asm/current.h>
#include <asm/debugreg.h>
#include <asm/processor.h>
return false;
if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
- raw_msr_policy.platform_info.cpuid_faulting =
+ raw_cpu_policy.platform_info.cpuid_faulting =
val & MSR_PLATFORM_INFO_CPUID_FAULTING;
if (rc ||
#include <xen/param.h>
#include <xen/sched.h>
#include <xen/nospec.h>
+#include <asm/cpu-policy.h>
#include <asm/cpuid.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/nestedhvm.h>
memset(&l[first], 0, sizeof(*l) * (last - first + 1));
}
-struct cpuid_policy __read_mostly raw_cpuid_policy,
- __read_mostly host_cpuid_policy;
-#ifdef CONFIG_PV
-struct cpuid_policy __read_mostly pv_max_cpuid_policy;
-struct cpuid_policy __read_mostly pv_def_cpuid_policy;
-#endif
-#ifdef CONFIG_HVM
-struct cpuid_policy __read_mostly hvm_max_cpuid_policy;
-struct cpuid_policy __read_mostly hvm_def_cpuid_policy;
-#endif
-
static void sanitise_featureset(uint32_t *fs)
{
/* for_each_set_bit() uses unsigned longs. Extend with zeroes. */
static void __init calculate_raw_policy(void)
{
- struct cpuid_policy *p = &raw_cpuid_policy;
+ struct cpuid_policy *p = &raw_cpu_policy;
x86_cpuid_policy_fill_native(p);
static void __init calculate_host_policy(void)
{
- struct cpuid_policy *p = &host_cpuid_policy;
+ struct cpuid_policy *p = &host_cpu_policy;
unsigned int max_extd_leaf;
- *p = raw_cpuid_policy;
+ *p = raw_cpu_policy;
p->basic.max_leaf =
min_t(uint32_t, p->basic.max_leaf, ARRAY_SIZE(p->basic.raw) - 1);
* of IBRS by using the AMD feature bit. An administrator may wish for
* performance reasons to offer IBPB without IBRS.
*/
- if ( host_cpuid_policy.feat.ibrsb )
+ if ( host_cpu_policy.feat.ibrsb )
__set_bit(X86_FEATURE_IBPB, fs);
}
static void __init calculate_pv_max_policy(void)
{
- struct cpuid_policy *p = &pv_max_cpuid_policy;
+ struct cpuid_policy *p = &pv_max_cpu_policy;
uint32_t pv_featureset[FSCAPINTS];
unsigned int i;
- *p = host_cpuid_policy;
+ *p = host_cpu_policy;
cpuid_policy_to_featureset(p, pv_featureset);
for ( i = 0; i < ARRAY_SIZE(pv_featureset); ++i )
static void __init calculate_pv_def_policy(void)
{
- struct cpuid_policy *p = &pv_def_cpuid_policy;
+ struct cpuid_policy *p = &pv_def_cpu_policy;
uint32_t pv_featureset[FSCAPINTS];
unsigned int i;
- *p = pv_max_cpuid_policy;
+ *p = pv_max_cpu_policy;
cpuid_policy_to_featureset(p, pv_featureset);
for ( i = 0; i < ARRAY_SIZE(pv_featureset); ++i )
static void __init calculate_hvm_max_policy(void)
{
- struct cpuid_policy *p = &hvm_max_cpuid_policy;
+ struct cpuid_policy *p = &hvm_max_cpu_policy;
uint32_t hvm_featureset[FSCAPINTS];
unsigned int i;
const uint32_t *hvm_featuremask;
- *p = host_cpuid_policy;
+ *p = host_cpu_policy;
cpuid_policy_to_featureset(p, hvm_featureset);
hvm_featuremask = hvm_hap_supported() ?
* HVM guests are able if running in protected mode.
*/
if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
- raw_cpuid_policy.basic.sep )
+ raw_cpu_policy.basic.sep )
__set_bit(X86_FEATURE_SEP, hvm_featureset);
/*
static void __init calculate_hvm_def_policy(void)
{
- struct cpuid_policy *p = &hvm_def_cpuid_policy;
+ struct cpuid_policy *p = &hvm_def_cpu_policy;
uint32_t hvm_featureset[FSCAPINTS];
unsigned int i;
const uint32_t *hvm_featuremask;
- *p = hvm_max_cpuid_policy;
+ *p = hvm_max_cpu_policy;
cpuid_policy_to_featureset(p, hvm_featureset);
hvm_featuremask = hvm_hap_supported() ?
{
struct cpuid_policy *p = d->arch.cpuid;
const struct cpuid_policy *max = is_pv_domain(d)
- ? (IS_ENABLED(CONFIG_PV) ? &pv_max_cpuid_policy : NULL)
- : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpuid_policy : NULL);
+ ? (IS_ENABLED(CONFIG_PV) ? &pv_max_cpu_policy : NULL)
+ : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpu_policy : NULL);
uint32_t fs[FSCAPINTS], max_fs[FSCAPINTS];
unsigned int i;
/* Fold host's FDP_EXCP_ONLY and NO_FPU_SEL into guest's view. */
fs[FEATURESET_7b0] &= ~(cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
cpufeat_mask(X86_FEATURE_NO_FPU_SEL));
- fs[FEATURESET_7b0] |= (host_cpuid_policy.feat._7b0 &
+ fs[FEATURESET_7b0] |= (host_cpu_policy.feat._7b0 &
(cpufeat_mask(X86_FEATURE_FDP_EXCP_ONLY) |
cpufeat_mask(X86_FEATURE_NO_FPU_SEL)));
int init_domain_cpuid_policy(struct domain *d)
{
struct cpuid_policy *p = is_pv_domain(d)
- ? (IS_ENABLED(CONFIG_PV) ? &pv_def_cpuid_policy : NULL)
- : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpuid_policy : NULL);
+ ? (IS_ENABLED(CONFIG_PV) ? &pv_def_cpu_policy : NULL)
+ : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpu_policy : NULL);
if ( !p )
{
if ( is_pv_domain(d) && is_hardware_domain(d) &&
guest_kernel_mode(v, regs) && cpu_has_monitor &&
regs->entry_vector == TRAP_gp_fault )
- *res = raw_cpuid_policy.basic.raw[5];
+ *res = raw_cpu_policy.basic.raw[5];
break;
case 0x7:
/* Find some more clever allocation scheme if this trips. */
BUILD_BUG_ON(sizeof(struct cpuid_policy) > PAGE_SIZE);
- BUILD_BUG_ON(sizeof(raw_cpuid_policy.basic) !=
- sizeof(raw_cpuid_policy.basic.raw));
- BUILD_BUG_ON(sizeof(raw_cpuid_policy.feat) !=
- sizeof(raw_cpuid_policy.feat.raw));
- BUILD_BUG_ON(sizeof(raw_cpuid_policy.xstate) !=
- sizeof(raw_cpuid_policy.xstate.raw));
- BUILD_BUG_ON(sizeof(raw_cpuid_policy.extd) !=
- sizeof(raw_cpuid_policy.extd.raw));
+ BUILD_BUG_ON(sizeof(raw_cpu_policy.basic) !=
+ sizeof(raw_cpu_policy.basic.raw));
+ BUILD_BUG_ON(sizeof(raw_cpu_policy.feat) !=
+ sizeof(raw_cpu_policy.feat.raw));
+ BUILD_BUG_ON(sizeof(raw_cpu_policy.xstate) !=
+ sizeof(raw_cpu_policy.xstate.raw));
+ BUILD_BUG_ON(sizeof(raw_cpu_policy.extd) !=
+ sizeof(raw_cpu_policy.extd.raw));
}
/*
#include <asm/xstate.h>
#include <asm/debugger.h>
#include <asm/psr.h>
-#include <asm/cpuid.h>
+#include <asm/cpu-policy.h>
#ifdef CONFIG_GDBSX
static int gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
xen_domctl_cpu_policy_t *xdpc)
{
struct old_cpu_policy new = {};
- const struct old_cpu_policy *sys = is_pv_domain(d)
- ? &system_policies[XEN_SYSCTL_cpu_policy_pv_max]
- : &system_policies[XEN_SYSCTL_cpu_policy_hvm_max];
+ struct cpu_policy *sys = is_pv_domain(d)
+ ? (IS_ENABLED(CONFIG_PV) ? &pv_max_cpu_policy : NULL)
+ : (IS_ENABLED(CONFIG_HVM) ? &hvm_max_cpu_policy : NULL);
+ struct old_cpu_policy old_sys = { sys, sys };
struct cpu_policy_errors err = INIT_CPU_POLICY_ERRORS;
int ret = -ENOMEM;
+ if ( !sys )
+ {
+ ASSERT_UNREACHABLE();
+ return -EOPNOTSUPP;
+ }
+
/* Start by copying the domain's existing policies. */
if ( !(new.cpuid = xmemdup(d->arch.cpuid)) ||
!(new.msr = xmemdup(d->arch.msr)) )
x86_cpuid_policy_clear_out_of_range_leaves(new.cpuid);
/* Audit the combined dataset. */
- ret = x86_cpu_policies_are_compatible(sys, &new, &err);
+ ret = x86_cpu_policies_are_compatible(&old_sys, &new, &err);
if ( ret )
goto out;
#include <xen/nospec.h>
#include <xen/sched.h>
+#include <asm/cpu-policy.h>
#include <asm/debugreg.h>
#include <asm/hvm/viridian.h>
#include <asm/msr.h>
DEFINE_PER_CPU(uint32_t, tsc_aux);
-struct msr_policy __read_mostly raw_msr_policy,
- __read_mostly host_msr_policy;
-#ifdef CONFIG_PV
-struct msr_policy __read_mostly pv_max_msr_policy;
-struct msr_policy __read_mostly pv_def_msr_policy;
-#endif
-#ifdef CONFIG_HVM
-struct msr_policy __read_mostly hvm_max_msr_policy;
-struct msr_policy __read_mostly hvm_def_msr_policy;
-#endif
-
static void __init calculate_raw_policy(void)
{
- struct msr_policy *mp = &raw_msr_policy;
+ struct msr_policy *mp = &raw_cpu_policy;
/* 0x000000ce MSR_INTEL_PLATFORM_INFO */
/* Was already added by probe_cpuid_faulting() */
static void __init calculate_host_policy(void)
{
- struct msr_policy *mp = &host_msr_policy;
+ struct msr_policy *mp = &host_cpu_policy;
- *mp = raw_msr_policy;
+ *mp = raw_cpu_policy;
/* 0x000000ce MSR_INTEL_PLATFORM_INFO */
/* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
static void __init calculate_pv_max_policy(void)
{
- struct msr_policy *mp = &pv_max_msr_policy;
+ struct msr_policy *mp = &pv_max_cpu_policy;
- *mp = host_msr_policy;
+ *mp = host_cpu_policy;
mp->arch_caps.raw = 0; /* Not supported yet. */
}
static void __init calculate_pv_def_policy(void)
{
- struct msr_policy *mp = &pv_def_msr_policy;
+ struct msr_policy *mp = &pv_def_cpu_policy;
- *mp = pv_max_msr_policy;
+ *mp = pv_max_cpu_policy;
}
static void __init calculate_hvm_max_policy(void)
{
- struct msr_policy *mp = &hvm_max_msr_policy;
+ struct msr_policy *mp = &hvm_max_cpu_policy;
- *mp = host_msr_policy;
+ *mp = host_cpu_policy;
/* It's always possible to emulate CPUID faulting for HVM guests */
mp->platform_info.cpuid_faulting = true;
static void __init calculate_hvm_def_policy(void)
{
- struct msr_policy *mp = &hvm_def_msr_policy;
+ struct msr_policy *mp = &hvm_def_cpu_policy;
- *mp = hvm_max_msr_policy;
+ *mp = hvm_max_cpu_policy;
}
void __init init_guest_msr_policy(void)
int init_domain_msr_policy(struct domain *d)
{
struct msr_policy *mp = is_pv_domain(d)
- ? (IS_ENABLED(CONFIG_PV) ? &pv_def_msr_policy : NULL)
- : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_msr_policy : NULL);
+ ? (IS_ENABLED(CONFIG_PV) ? &pv_def_cpu_policy : NULL)
+ : (IS_ENABLED(CONFIG_HVM) ? &hvm_def_cpu_policy : NULL);
if ( !mp )
{
#include <xen/cpu.h>
#include <xsm/xsm.h>
#include <asm/psr.h>
-#include <asm/cpuid.h>
-
-const struct old_cpu_policy system_policies[6] = {
- [ XEN_SYSCTL_cpu_policy_raw ] = {
- &raw_cpuid_policy,
- &raw_msr_policy,
- },
- [ XEN_SYSCTL_cpu_policy_host ] = {
- &host_cpuid_policy,
- &host_msr_policy,
- },
-#ifdef CONFIG_PV
- [ XEN_SYSCTL_cpu_policy_pv_max ] = {
- &pv_max_cpuid_policy,
- &pv_max_msr_policy,
- },
- [ XEN_SYSCTL_cpu_policy_pv_default ] = {
- &pv_def_cpuid_policy,
- &pv_def_msr_policy,
- },
-#endif
-#ifdef CONFIG_HVM
- [ XEN_SYSCTL_cpu_policy_hvm_max ] = {
- &hvm_max_cpuid_policy,
- &hvm_max_msr_policy,
- },
- [ XEN_SYSCTL_cpu_policy_hvm_default ] = {
- &hvm_def_cpuid_policy,
- &hvm_def_msr_policy,
- },
-#endif
-};
+#include <asm/cpu-policy.h>
struct l3_cache_info {
int ret;
case XEN_SYSCTL_get_cpu_featureset:
{
- static const struct cpuid_policy *const policy_table[6] = {
- [XEN_SYSCTL_cpu_featureset_raw] = &raw_cpuid_policy,
- [XEN_SYSCTL_cpu_featureset_host] = &host_cpuid_policy,
+ static const struct cpu_policy *const policy_table[6] = {
+ [XEN_SYSCTL_cpu_featureset_raw] = &raw_cpu_policy,
+ [XEN_SYSCTL_cpu_featureset_host] = &host_cpu_policy,
#ifdef CONFIG_PV
- [XEN_SYSCTL_cpu_featureset_pv] = &pv_def_cpuid_policy,
- [XEN_SYSCTL_cpu_featureset_pv_max] = &pv_max_cpuid_policy,
+ [XEN_SYSCTL_cpu_featureset_pv] = &pv_def_cpu_policy,
+ [XEN_SYSCTL_cpu_featureset_pv_max] = &pv_max_cpu_policy,
#endif
#ifdef CONFIG_HVM
- [XEN_SYSCTL_cpu_featureset_hvm] = &hvm_def_cpuid_policy,
- [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpuid_policy,
+ [XEN_SYSCTL_cpu_featureset_hvm] = &hvm_def_cpu_policy,
+ [XEN_SYSCTL_cpu_featureset_hvm_max] = &hvm_max_cpu_policy,
#endif
};
- const struct cpuid_policy *p = NULL;
+ const struct cpu_policy *p = NULL;
uint32_t featureset[FSCAPINTS];
unsigned int nr;
case XEN_SYSCTL_get_cpu_policy:
{
- const struct old_cpu_policy *policy;
+ static const struct cpu_policy *const system_policies[6] = {
+ [XEN_SYSCTL_cpu_policy_raw] = &raw_cpu_policy,
+ [XEN_SYSCTL_cpu_policy_host] = &host_cpu_policy,
+#ifdef CONFIG_PV
+ [XEN_SYSCTL_cpu_policy_pv_max] = &pv_max_cpu_policy,
+ [XEN_SYSCTL_cpu_policy_pv_default] = &pv_def_cpu_policy,
+#endif
+#ifdef CONFIG_HVM
+ [XEN_SYSCTL_cpu_policy_hvm_max] = &hvm_max_cpu_policy,
+ [XEN_SYSCTL_cpu_policy_hvm_default] = &hvm_def_cpu_policy,
+#endif
+ };
+ const struct cpu_policy *policy;
/* Reserved field set, or bad policy index? */
if ( sysctl->u.cpu_policy._rsvd ||
ret = -EINVAL;
break;
}
- policy = &system_policies[
+ policy = system_policies[
array_index_nospec(sysctl->u.cpu_policy.index,
ARRAY_SIZE(system_policies))];
- if ( !policy->cpuid || !policy->msr )
+ if ( !policy )
{
ret = -EOPNOTSUPP;
break;
if ( guest_handle_is_null(sysctl->u.cpu_policy.leaves) )
sysctl->u.cpu_policy.nr_leaves = CPUID_MAX_SERIALISED_LEAVES;
else if ( (ret = x86_cpuid_copy_to_buffer(
- policy->cpuid,
+ policy,
sysctl->u.cpu_policy.leaves,
&sysctl->u.cpu_policy.nr_leaves)) )
break;
if ( guest_handle_is_null(sysctl->u.cpu_policy.msrs) )
sysctl->u.cpu_policy.nr_msrs = MSR_MAX_SERIALISED_ENTRIES;
else if ( (ret = x86_msr_copy_to_buffer(
- policy->msr,
+ policy,
sysctl->u.cpu_policy.msrs,
&sysctl->u.cpu_policy.nr_msrs)) )
break;
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef X86_CPU_POLICY_H
+#define X86_CPU_POLICY_H
+
+/* Merged CPUID+MSR policy object; full definition lives elsewhere. */
+struct cpu_policy;
+
+/*
+ * System-wide policy objects, calculated at boot.
+ *
+ * The pv_* and hvm_* objects are only defined under CONFIG_PV and
+ * CONFIG_HVM respectively.  The declarations are deliberately left
+ * unguarded so that IS_ENABLED()-style users still compile.
+ */
+extern struct cpu_policy raw_cpu_policy;
+extern struct cpu_policy host_cpu_policy;
+extern struct cpu_policy pv_max_cpu_policy;
+extern struct cpu_policy pv_def_cpu_policy;
+extern struct cpu_policy hvm_max_cpu_policy;
+extern struct cpu_policy hvm_def_cpu_policy;
+
+#endif /* X86_CPU_POLICY_H */
/* Default masking MSR values, calculated at boot. */
extern struct cpuidmasks cpuidmask_defaults;
-extern struct cpuid_policy raw_cpuid_policy, host_cpuid_policy,
- pv_max_cpuid_policy, pv_def_cpuid_policy,
- hvm_max_cpuid_policy, hvm_def_cpuid_policy;
-
-extern const struct old_cpu_policy system_policies[];
-
/* Check that all previously present features are still available. */
bool recheck_cpu_features(unsigned int cpu);
uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);
-extern struct msr_policy raw_msr_policy,
- host_msr_policy,
- pv_max_msr_policy,
- pv_def_msr_policy,
- hvm_max_msr_policy,
- hvm_def_msr_policy;
-
/* Container object for per-vCPU MSRs */
struct vcpu_msrs
{