#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/microcode.h>
+#include <asm/prot-key.h>
#include <asm/spec_ctrl.h>
#include <acpi/cpufreq/cpufreq.h>
update_mcu_opt_ctrl();
+ /*
+ * This should be before restoring CR4, but that is earlier in asm and
+ * awkward. Instead, we rely on MSR_PKRS being something sane out of S3
+ * (0, or Xen's previous value) until this point, where we need to become
+ * certain that Xen's cache matches reality.
+ */
+ if ( cpu_has_pks )
+ wrpkrs_and_cache(0);
+
/* (re)initialise SYSCALL/SYSENTER state, amongst other things. */
percpu_traps_init();
DEFINE_PER_CPU(bool, full_gdt_loaded);
+DEFINE_PER_CPU(uint32_t, pkrs);
+
void __init setup_clear_cpu_cap(unsigned int cap)
{
const uint32_t *dfs;
#include <asm/event.h>
#include <asm/mce.h>
#include <asm/monitor.h>
+#include <asm/prot-key.h>
#include <public/arch-x86/cpuid.h>
static bool_t __initdata opt_force_ept;
static void vmx_save_guest_msrs(struct vcpu *v)
{
+ const struct cpuid_policy *cp = v->domain->arch.cpuid;
struct vcpu_msrs *msrs = v->arch.msrs;
/*
rdmsrl(MSR_RTIT_OUTPUT_MASK, msrs->rtit.output_mask);
rdmsrl(MSR_RTIT_STATUS, msrs->rtit.status);
}
+
+ if ( cp->feat.pks )
+ msrs->pkrs = rdpkrs_and_cache();
}
static void vmx_restore_guest_msrs(struct vcpu *v)
{
+ const struct cpuid_policy *cp = v->domain->arch.cpuid;
const struct vcpu_msrs *msrs = v->arch.msrs;
write_gs_shadow(v->arch.hvm.vmx.shadow_gs);
wrmsrl(MSR_RTIT_OUTPUT_MASK, msrs->rtit.output_mask);
wrmsrl(MSR_RTIT_STATUS, msrs->rtit.status);
}
+
+ if ( cp->feat.pks )
+ wrpkrs(msrs->pkrs);
}
void vmx_update_cpu_exec_control(struct vcpu *v)
};
} rtit;
+ /*
+ * 0x000006e1 - MSR_PKRS - Protection Key Supervisor.
+ *
+ * Exposed R/W to guests. Xen doesn't use PKS yet, so only context
+ * switched per vcpu. When in current context, live value is in hardware,
+ * and this value is stale.
+ */
+ uint32_t pkrs;
+
/* 0x00000da0 - MSR_IA32_XSS */
struct {
uint64_t raw;
#ifndef ASM_PROT_KEY_H
#define ASM_PROT_KEY_H
+#include <xen/percpu.h>
#include <xen/types.h>
+#include <asm/msr.h>
+
#define PKEY_AD 1 /* Access Disable */
#define PKEY_WD 2 /* Write Disable */
:: "a" (pkru), "d" (0), "c" (0) );
}
+/*
+ * Xen does not use PKS.
+ *
+ * Guest kernel use is expected to be one default key, except for tiny windows
+ * with a double write to switch to a non-default key in a permitted critical
+ * section.
+ *
+ * As such, we want MSR_PKRS un-intercepted. Furthermore, as we only need it
+ * in Xen for emulation or migration purposes (i.e. possibly never in a
+ * domain's lifetime), we don't want to re-sync the hardware value on every
+ * vmexit.
+ *
+ * Therefore, we read and cache the guest value in ctxt_switch_from(), in the
+ * expectation that we can short-circuit the write in ctxt_switch_to().
+ * During regular operations in current context, the guest value is in
+ * hardware and the per-cpu cache is stale.
+ */
+DECLARE_PER_CPU(uint32_t, pkrs);
+
+/* Read the live MSR_PKRS value from hardware (does not touch the cache). */
+static inline uint32_t rdpkrs(void)
+{
+    uint32_t pkrs, tmp;
+
+    /* Only the low 32 bits are of interest; the high half (tmp) is discarded. */
+    rdmsr(MSR_PKRS, pkrs, tmp);
+
+    return pkrs;
+}
+
+/* Read MSR_PKRS from hardware and refresh this CPU's cached copy. */
+static inline uint32_t rdpkrs_and_cache(void)
+{
+    uint32_t val = rdpkrs();
+
+    this_cpu(pkrs) = val;
+
+    return val;
+}
+
+/*
+ * Lazy write of MSR_PKRS: updates hardware and the per-cpu cache, but
+ * short-circuits the MSR write when the cached value already matches.
+ */
+static inline void wrpkrs(uint32_t pkrs)
+{
+    uint32_t *cache = &this_cpu(pkrs);
+
+    if ( *cache == pkrs )
+        return;
+
+    *cache = pkrs;
+    wrmsr_ns(MSR_PKRS, pkrs, 0);
+}
+
+/*
+ * Unconditional write of MSR_PKRS, refreshing the per-cpu cache.  Used
+ * where the cache cannot be trusted to match hardware (callers invoke this
+ * during boot/resume paths), so no short-circuit is performed.
+ */
+static inline void wrpkrs_and_cache(uint32_t pkrs)
+{
+    this_cpu(pkrs) = pkrs;
+
+    wrmsr_ns(MSR_PKRS, pkrs, 0);
+}
+
#endif /* ASM_PROT_KEY_H */
#include <asm/spec_ctrl.h>
#include <asm/guest.h>
#include <asm/microcode.h>
+#include <asm/prot-key.h>
#include <asm/pv/domain.h>
/* opt_nosmp: If true, secondary processors are ignored. */
if ( opt_invpcid && cpu_has_invpcid )
use_invpcid = true;
+ if ( cpu_has_pks )
+ wrpkrs_and_cache(0); /* Must be before setting CR4.PKS */
+
init_speculation_mitigations();
init_idle_domain();
#include <asm/microcode.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
+#include <asm/prot-key.h>
#include <asm/setup.h>
#include <asm/spec_ctrl.h>
#include <asm/time.h>
/* Full exception support from here on in. */
+ if ( cpu_has_pks )
+ wrpkrs_and_cache(0); /* Must be before setting CR4.PKS */
+
/* Safe to enable feature such as CR4.MCE with the IDT set up now. */
write_cr4(mmu_cr4_features);