#include <xen/mm.h>
#include <xen/stdbool.h>
#include <asm/flushtlb.h>
+#include <asm/invpcid.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
* has been called.
*/
-static void prepare_set(void)
+static bool prepare_set(void)
{
+ unsigned long cr4;
+
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
write_cr0(read_cr0() | X86_CR0_CD);
wbinvd();
- /* TLB flushing here relies on Xen always using CR4.PGE. */
- BUILD_BUG_ON(!(XEN_MINIMAL_CR4 & X86_CR4_PGE));
- write_cr4(read_cr4() & ~X86_CR4_PGE);
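+ /*
+  * Clear CR4.PGE if it is set: any write to CR4 that changes PGE flushes
+  * the whole TLB, including global entries. Without PGE no global entries
+  * exist, so an INVPCID all-context flush or a CR3 reload is sufficient.
+  */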
+ cr4 = read_cr4();
+ if (cr4 & X86_CR4_PGE)
+ write_cr4(cr4 & ~X86_CR4_PGE);
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
/* Save MTRR state */
rdmsrl(MSR_MTRRdefType, deftype);
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
+
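+ /* Report whether CR4.PGE was set so that post_set() can restore it. */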
+ return cr4 & X86_CR4_PGE;
}
-static void post_set(void)
+static void post_set(bool pge)
{
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype);
write_cr0(read_cr0() & ~X86_CR0_CD);
/* Reenable CR4.PGE (also flushes the TLB) */
- write_cr4(read_cr4() | X86_CR4_PGE);
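+ /* Without PGE, flush explicitly via INVPCID or a CR3 reload instead. */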
+ if (pge)
+ write_cr4(read_cr4() | X86_CR4_PGE);
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
spin_unlock(&set_atomicity_lock);
}
{
unsigned long mask, count;
unsigned long flags;
+ bool pge;
local_irq_save(flags);
- prepare_set();
+ pge = prepare_set();
/* Actually set the state */
mask = set_mtrr_state();
- post_set();
+ post_set(pge);
local_irq_restore(flags);
/* Use the atomic bitops to update the global mask */
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
-
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
{
unsigned long flags;
struct mtrr_var_range *vr;
+ bool pge;
vr = &mtrr_state.var_ranges[reg];
local_irq_save(flags);
- prepare_set();
+ pge = prepare_set();
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
}
- post_set();
+ post_set(pge);
local_irq_restore(flags);
}
#include <xen/smp.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
+#include <asm/invpcid.h>
#include <asm/page.h>
/* Debug builds: Wrap frequently to stress-test the wrap logic. */
this_cpu(tlbflush_time) = t;
}
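+/* Flush all TLB entries (including global ones) on the local CPU. */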
+static void do_tlb_flush(void)
+{
+ u32 t = pre_flush();
+
+ if ( use_invpcid )
+ invpcid_flush_all();
+ else
+ {
+ unsigned long cr4 = read_cr4();
+
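+ /* Any write to CR4 that changes PGE flushes the entire TLB, so toggle it twice. */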
+ write_cr4(cr4 ^ X86_CR4_PGE);
+ write_cr4(cr4);
+ }
+
+ post_flush(t);
+}
+
void switch_cr3(unsigned long cr3)
{
unsigned long flags, cr4;
: : "m" (*(const char *)(va)) : "memory" );
}
else
- {
- u32 t = pre_flush();
- unsigned long cr4 = read_cr4();
-
- write_cr4(cr4 & ~X86_CR4_PGE);
- barrier();
- write_cr4(cr4);
-
- post_flush(t);
- }
+ do_tlb_flush();
}
if ( flags & FLUSH_CACHE )
static unsigned int __initdata max_cpus;
integer_param("maxcpus", max_cpus);
+/* opt_invpcid: If false, don't use the INVPCID instruction even if it is available. */
+static bool __initdata opt_invpcid = true;
+boolean_param("invpcid", opt_invpcid);
+bool __read_mostly use_invpcid;
+
unsigned long __read_mostly cr4_pv32_mask;
/* **** Linux config option: propagated to domain0. */
if ( cpu_has_fsgsbase )
set_in_cr4(X86_CR4_FSGSBASE);
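+ /* Use INVPCID only when permitted on the command line and supported by the CPU. */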
+ if ( opt_invpcid && cpu_has_invpcid )
+ use_invpcid = true;
+
init_speculation_mitigations();
init_idle_domain();