#include <xen/init.h>
#include <xen/mm.h>
#include <asm/flushtlb.h>
+#include <asm/invpcid.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
}
-static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
* has been called.
*/
-static void prepare_set(void)
+static bool_t prepare_set(void)
{
- unsigned long cr0;
+ unsigned long cr0, cr4;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
write_cr0(cr0);
wbinvd();
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if ( cpu_has_pge ) {
- cr4 = read_cr4();
+ cr4 = read_cr4();
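+ /* Clearing PGE flushes the TLB (global entries included); without PGE
+    there are no global entries, so INVPCID or a CR3 reload suffices. */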
+ if (cr4 & X86_CR4_PGE)
write_cr4(cr4 & ~X86_CR4_PGE);
- }
-
- /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- flush_tlb_local();
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
/* Save MTRR state */
rdmsrl(MSR_MTRRdefType, deftype);
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype & ~0xcff);
+
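+ /* Tell the caller whether PGE was set, so post_set() can restore it. */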
+ return cr4 & X86_CR4_PGE;
}
-static void post_set(void)
+static void post_set(bool_t pge)
{
- /* Flush TLBs (no need to flush caches - they are disabled) */
- flush_tlb_local();
-
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
- if ( cpu_has_pge )
- write_cr4(cr4);
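+ /* Re-enabling PGE flushes the TLB; otherwise flush explicitly via
+    INVPCID or a CR3 reload. */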
+ if (pge)
+ write_cr4(read_cr4() | X86_CR4_PGE);
+ else if (use_invpcid)
+ invpcid_flush_all();
+ else
+ write_cr3(read_cr3());
+
spin_unlock(&set_atomicity_lock);
}
{
unsigned long mask, count;
unsigned long flags;
+ bool_t pge;
local_irq_save(flags);
- prepare_set();
+ pge = prepare_set();
/* Actually set the state */
mask = set_mtrr_state();
- post_set();
+ post_set(pge);
local_irq_restore(flags);
/* Use the atomic bitops to update the global mask */
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
-
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
{
unsigned long flags;
struct mtrr_var_range *vr;
+ bool_t pge;
vr = &mtrr_state.var_ranges[reg];
local_irq_save(flags);
- prepare_set();
+ pge = prepare_set();
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
mtrr_wrmsr(MSR_IA32_MTRR_PHYSMASK(reg), vr->mask);
}
- post_set();
+ post_set(pge);
local_irq_restore(flags);
}
#include <xen/smp.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
+#include <asm/invpcid.h>
#include <asm/page.h>
/* Debug builds: Wrap frequently to stress-test the wrap logic. */
this_cpu(tlbflush_time) = t;
}
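+/* Flush all TLB entries (global ones included) on the local CPU. */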
+static void do_tlb_flush(void)
+{
+ u32 t = pre_flush();
+
+ if ( use_invpcid )
+ invpcid_flush_all();
+ else
+ {
+ unsigned long cr4 = read_cr4();
+
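+ /* Toggling CR4.PGE flushes all TLB entries, global ones included. */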
+ write_cr4(cr4 ^ X86_CR4_PGE);
+ write_cr4(cr4);
+ }
+
+ post_flush(t);
+}
+
void switch_cr3(unsigned long cr3)
{
unsigned long flags, cr4;
: : "m" (*(const char *)(va)) : "memory" );
}
else
- {
- u32 t = pre_flush();
- unsigned long cr4 = read_cr4();
-
- write_cr4(cr4 & ~X86_CR4_PGE);
- barrier();
- write_cr4(cr4);
-
- post_flush(t);
- }
+ do_tlb_flush();
}
if ( flags & FLUSH_CACHE )
static bool_t __initdata disable_smap;
invbool_param("smap", disable_smap);
+/* opt_invpcid: If false, don't use the INVPCID instruction even if available. */
+static bool_t __initdata opt_invpcid = 1;
+boolean_param("invpcid", opt_invpcid);
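+/* use_invpcid: Whether TLB flushes may use the INVPCID instruction. */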
+bool_t __read_mostly use_invpcid;
+
unsigned long __read_mostly cr4_pv32_mask;
/* Boot dom0 in pvh mode */
if ( cpu_has_fsgsbase )
set_in_cr4(X86_CR4_FSGSBASE);
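+ /* Prefer INVPCID for TLB flushes unless disabled on the command line. */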
+ if ( opt_invpcid && cpu_has_invpcid )
+ use_invpcid = 1;
+
init_speculation_mitigations();
init_idle_domain();