mode.
### xpti
-> `= <boolean>`
+> `= List of [ default | <boolean> | dom0=<bool> | domu=<bool> ]`
-> Default: `false` on AMD hardware
+> Default: `false` on hardware known not to be vulnerable to Meltdown (e.g. AMD)
> Default: `true` everywhere else
Override default selection of whether to isolate 64-bit PV guest page
tables.
+`true` activates page table isolation for all domains, even on hardware
+not vulnerable to Meltdown.
+
+`false` deactivates page table isolation on all systems for all domains.
+
+`default` sets the default behaviour.
+
+With `dom0` and `domu` it is possible to control page table isolation
+for dom0 or guest domains only.
+
** WARNING: Not yet a complete isolation implementation, but better than
nothing. **
if (test_bit(X86_FEATURE_SC_MSR_IDLE,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_SC_MSR_IDLE, c->x86_capability);
+ if (test_bit(X86_FEATURE_NO_XPTI,
+ boot_cpu_data.x86_capability))
+ __set_bit(X86_FEATURE_NO_XPTI, c->x86_capability);
/* AND the already accumulated flags with these */
for ( i = 0 ; i < NCAPINTS ; i++ )
d->arch.x87_fip_width = 4;
+ d->arch.pv_domain.xpti = 0;
+
return 0;
undo_and_fail:
goto fail;
}
else
+ {
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+ d->arch.pv_domain.xpti = opt_xpti & (is_hardware_domain(d)
+ ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
+ }
+
/* initialize default tsc behavior in case tools don't */
tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
spin_lock_init(&d->arch.vtsc_lock);
if ( compat32 )
{
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
+ d->arch.pv_domain.xpti = 0;
v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
if ( setup_compat_arg_xlat(v) != 0 )
BUG();
void write_ptbase(struct vcpu *v)
{
-    get_cpu_info()->root_pgt_changed = 1;
-    switch_cr3(v->arch.cr3);
+    struct cpu_info *cpu_info = get_cpu_info();
+
+    if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
+    {
+        /*
+         * XPTI is active for this domain: flag the per-CPU root page
+         * table as needing a refresh and record its physical address in
+         * pv_cr3, so it is loaded when 64-bit PV guest context is entered.
+         */
+        cpu_info->root_pgt_changed = 1;
+        cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
+        switch_cr3(v->arch.cr3);
+    }
+    else
+    {
+        /* Make sure to clear xen_cr3 before pv_cr3; switch_cr3() serializes. */
+        cpu_info->xen_cr3 = 0;
+        switch_cr3(v->arch.cr3);
+        /* pv_cr3 == 0: no CR3 write is to be performed on guest entry. */
+        cpu_info->pv_cr3 = 0;
+    }
}
/*
case PGT_l4_page_table:
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- if ( !rc && this_cpu(root_pgt) )
+ if ( !rc && pt_owner->arch.pv_domain.xpti )
{
sync_guest = 1;
if ( pagetable_get_pfn(curr->arch.guest_table) == mfn )
spin_debug_disable();
get_cpu_info()->xen_cr3 = 0;
- get_cpu_info()->pv_cr3 = this_cpu(root_pgt) ? __pa(this_cpu(root_pgt)) : 0;
+ get_cpu_info()->pv_cr3 = 0;
load_system_tables();
return 0;
}
-static __read_mostly int8_t opt_xpti = -1;
-boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static int setup_cpu_root_pgt(unsigned int cpu)
stack_base[0] = stack_start;
- if ( opt_xpti < 0 )
- opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
if ( per_cpu(root_pgt, 0) )
- get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+ get_cpu_info()->pv_cr3 = 0;
set_nr_sockets();
boot_cpu_has(X86_FEATURE_SC_RSB_HVM)) ? "" : " None",
boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "");
+
+ printk(" XPTI (64-bit PV only): Dom0 %s, DomU %s\n",
+ opt_xpti & OPT_XPTI_DOM0 ? "enabled" : "disabled",
+ opt_xpti & OPT_XPTI_DOMU ? "enabled" : "disabled");
}
/* Calculate whether Retpoline is known-safe on this CPU. */
}
}
+/* Sentinel meaning "xpti= has not (yet) been set on the command line". */
+#define OPT_XPTI_DEFAULT 0xff
+uint8_t __read_mostly opt_xpti = OPT_XPTI_DEFAULT;
+
+/*
+ * Apply the hardware-dependent default XPTI setting: off on AMD
+ * (not vulnerable to Meltdown), on for both dom0 and domU otherwise.
+ * Unless @force is set, an already established setting is left alone.
+ */
+static __init void xpti_init_default(bool_t force)
+{
+    if ( !force && (opt_xpti != OPT_XPTI_DEFAULT) )
+        return;
+
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+        opt_xpti = 0;
+    else
+        opt_xpti = OPT_XPTI_DOM0 | OPT_XPTI_DOMU;
+}
+
+/*
+ * Parse the "xpti" command line option: a comma separated list of
+ * "default", <boolean>, "dom0=<bool>" and "domu=<bool>" elements,
+ * applied left to right on top of the hardware-dependent default.
+ * Returns 0 on success, -EINVAL for an unrecognized element.
+ */
+static __init int parse_xpti(char *s)
+{
+    char *ss;
+    int val, rc = 0;
+
+    /* Start from the default unless an earlier option already chose. */
+    xpti_init_default(0);
+
+    do {
+        /* Isolate the next comma separated element. */
+        ss = strchr(s, ',');
+        if ( ss )
+            *ss = '\0';
+
+        switch ( parse_bool(s) )
+        {
+        case 0:
+            /* Plain "false": disable for all domains. */
+            opt_xpti = 0;
+            break;
+
+        case 1:
+            /* Plain "true": enable for all domains. */
+            opt_xpti = OPT_XPTI_DOM0 | OPT_XPTI_DOMU;
+            break;
+
+        default:
+            if ( !strcmp(s, "default") )
+                xpti_init_default(1);
+            else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
+                opt_xpti = (opt_xpti & ~OPT_XPTI_DOM0) |
+                           (val ? OPT_XPTI_DOM0 : 0);
+            else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
+                opt_xpti = (opt_xpti & ~OPT_XPTI_DOMU) |
+                           (val ? OPT_XPTI_DOMU : 0);
+            else
+                rc = -EINVAL;
+            break;
+        }
+
+        s = ss + 1;
+    } while ( ss );
+
+    return rc;
+}
+custom_param("xpti", parse_xpti);
+
void __init init_speculation_mitigations(void)
{
enum ind_thunk thunk = THUNK_DEFAULT;
if ( default_xen_spec_ctrl )
__set_bit(X86_FEATURE_SC_MSR_IDLE, boot_cpu_data.x86_capability);
+ xpti_init_default(0);
+ if ( opt_xpti == 0 )
+ __set_bit(X86_FEATURE_NO_XPTI, boot_cpu_data.x86_capability);
+ else
+ setup_clear_cpu_cap(X86_FEATURE_NO_XPTI);
+
print_details(thunk, caps);
/*
#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+15) /* clflush reqd with monitor */
#define X86_FEATURE_SC_MSR_IDLE (3*32+16) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
+#define X86_FEATURE_NO_XPTI (3*32+17) /* XPTI mitigation not in use */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
/*
* Of the two following fields the latter is being set to the CR3 value
* to be used on the given pCPU for loading whenever 64-bit PV guest
- * context is being entered. The value never changes once set.
+ * context is being entered. A value of zero indicates no setting of CR3
+ * is to be performed.
* The former is the value to restore when re-entering Xen, if any. IOW
* its value being zero means there's nothing to restore. However, its
* value can also be negative, indicating to the exit-to-Xen code that
atomic_t nr_l4_pages;
+ /* XPTI active? */
+ bool_t xpti;
+
/* map_domain_page() mapping cache. */
struct mapcache_domain mapcache;
};
#define flush_root_pgtbl_domain(d) \
{ \
- if ( this_cpu(root_pgt) && is_pv_domain(d) && !is_pv_32bit_domain(d) ) \
+ if ( is_pv_domain(d) && (d)->arch.pv_domain.xpti ) \
flush_mask((d)->domain_dirty_cpumask, FLUSH_ROOT_PGTBL); \
}
extern uint8_t default_xen_spec_ctrl;
extern uint8_t default_spec_ctrl_flags;
+extern uint8_t opt_xpti;
+#define OPT_XPTI_DOM0 0x01
+#define OPT_XPTI_DOMU 0x02
+
static inline void init_shadow_spec_ctrl_state(void)
{
struct cpu_info *info = get_cpu_info();