First of all, we don't need it on AMD systems. Additionally, allow its use
to be controlled by a command line option. For best backportability, this
intentionally doesn't use alternative instruction patching to achieve
the intended effect - while we likely want that, it will be a later
follow-up.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: e871e80c38547d9faefc6604532ba3e985e65873
master date: 2018-01-16 17:50:59 +0100
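
For context, the way the new control is wired up can be condensed as below.
This is an illustrative C sketch assembled from the hunks that follow, not
extra code introduced by the patch; xpti_pick_default() is an illustrative
name, the actual default selection happens inline during CPU bringup:

    /* Condensed from the hunks below: wiring of the "xpti=" option. */
    static __read_mostly int8_t opt_xpti = -1;  /* -1: no "xpti=" given */
    boolean_param("xpti", opt_xpti);

    static void xpti_pick_default(void)         /* illustrative helper name */
    {
        /* No command line override: off on AMD, on everywhere else. */
        if ( opt_xpti < 0 )
            opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
    }

With the option off, no per-CPU root page table is allocated and pv_cr3
stays 0, which the exit-to-guest path further down treats as "don't switch
page tables".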
clustered mode. The default, given no hint from the **FADT**, is cluster
mode.
+### xpti
+> `= <boolean>`
+
+> Default: `false` on AMD hardware
+> Default: `true` everywhere else
+
+Override default selection of whether to isolate 64-bit PV guest page
+tables.
+
+**WARNING: Not yet a complete isolation implementation, but better than
+nothing.**
+
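
For reference, this is an ordinary Xen boolean option, so (assuming the
usual boolean-parameter syntax) booting with e.g. `xpti=false` should force
the isolation off even on non-AMD hardware, while `xpti` or `xpti=true`
forces it on.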
### xsave
> `= <boolean>`
static void paravirt_ctxt_switch_to(struct vcpu *v)
{
+ root_pgentry_t *root_pgt = this_cpu(root_pgt);
unsigned long cr4;
switch_kernel_stack(v);
- this_cpu(root_pgt)[root_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+ if ( root_pgt )
+ root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] =
+ l4e_from_page(v->domain->arch.perdomain_l3_pg,
+ __PAGE_HYPERVISOR_RW);
cr4 = pv_guest_cr4_to_real_cr4(v);
if ( unlikely(cr4 != read_cr4()) )
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
if ( !rc )
- sync_guest = 1;
+ sync_guest = !!this_cpu(root_pgt);
break;
case PGT_writable_page:
perfc_incr(writable_mmu_updates);
spin_debug_disable();
get_cpu_info()->xen_cr3 = 0;
- get_cpu_info()->pv_cr3 = __pa(this_cpu(root_pgt));
+ get_cpu_info()->pv_cr3 = this_cpu(root_pgt) ? __pa(this_cpu(root_pgt)) : 0;
load_system_tables();
return 0;
}
+static __read_mostly int8_t opt_xpti = -1;
+boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static int setup_cpu_root_pgt(unsigned int cpu)
{
- root_pgentry_t *rpt = alloc_xen_pagetable();
+ root_pgentry_t *rpt;
unsigned int off;
int rc;
+ if ( !opt_xpti )
+ return 0;
+
+ rpt = alloc_xen_pagetable();
if ( !rpt )
return -ENOMEM;
stack_base[0] = stack_start;
+ if ( opt_xpti < 0 )
+ opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
- get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+ if ( per_cpu(root_pgt, 0) )
+ get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
set_nr_sockets();
cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
get_cpu_info()->xen_cr3 = 0;
+ get_cpu_info()->pv_cr3 = 0;
}
static void
movabs $DIRECTMAP_VIRT_START, %rcx
mov %rdi, %rax
and %rsi, %rdi
+ jz .Lrag_keep_cr3
and %r9, %rsi
add %rcx, %rdi
add %rcx, %rsi
rep movsq
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
write_cr3 rax, rdi, rsi
+.Lrag_keep_cr3:
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
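
The effect of the new `jz .Lrag_keep_cr3` / `.Lrag_keep_cr3:` pair in the
exit-to-guest path can be rendered in C roughly as follows. This is only a
sketch of the assembly above; copy_root_slots() stands in for the rep movsq
copies and is not a real function:

    /* Rough C rendering of the modified restore_all_guest tail. */
    void restore_all_guest_sketch(struct cpu_info *ci)
    {
        unsigned long new_cr3 = ci->pv_cr3;

        if ( new_cr3 )                      /* 0 now means "XPTI disabled" */
        {
            unsigned long old_cr3 = read_cr3();

            /* Refresh the per-CPU shadow root from the guest's root table. */
            copy_root_slots(maddr_to_virt(new_cr3), maddr_to_virt(old_cr3));

            ci->xen_cr3 = old_cr3;          /* where to switch back on entry */
            write_cr3(new_cr3);
        }
        /* else: keep the current CR3 (.Lrag_keep_cr3) and just RESTORE_ALL */
    }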