static void paravirt_ctxt_switch_to(struct vcpu *v)
{
root_pgentry_t *root_pgt = this_cpu(root_pgt);
- unsigned long cr4;
switch_kernel_stack(v);
if ( root_pgt )
root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_page(v->domain->arch.perdomain_l3_pg,
__PAGE_HYPERVISOR_RW);
- cr4 = pv_guest_cr4_to_real_cr4(v);
- if ( unlikely(cr4 != read_cr4()) )
- write_cr4(cr4);
-
if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
activate_debugregs(v);
update_cr3(v);
/* We run on dom0's page tables for the final part of the build process. */
- write_ptbase(v);
+ switch_cr3_cr4(v->arch.cr3, read_cr4());
mapcache_override_current(v);
/* Copy the OS image and free temporary buffer. */
if ( (parms.virt_hypercall != UNSET_ADDR) &&
(parms.virt_hypercall >= v_end) )
{
mapcache_override_current(NULL);
- write_ptbase(current);
+ switch_cr3_cr4(current->arch.cr3, read_cr4());
printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
rc = -1;
goto out;
/* Return to idle domain's page tables. */
mapcache_override_current(NULL);
- write_ptbase(current);
+ switch_cr3_cr4(current->arch.cr3, read_cr4());
update_domain_wallclock_time(d);
post_flush(t);
}
-void switch_cr3(unsigned long cr3)
+void switch_cr3_cr4(unsigned long cr3, unsigned long cr4)
{
- unsigned long flags, cr4;
+ unsigned long flags, old_cr4;
u32 t;
/* This non-reentrant function is sometimes called in interrupt context. */
local_irq_save(flags);
t = pre_flush();
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
+ old_cr4 = read_cr4();
+ if ( old_cr4 & X86_CR4_PGE )
+ {
+ /*
+  * Clearing CR4.PGE flushes all TLB entries, global ones included.
+  * old_cr4 is reused to track the value actually in the register:
+  * the new cr4 with PGE stripped.
+  */
+ old_cr4 = cr4 & ~X86_CR4_PGE;
+ write_cr4(old_cr4);
+ }
+
write_cr3(cr3);
- write_cr4(cr4);
+
+ /* Reinstate the caller's cr4, re-enabling global pages if requested. */
+ if ( old_cr4 != cr4 )
+ write_cr4(cr4);
post_flush(t);

local_irq_restore(flags);
}
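
For context, a minimal usage sketch of the new primitive (the caller below is hypothetical; the identifiers it uses all exist in the patched tree):

    /* Switch to a PV vcpu's page tables while applying its tailored cr4
     * in one step; this replaces the old write_cr4()/write_cr3()/
     * write_cr4() dance and still ticks the tlbflush clock via
     * pre_flush()/post_flush(). */
    static void example_switch(struct vcpu *v)
    {
        unsigned long cr4 = pv_guest_cr4_to_real_cr4(v);

        switch_cr3_cr4(v->arch.cr3, cr4);
    }
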
void write_ptbase(struct vcpu *v)
{
struct cpu_info *cpu_info = get_cpu_info();
+ unsigned long new_cr4;
+
+ /* PV guests get their tailored cr4; idle and HVM vcpus run with
+  * Xen's own settings, i.e. TSD clear and global pages enabled. */
+ new_cr4 = (is_pv_vcpu(v) && !is_idle_vcpu(v))
+ ? pv_guest_cr4_to_real_cr4(v)
+ : ((read_cr4() & ~X86_CR4_TSD) | X86_CR4_PGE);
if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
{
cpu_info->root_pgt_changed = 1;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
- switch_cr3(v->arch.cr3);
+ switch_cr3_cr4(v->arch.cr3, new_cr4);
}
else
{
- /* Make sure to clear xen_cr3 before pv_cr3; switch_cr3() serializes. */
+ /* Make sure to clear xen_cr3 before pv_cr3. */
cpu_info->xen_cr3 = 0;
- switch_cr3(v->arch.cr3);
+ /* switch_cr3_cr4() serializes. */
+ switch_cr3_cr4(v->arch.cr3, new_cr4);
cpu_info->pv_cr3 = 0;
}
+
+ ASSERT(is_pv_vcpu(v) || read_cr4() == mmu_cr4_features);
}
sub $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
rep movsq
.Lrag_copy_done:
- mov STACK_CPUINFO_FIELD(cr4)(%rdx), %rdi
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
- mov %rdi, %rsi
- and $~X86_CR4_PGE, %rdi
- mov %rdi, %cr4
mov %rax, %cr3
- mov %rsi, %cr4
.Lrag_keep_cr3:
/* Restore stashed SPEC_CTRL value. */
* so "g" will have to do.
*/
UNLIKELY_START(g, exit_cr3)
- mov %cr4, %rdi
- mov %rdi, %rsi
- and $~X86_CR4_PGE, %rdi
- mov %rdi, %cr4
mov %rax, %cr3
- mov %rsi, %cr4
UNLIKELY_END(exit_cr3)
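
Why the PGE toggling can simply be dropped from both assembly exit paths: an XPTI guest now runs with CR4.PGE clear, so reloading %cr3 on the way out leaves no global hypervisor TLB entries to purge. A C-level sketch of the before/after (guest_cr3 and cr4 are illustrative locals, not the real entry-code state):

    /* Before: flush global TLB entries around the cr3 load. */
    write_cr4(cr4 & ~X86_CR4_PGE);
    write_cr3(guest_cr3);
    write_cr4(cr4);

    /* After: PGE is already clear while an XPTI guest is current, so a
     * plain cr3 load suffices, saving two serializing cr4 writes on
     * every exit to guest. */
    write_cr3(guest_cr3);
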
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
- switch_cr3(virt_to_maddr(efi_l4_pgtable));
+ switch_cr3_cr4(virt_to_maddr(efi_l4_pgtable), read_cr4());
return state;
}
{
if ( !state->cr3 )
return;
- switch_cr3(state->cr3);
+ switch_cr3_cr4(state->cr3, read_cr4());
if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
{
struct desc_ptr gdt_desc = {
#define pv_guest_cr4_to_real_cr4(v) \
(((v)->arch.pv_vcpu.ctrlreg[4] \
| (mmu_cr4_features \
- & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP | \
+ & (X86_CR4_PSE | X86_CR4_SMEP | \
X86_CR4_SMAP | X86_CR4_OSXSAVE | \
X86_CR4_FSGSBASE)) \
+ | ((v)->domain->arch.pv_domain.xpti ? 0 : X86_CR4_PGE) \
| ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)) \
& ~X86_CR4_DE)
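
The net effect of the macro change, as a sketch (v_xpti and v_plain are hypothetical vcpus of an XPTI and a non-XPTI PV domain; assumes the host has X86_CR4_PGE set in mmu_cr4_features, and relies on PGE being filtered out of the guest-visible ctrlreg[4]):

    unsigned long cr4_xpti  = pv_guest_cr4_to_real_cr4(v_xpti);
    unsigned long cr4_plain = pv_guest_cr4_to_real_cr4(v_plain);

    ASSERT(!(cr4_xpti & X86_CR4_PGE));  /* XPTI: no global pages */
    ASSERT(cr4_plain & X86_CR4_PGE);    /* non-XPTI: globals kept */
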
#define real_cr4_to_pv_guest_cr4(c) \
}
/* Write pagetable base and implicitly tick the tlbflush clock. */
-void switch_cr3(unsigned long cr3);
+void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);
/* flush_* flag fields: */
/*