load_segments() writes the selector registers before doing any of the
base address updates. Any of these selector loads can cause a page
fault if it references the LDT and the accessed LDT page was only
recently installed. In that case the call tree map_ldt_shadow_page() ->
guest_get_eff_kern_l1e() -> toggle_guest_mode() would wrongly latch the
outgoing vCPU's GS.base into the incoming vCPU's recorded state.
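
To make the hazard concrete, here is a minimal stand-alone model (not
Xen code; hw_gs_base, my_rdgsbase() and the reduced struct vcpu are
purely illustrative):

    #include <stdio.h>

    struct vcpu { unsigned long gs_base_user; };

    static unsigned long hw_gs_base;    /* models the live GS.base register */

    static unsigned long my_rdgsbase(void) { return hw_gs_base; }

    /* Pre-patch shape: GS latching fused with the page table toggle. */
    static void toggle_guest_mode(struct vcpu *v)
    {
        v->gs_base_user = my_rdgsbase(); /* latches whatever is currently live */
        /* ... page table switch follows here in the real code ... */
    }

    int main(void)
    {
        struct vcpu outgoing = { .gs_base_user = 0x1000 },
                    incoming = { .gs_base_user = 0x2000 };

        hw_gs_base = outgoing.gs_base_user; /* outgoing value still live */

        /*
         * A selector load in load_segments() for the incoming vCPU faults
         * on a freshly installed LDT page; the fault handling path reaches
         * toggle_guest_mode() before GS.base has been switched:
         */
        toggle_guest_mode(&incoming);

        printf("incoming GS.base recorded as %#lx, expected %#lx\n",
               incoming.gs_base_user, 0x2000UL);
        return 0;
    }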
Split page table toggling from GS handling: neither
guest_get_eff_kern_l1e() nor guest_io_okay() needs more than the page
tables being the kernel ones for the memory access it wants to do.
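
The resulting shape of the two functions, condensed from the hunks
below (toggle_guest_mode()'s GS latching and its own early exits are
elided here):

    void toggle_guest_mode(struct vcpu *v)
    {
        /* ... latch the live GS.base into v's recorded state ... */
        asm volatile ( "swapgs" );

        toggle_guest_pt(v);
    }

    void toggle_guest_pt(struct vcpu *v)
    {
        if ( is_pv_32bit_vcpu(v) )
            return;

        v->arch.flags ^= TF_kernel_mode;
        update_cr3(v);
        /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
        asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
    }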
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Release-acked-by: Julien Grall <julien.grall@linaro.org>
else
v->arch.pv_vcpu.gs_base_user = __rdgsbase();
}
- v->arch.flags ^= TF_kernel_mode;
asm volatile ( "swapgs" );
+
+ toggle_guest_pt(v);
+}
+
+void toggle_guest_pt(struct vcpu *v)
+{
+ if ( is_pv_32bit_vcpu(v) )
+ return;
+
+ v->arch.flags ^= TF_kernel_mode;
update_cr3(v);
/* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
* read as 0xff (no access allowed).
*/
if ( user_mode )
- toggle_guest_mode(v);
+ toggle_guest_pt(v);
switch ( __copy_from_guest_offset(x.bytes, v->arch.pv_vcpu.iobmp,
port>>3, 2) )
}
if ( user_mode )
- toggle_guest_mode(v);
+ toggle_guest_pt(v);
if ( (x.mask & (((1 << bytes) - 1) << (port & 7))) == 0 )
return true;
l1_pgentry_t l1e;
if ( user_mode )
- toggle_guest_mode(curr);
+ toggle_guest_pt(curr);
l1e = guest_get_eff_l1e(linear);
if ( user_mode )
- toggle_guest_mode(curr);
+ toggle_guest_pt(curr);
return l1e;
}
/* x86/64: toggle guest between kernel and user modes. */
void toggle_guest_mode(struct vcpu *);
+/* x86/64: toggle guest page tables between kernel and user modes. */
+void toggle_guest_pt(struct vcpu *);
/*
* Initialise a hypercall-transfer page. The given pointer must be mapped