From: Jan Beulich
Date: Thu, 26 Oct 2017 07:57:04 +0000 (-0600)
Subject: x86: don't latch wrong (stale) GS base addresses
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=a711f6f24a7157ae70d1cc32e61b98f23dc0c584;p=people%2Fiwj%2Fxen.git

x86: don't latch wrong (stale) GS base addresses

load_segments() writes selector registers before doing any of the base
address updates. Any of these selector loads can cause a page fault in
case it references the LDT, and the LDT page accessed was only recently
installed. Therefore the call tree map_ldt_shadow_page() ->
guest_get_eff_kern_l1e() -> toggle_guest_mode() would in such a case
wrongly latch the outgoing vCPU's GS.base into the incoming vCPU's
recorded state.

Split page table toggling from GS handling - neither
guest_get_eff_kern_l1e() nor guest_io_okay() need more than the page
tables being the kernel ones for the memory access they want to do.

Signed-off-by: Jan Beulich
Reviewed-by: Andrew Cooper
Release-acked-by: Julien Grall
---

diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 2fb19960ba..2234128bb3 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -233,8 +233,17 @@ void toggle_guest_mode(struct vcpu *v)
         else
             v->arch.pv_vcpu.gs_base_user = __rdgsbase();
     }
-    v->arch.flags ^= TF_kernel_mode;
     asm volatile ( "swapgs" );
+
+    toggle_guest_pt(v);
+}
+
+void toggle_guest_pt(struct vcpu *v)
+{
+    if ( is_pv_32bit_vcpu(v) )
+        return;
+
+    v->arch.flags ^= TF_kernel_mode;
     update_cr3(v);
     /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
     asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index dd90713acf..2f9264548a 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -137,7 +137,7 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes,
      * read as 0xff (no access allowed).
      */
     if ( user_mode )
-        toggle_guest_mode(v);
+        toggle_guest_pt(v);
 
     switch ( __copy_from_guest_offset(x.bytes, v->arch.pv_vcpu.iobmp,
                                       port>>3, 2) )
@@ -150,7 +150,7 @@ static bool guest_io_okay(unsigned int port, unsigned int bytes,
     }
 
     if ( user_mode )
-        toggle_guest_mode(v);
+        toggle_guest_pt(v);
 
     if ( (x.mask & (((1 << bytes) - 1) << (port & 7))) == 0 )
         return true;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
index 6890e80efd..8d7a4fd85f 100644
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -72,12 +72,12 @@ static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
     l1_pgentry_t l1e;
 
     if ( user_mode )
-        toggle_guest_mode(curr);
+        toggle_guest_pt(curr);
 
     l1e = guest_get_eff_l1e(linear);
 
     if ( user_mode )
-        toggle_guest_mode(curr);
+        toggle_guest_pt(curr);
 
     return l1e;
 }
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4d0b77dc28..f69911918e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -76,6 +76,8 @@ void mapcache_override_current(struct vcpu *);
 
 /* x86/64: toggle guest between kernel and user modes. */
 void toggle_guest_mode(struct vcpu *);
+/* x86/64: toggle guest page tables between kernel and user modes. */
+void toggle_guest_pt(struct vcpu *);
 
 /*
  * Initialise a hypercall-transfer page. The given pointer must be mapped
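
For reference, the rough shape of the two functions after this change,
reassembled from the domain.c hunk above, is shown below. The early
is_pv_32bit_vcpu() check and the cpu_has_fsgsbase block at the top of
toggle_guest_mode(), as well as the tail of toggle_guest_pt() past the
CR3 write, lie outside the hunk's context and are assumed here; this is
a readability sketch, not part of the patch:

    void toggle_guest_mode(struct vcpu *v)
    {
        if ( is_pv_32bit_vcpu(v) )
            return;

        /* Latch the outgoing mode's GS base (assumed preamble, unchanged
         * by this patch). */
        if ( cpu_has_fsgsbase )
        {
            if ( v->arch.flags & TF_kernel_mode )
                v->arch.pv_vcpu.gs_base_kernel = __rdgsbase();
            else
                v->arch.pv_vcpu.gs_base_user = __rdgsbase();
        }
        asm volatile ( "swapgs" );

        /* GS handling done; the page table switch now lives separately. */
        toggle_guest_pt(v);
    }

    void toggle_guest_pt(struct vcpu *v)
    {
        if ( is_pv_32bit_vcpu(v) )
            return;

        /* Flip kernel/user mode and switch to the matching page tables. */
        v->arch.flags ^= TF_kernel_mode;
        update_cr3(v);
        /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
        asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
        /* ... remainder of the function is beyond the hunk above ... */
    }

With this split, guest_get_eff_kern_l1e() and guest_io_okay() switch to
the kernel page tables via toggle_guest_pt() without touching the GS
bookkeeping, so a page fault taken during load_segments()'s selector
writes can no longer latch the outgoing vCPU's GS.base into the incoming
vCPU's state.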