if ( n->arch.pv_vcpu.fs_base | (dirty_segment_mask & DIRTY_FS_BASE) )
wrfsbase(n->arch.pv_vcpu.fs_base);
- /* Most kernels have non-zero GS base, so don't bother testing. */
- /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
- wrmsrl(MSR_SHADOW_GS_BASE, n->arch.pv_vcpu.gs_base_kernel);
+ /*
+ * Most kernels have non-zero GS base, so don't bother testing.
+ * (For old AMD hardware this is also a serialising instruction,
+ * avoiding erratum #88.)
+ */
+ wrgsshadow(n->arch.pv_vcpu.gs_base_kernel);
/* This can only be non-zero if selector is NULL. */
if ( n->arch.pv_vcpu.gs_base_user |
     (dirty_segment_mask & DIRTY_GS_BASE_USER) )
    wrgsbase(n->arch.pv_vcpu.gs_base_user);

/*
 * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
 * be updated at any time via SWAPGS, which we cannot trap.
 */
- rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
+ v->arch.hvm_vmx.shadow_gs = rdgsshadow();
}
static void vmx_restore_guest_msrs(struct vcpu *v)
{
- wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
+ wrgsshadow(v->arch.hvm_vmx.shadow_gs);
wrmsrl(MSR_STAR, v->arch.hvm_vmx.star);
wrmsrl(MSR_LSTAR, v->arch.hvm_vmx.lstar);
wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm_vmx.sfmask);
break;
case MSR_SHADOW_GS_BASE:
- rdmsrl(MSR_SHADOW_GS_BASE, *msr_content);
+ *msr_content = rdgsshadow();
break;
case MSR_STAR:
else if ( msr == MSR_GS_BASE )
__vmwrite(GUEST_GS_BASE, msr_content);
else
- wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
+ wrgsshadow(msr_content);
break;
case MSR_SHADOW_GS_BASE:
if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) )
break;
- wrmsrl(MSR_SHADOW_GS_BASE, val);
+ wrgsshadow(val);
curr->arch.pv_vcpu.gs_base_user = val;
return X86EMUL_OKAY;
case SEGBASE_GS_USER:
if ( is_canonical_address(base) )
{
- wrmsrl(MSR_SHADOW_GS_BASE, base);
+ wrgsshadow(base);
v->arch.pv_vcpu.gs_base_user = base;
}
else
    ret = -EINVAL;
break;
regs->gs = read_sreg(gs);
crs[5] = rdfsbase();
crs[6] = rdgsbase();
- rdmsrl(MSR_SHADOW_GS_BASE, crs[7]);
+ crs[7] = rdgsshadow();
}
static void _show_registers(
return base;
}
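+/*
+ * Read the shadow GS base.  On FSGSBASE-capable hardware this avoids the
+ * slower MSR access by temporarily swapping the shadow value into GS.
+ */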
+static inline unsigned long rdgsshadow(void)
+{
+ unsigned long base;
+
+ if ( cpu_has_fsgsbase )
+ {
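+        /* SWAPGS makes the shadow GS base active, so RDGSBASE can read it. */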
+ asm volatile ( "swapgs" );
+ base = __rdgsbase();
+ asm volatile ( "swapgs" );
+ }
+ else
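+        /* No FSGSBASE instructions: fall back to a direct MSR read. */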
+ rdmsrl(MSR_SHADOW_GS_BASE, base);
+
+ return base;
+}
+
static inline void wrgsbase(unsigned long base)
{
    if ( cpu_has_fsgsbase )
#ifdef HAVE_AS_FSGSBASE
        asm volatile ( "wrgsbase %0" :: "r" (base) );
#else
        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8" :: "a" (base) );
#endif
    else
        wrmsrl(MSR_GS_BASE, base);
}
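+/*
+ * Write the shadow GS base.  On FSGSBASE-capable hardware, SWAPGS plus
+ * WRGSBASE is cheaper than the equivalent MSR write.
+ */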
+static inline void wrgsshadow(unsigned long base)
+{
+ if ( cpu_has_fsgsbase )
+ {
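+        /* Swap in the shadow base, WRGSBASE the new value, then swap back. */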
+ asm volatile ( "swapgs\n\t"
+#ifdef HAVE_AS_FSGSBASE
+ "wrgsbase %0\n\t"
+ "swapgs"
+ :: "r" (base) );
+#else
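+                       /* 'wrgsbase %rax', for assemblers without FSGSBASE. */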
+ ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8\n\t"
+ "swapgs"
+ :: "a" (base) );
+#endif
+ }
+ else
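+        /* No FSGSBASE instructions: fall back to a direct MSR write. */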
+ wrmsrl(MSR_SHADOW_GS_BASE, base);
+}
+
DECLARE_PER_CPU(uint64_t, efer);
static inline uint64_t read_efer(void)
{