xenbits.xensource.com Git - xen.git/commitdiff
x86/pv: Avoid leaking other guests' MSR_TSC_AUX values into PV context
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Tue, 6 Mar 2018 14:54:10 +0000 (15:54 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Tue, 6 Mar 2018 14:54:10 +0000 (15:54 +0100)
If the CPU pipeline supports RDTSCP or RDPID, a guest can observe the value in
MSR_TSC_AUX, irrespective of whether the relevant CPUID features are
advertised/hidden.
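
For illustration only (not part of this patch), a minimal guest-side sketch of why hiding the CPUID bit does not help: RDTSCP returns the current MSR_TSC_AUX value in ECX whenever the pipeline executes the instruction, and RDPID likewise reads the MSR directly. Function and header choices here are assumptions for a standalone sketch.

#include <stdint.h>

/* Sketch: if RDTSCP executes, MSR_TSC_AUX is delivered in ECX to the
 * guest regardless of whether CPUID advertises the feature. */
static inline uint32_t read_tsc_aux_via_rdtscp(void)
{
    uint32_t lo, hi, aux;

    asm volatile ( "rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux) );
    return aux;
}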

At the moment, paravirt_ctxt_switch_to() only writes to MSR_TSC_AUX if
TSC_MODE_PVRDTSCP mode is enabled, but this is not the default mode.
Therefore, default PV guests can read the value left in the MSR by a previously
scheduled HVM vcpu, or by a TSC_MODE_PVRDTSCP-enabled PV guest.

Alter the PV path to always write to MSR_TSC_AUX, using 0 in the common case.

To amortise the overhead, introduce wrmsr_tsc_aux(), which performs a lazy
update of the MSR, and use this function consistently across the codebase.
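
A minimal sketch of the lazy-update pattern (the real helper is in the asm-x86/msr.h hunk at the end of this diff): a per-CPU shadow remembers the last value written, so repeated writes of the same value, e.g. back-to-back switches between vcpus that both use 0, skip the comparatively expensive WRMSR. The identifiers below are illustrative, not the names added by the patch.

/* Sketch only; mirrors the wrmsr_tsc_aux() added below. */
DEFINE_PER_CPU(uint32_t, tsc_aux_shadow);            /* illustrative name */

static inline void lazy_write_tsc_aux(uint32_t val)  /* illustrative name */
{
    uint32_t *shadow = &this_cpu(tsc_aux_shadow);

    if ( *shadow != val )       /* only touch the MSR when the value changes */
    {
        wrmsr(MSR_TSC_AUX, val, 0);
        *shadow = val;
    }
}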

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
master commit: cc0e45db277922b5723a7b1d9657d6f744230cf1
master date: 2018-02-27 10:47:23 +0000

xen/arch/x86/domain.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/msr.c
xen/include/asm-x86/msr.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f871bbd7f4f59593e0a14d1cb264e76257266fc3..5a4d5c3bfcdf06fa9ad8bc0c5f7658942adcbd06 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1531,9 +1531,9 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
     if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
         activate_debugregs(v);
 
-    if ( (v->domain->arch.tsc_mode ==  TSC_MODE_PVRDTSCP) &&
-         boot_cpu_has(X86_FEATURE_RDTSCP) )
-        write_rdtscp_aux(v->domain->arch.incarnation);
+    if ( cpu_has_rdtscp )
+        wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
+                      ? v->domain->arch.incarnation : 0);
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2e212f6f80d4d3bbb6b244dd6b357796cd7fc08b..18adec5ad89fb7edc77bef8109f03f1e0e3fcf0f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3582,7 +3582,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
         if ( cpu_has_rdtscp
              && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
-            wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content);
+            wrmsr_tsc_aux(msr_content);
         break;
 
     case MSR_IA32_APICBASE:
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e978268fb55ccd8ed5495ff7af890564e8d39980..b50eb43c5fa46b84f815014f2a51e9104cd0964d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1077,7 +1077,7 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_tsc_ratio_load(v);
 
     if ( cpu_has_rdtscp )
-        wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
+        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
 }
 
 static void noreturn svm_do_resume(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9416ad5df29150f493f88790b345bc52b2217b9e..577a876ea578775bdc62e228f04fd915f5bd7f60 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -622,7 +622,7 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
     }
 
     if ( cpu_has_rdtscp )
-        wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
+        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 7875d9c1e0cd71a301e0a65363b728977b563d44..7ba9a101aac143b07de738650a0d50b9542d2ef7 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -24,6 +24,8 @@
 #include <xen/sched.h>
 #include <asm/msr.h>
 
+DEFINE_PER_CPU(uint32_t, tsc_aux);
+
 struct msr_domain_policy __read_mostly hvm_max_msr_domain_policy,
                          __read_mostly  pv_max_msr_domain_policy;
 
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 20ba47e90582b3af725c32855ae148cb3a3d76b3..2c9277b6d5c8b2bf81e44aaa63d326380b14cc9b 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -115,8 +115,6 @@ static inline uint64_t rdtsc_ordered(void)
     __write_tsc(val);                                           \
 })
 
-#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
-
 #define rdpmc(counter,low,high) \
      __asm__ __volatile__("rdpmc" \
                          : "=a" (low), "=d" (high) \
@@ -202,6 +200,20 @@ void write_efer(u64 val);
 
 DECLARE_PER_CPU(u32, ler_msr);
 
+DECLARE_PER_CPU(uint32_t, tsc_aux);
+
+/* Lazy update of MSR_TSC_AUX */
+static inline void wrmsr_tsc_aux(uint32_t val)
+{
+    uint32_t *this_tsc_aux = &this_cpu(tsc_aux);
+
+    if ( *this_tsc_aux != val )
+    {
+        wrmsr(MSR_TSC_AUX, val, 0);
+        *this_tsc_aux = val;
+    }
+}
+
 /* MSR policy object for shared per-domain MSRs */
 struct msr_domain_policy
 {