x86/msr: Handle MSR_TSC_AUX consistently for PV and HVM guests
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 15 Nov 2018 21:04:37 +0000 (21:04 +0000)
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Tue, 18 Dec 2018 17:13:51 +0000 (17:13 +0000)
With PVRDTSCP mode removed, handling of MSR_TSC_AUX can move into the common
code.  Move its storage into struct vcpu_msrs (dropping the HVM-specific
msr_tsc_aux), and add an RDPID feature check as this bit also enumerates the
presence of the MSR.

Introduce cpu_has_rdpid along with the synthesized cpu_has_msr_tsc_aux to
correct the context switch paths, as MSR_TSC_AUX is enumerated by either
RDTSCP or RDPID.
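
For context (illustrative, not part of this patch): MSR_TSC_AUX is visible
to unprivileged guest code through either instruction, which is why
enumerating RDTSCP or RDPID alone is sufficient for the MSR to exist.

    #include <stdint.h>

    /* Illustrative only: how guest userspace observes MSR_TSC_AUX. */
    static inline uint32_t tsc_aux_via_rdtscp(void)
    {
        uint32_t lo, hi, aux;

        /* RDTSCP returns the TSC in EDX:EAX and MSR_TSC_AUX in ECX. */
        asm volatile ( "rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux) );

        return aux;
    }

    static inline uint64_t tsc_aux_via_rdpid(void)
    {
        uint64_t aux;

        /* RDPID reads only MSR_TSC_AUX, without also reading the TSC. */
        asm volatile ( "rdpid %0" : "=r" (aux) );

        return aux;
    }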

Drop hvm_msr_tsc_aux() entirely, and use v->arch.msrs->tsc_aux directly.
Update hvm_load_cpu_ctxt() to check that the incoming ctxt.msr_tsc_aux isn't
out of range.  In practice, no previous version of Xen ever wrote an
out-of-range value.  Add MSR_TSC_AUX to the list of MSRs migrated for PV
guests, but leave the HVM path using the existing space in hvm_hw_cpu.
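
The guard is the standard truncation idiom: a 64-bit stream value is only
acceptable for this 32-bit MSR if casting to uint32_t and back is lossless.
A minimal sketch of the predicate applied in the hunks below:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Sketch of the validity test used by hvm_load_cpu_ctxt() and
     * guest_wrmsr(): MSR_TSC_AUX is architecturally 32 bits wide, so
     * any value with bits set above bit 31 is rejected.
     */
    static bool msr_tsc_aux_valid(uint64_t val)
    {
        return val == (uint32_t)val;
    }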

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Brian Woods <brian.woods@amd.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/domain.c
xen/arch/x86/domctl.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/msr.c
xen/arch/x86/pv/emul-priv-op.c
xen/include/asm-x86/cpufeature.h
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/vcpu.h
xen/include/asm-x86/msr.h

xen/arch/x86/domain.c
index ae9f24e457607d309886439067a6ddc11233eff6..f5a1f325aa56ec631a406b803bf922d922d02769 100644
@@ -1595,9 +1595,8 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
     if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
         activate_debugregs(v);
 
-    if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
-                      ? v->domain->arch.incarnation : 0);
+    if ( cpu_has_msr_tsc_aux )
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
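
wrmsr_tsc_aux() is Xen's wrapper for loading the MSR; a plausible sketch,
assuming (not shown in this patch) a per-pCPU cache that elides redundant
WRMSRs on the context-switch paths:

    static DEFINE_PER_CPU(uint32_t, tsc_aux_cache);

    static inline void wrmsr_tsc_aux(uint32_t val)
    {
        uint32_t *cache = &this_cpu(tsc_aux_cache);

        if ( *cache != val )          /* Skip the WRMSR if unchanged. */
        {
            wrmsr(MSR_TSC_AUX, val, 0);
            *cache = val;
        }
    }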

xen/arch/x86/domctl.c
index ed46df8c5d615fafcc107912e10656aadf67d4b2..9bf2d0820f53093208d534742ec6c1662ebd8876 100644
@@ -1274,6 +1274,7 @@ long arch_do_domctl(
         static const uint32_t msrs_to_send[] = {
             MSR_SPEC_CTRL,
             MSR_INTEL_MISC_FEATURES_ENABLES,
+            MSR_TSC_AUX,
             MSR_AMD64_DR0_ADDRESS_MASK,
             MSR_AMD64_DR1_ADDRESS_MASK,
             MSR_AMD64_DR2_ADDRESS_MASK,
@@ -1373,6 +1374,7 @@ long arch_do_domctl(
                 {
                 case MSR_SPEC_CTRL:
                 case MSR_INTEL_MISC_FEATURES_ENABLES:
+                case MSR_TSC_AUX:
                 case MSR_AMD64_DR0_ADDRESS_MASK:
                 case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
                     if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
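
On the save side, each entry in msrs_to_send is read back through the
common guest_rdmsr() path, and zero-valued MSRs are left out of the
stream; the hunk above is the matching restore-side whitelist.  A
simplified sketch of that save loop (variable names abbreviated):

    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
    {
        uint64_t val;

        if ( guest_rdmsr(v, msrs_to_send[i], &val) != X86EMUL_OKAY )
            continue;

        if ( !val )
            continue;   /* Don't pad the stream with default values. */

        msr.index = msrs_to_send[i];
        msr.value = val;
        /* ... copy msr out to the caller's buffer ... */
    }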

xen/arch/x86/hvm/hvm.c
index 97fcaadb0be8a51584e5da1895e3f910b0664988..d14ddcb52702b017e94e23e856188915cbdb7a2e 100644
@@ -774,7 +774,7 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
     struct segment_register seg;
     struct hvm_hw_cpu ctxt = {
         .tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
-        .msr_tsc_aux = hvm_msr_tsc_aux(v),
+        .msr_tsc_aux = v->arch.msrs->tsc_aux,
         .rax = v->arch.user_regs.rax,
         .rbx = v->arch.user_regs.rbx,
         .rcx = v->arch.user_regs.rcx,
@@ -1014,6 +1014,13 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
+    if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux )
+    {
+        printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#"PRIx64"\n",
+               v, ctxt.msr_tsc_aux);
+        return -EINVAL;
+    }
+
     /* Older Xen versions used to save the segment arbytes directly 
      * from the VMCS on Intel hosts.  Detect this and rearrange them
      * into the struct segment_register format. */
@@ -1040,7 +1047,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_funcs.tsc_scaling.setup )
         hvm_funcs.tsc_scaling.setup(v);
 
-    v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
+    v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
 
     hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
 
@@ -3406,10 +3413,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = v->arch.hvm.msr_tsc_adjust;
         break;
 
-    case MSR_TSC_AUX:
-        *msr_content = hvm_msr_tsc_aux(v);
-        break;
-
     case MSR_APIC_BASE:
         *msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
         break;
@@ -3557,13 +3560,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         hvm_set_guest_tsc_adjust(v, msr_content);
         break;
 
-    case MSR_TSC_AUX:
-        v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
-        if ( cpu_has_rdtscp
-             && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
-            wrmsr_tsc_aux(msr_content);
-        break;
-
     case MSR_APIC_BASE:
         return guest_wrmsr_apic_base(v, msr_content);
 
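
With these cases deleted, HVM accesses to MSR_TSC_AUX reach the common
handlers instead: the intercepts give guest_rdmsr()/guest_wrmsr() first
refusal, and only MSRs they leave unhandled fall through to the legacy
HVM switch.  Paraphrased dispatch:

    ret = guest_rdmsr(v, msr, msr_content);
    if ( ret != X86EMUL_UNHANDLEABLE )
        return ret;

    switch ( msr )
    {
        /* Remaining HVM-specific MSRs, e.g. MSR_APIC_BASE... */
    }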

xen/arch/x86/hvm/svm/svm.c
index 9464394b0d44c6f61cc5a3322a5c41e1f756ba50..954822c96049c6e695bf4eb1f07df53162955830 100644
@@ -1069,8 +1069,8 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
 
-    if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+    if ( cpu_has_msr_tsc_aux )
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 static void noreturn svm_do_resume(struct vcpu *v)
@@ -2286,8 +2286,7 @@ static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs, bool rdtscp)
     enum instruction_index insn = rdtscp ? INSTR_RDTSCP : INSTR_RDTSC;
     unsigned int inst_len;
 
-    if ( rdtscp && !currd->arch.cpuid->extd.rdtscp &&
-         currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+    if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )
     {
         hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
         return;
@@ -2299,7 +2298,7 @@ static void svm_vmexit_do_rdtsc(struct cpu_user_regs *regs, bool rdtscp)
     __update_guest_eip(regs, inst_len);
 
     if ( rdtscp )
-        regs->rcx = hvm_msr_tsc_aux(curr);
+        regs->rcx = curr->arch.msrs->tsc_aux;
 
     hvm_rdtsc_intercept(regs);
 }

xen/arch/x86/hvm/vmx/vmx.c
index 2166b0dfe4369ecbb611739840dc9df2fff348cb..64af8bf943a8e1637d3e8a8ac09522ee35c2a0c4 100644
@@ -511,8 +511,8 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
     wrmsrl(MSR_LSTAR,          v->arch.hvm.vmx.lstar);
     wrmsrl(MSR_SYSCALL_MASK,   v->arch.hvm.vmx.sfmask);
 
-    if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+    if ( cpu_has_msr_tsc_aux )
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -3957,14 +3957,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_invlpg_intercept(exit_qualification);
         break;
     case EXIT_REASON_RDTSCP:
-        if ( !currd->arch.cpuid->extd.rdtscp &&
-             currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+        if ( !currd->arch.cpuid->extd.rdtscp )
         {
             hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
             break;
         }
 
-        regs->rcx = hvm_msr_tsc_aux(v);
+        regs->rcx = v->arch.msrs->tsc_aux;
         /* fall through */
     case EXIT_REASON_RDTSC:
         update_guest_eip(); /* Safe: RDTSC, RDTSCP */
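
Both vendors' RDTSCP exits now source ECX from the common storage.
Architecturally, an emulated RDTSCP must deliver the following (a sketch,
with tsc standing for the scaled guest TSC computed by the existing
rdtsc intercept):

    regs->rax = (uint32_t)tsc;           /* TSC low 32 bits */
    regs->rdx = tsc >> 32;               /* TSC high 32 bits */
    regs->rcx = v->arch.msrs->tsc_aux;   /* MSR_TSC_AUX */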

xen/arch/x86/msr.c
index 85a58c0b58ff93eaf303749b85b1aea9476fe5e2..9bb38b6d663664fbca24fb2709d65db7e2d399dd 100644
@@ -170,6 +170,13 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         ret = guest_rdmsr_xen(v, msr, val);
         break;
 
+    case MSR_TSC_AUX:
+        if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+            goto gp_fault;
+
+        *val = msrs->tsc_aux;
+        break;
+
     case MSR_AMD64_DR0_ADDRESS_MASK:
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !cp->extd.dbext )
@@ -324,6 +331,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         ret = guest_wrmsr_xen(v, msr, val);
         break;
 
+    case MSR_TSC_AUX:
+        if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+            goto gp_fault;
+        if ( val != (uint32_t)val )
+            goto gp_fault;
+
+        msrs->tsc_aux = val;
+        if ( v == curr )
+            wrmsr_tsc_aux(val);
+        break;
+
     case MSR_AMD64_DR0_ADDRESS_MASK:
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !cp->extd.dbext || val != (uint32_t)val )
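
Note the v == curr test in the MSR_TSC_AUX write path: updates to a
remote vCPU (e.g. from the migration domctl above) only touch the saved
copy, and hardware catches up when the context-switch paths load
msrs->tsc_aux.  In outline:

    guest_wrmsr(v, MSR_TSC_AUX, val);      /* always updates msrs->tsc_aux;
                                            * writes hardware iff v == current */
    /* ... later, when v is scheduled ... */
    wrmsr_tsc_aux(v->arch.msrs->tsc_aux);  /* ctxt_switch_to() reloads it */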

xen/arch/x86/pv/emul-priv-op.c
index 5133c3561ecfd9fe9c381ac25e88a666a34cff62..942ece2ca0c37a50e812db8158e420655a45be03 100644
@@ -847,10 +847,6 @@ static int read_msr(unsigned int reg, uint64_t *val,
         *val = currd->arch.vtsc ? pv_soft_rdtsc(curr, ctxt->regs) : rdtsc();
         return X86EMUL_OKAY;
 
-    case MSR_TSC_AUX:
-        *val = 0;
-        return X86EMUL_OKAY;
-
     case MSR_EFER:
         /* Hide unknown bits, and unconditionally hide SVME from guests. */
         *val = read_efer() & EFER_KNOWN_MASK & ~EFER_SVME;

xen/include/asm-x86/cpufeature.h
index c2b0f6ae4e91de366e33ed0e59b203763d7af527..5592e1749d4bfcb583ac7618f050056dfae751e9 100644
 #define cpu_has_avx512bw        boot_cpu_has(X86_FEATURE_AVX512BW)
 #define cpu_has_avx512vl        boot_cpu_has(X86_FEATURE_AVX512VL)
 
+/* CPUID level 0x00000007:0.ecx */
+#define cpu_has_rdpid           boot_cpu_has(X86_FEATURE_RDPID)
+
 /* CPUID level 0x80000007.edx */
 #define cpu_has_itsc            boot_cpu_has(X86_FEATURE_ITSC)
 
 #define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
 #define cpu_has_xen_lbr         boot_cpu_has(X86_FEATURE_XEN_LBR)
 
+#define cpu_has_msr_tsc_aux     (cpu_has_rdtscp || cpu_has_rdpid)
+
 enum _cache_type {
     CACHE_TYPE_NULL = 0,
     CACHE_TYPE_DATA = 1,

xen/include/asm-x86/hvm/hvm.h
index 95581ce6cb9f83640b994ff3627b40db4735ccf0..0a10b5155433273552801649b2797c164e9b9906 100644
@@ -560,12 +560,6 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
 #endif
 }
 
-#define hvm_msr_tsc_aux(v) ({                                               \
-    struct domain *__d = (v)->domain;                                       \
-    (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP)                               \
-        ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux;      \
-})
-
 /*
  * Nested HVM
  */

xen/include/asm-x86/hvm/vcpu.h
index 39778f9e4e94195608e432516c39795b38ba3e43..c8a40f6d55791047f3169938de7865b6b959ea8a 100644
@@ -170,7 +170,6 @@ struct hvm_vcpu {
 
     struct hvm_vcpu_asid n1asid;
 
-    u32                 msr_tsc_aux;
     u64                 msr_tsc_adjust;
     u64                 msr_xss;
 

xen/include/asm-x86/msr.h
index 05d905bafdccaa59e8dfb543b097c66058cfbea9..adfa2fa05b2c178337f70b6cedcdefd15ff02a35 100644
@@ -288,6 +288,15 @@ struct vcpu_msrs
         };
     } misc_features_enables;
 
+    /*
+     * 0xc0000103 - MSR_TSC_AUX
+     *
+     * Value is guest chosen, and always loaded in vcpu context.  Guests have
+     * no direct MSR access, and the value is accessible to userspace with the
+     * RDTSCP and RDPID instructions.
+     */
+    uint32_t tsc_aux;
+
     /*
      * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
      *