if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
- ? v->domain->arch.incarnation : 0);
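+ /* Load the guest's chosen TSC_AUX value whenever the MSR is available. */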
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
static const uint32_t msrs_to_send[] = {
MSR_SPEC_CTRL,
MSR_INTEL_MISC_FEATURES_ENABLES,
+ MSR_TSC_AUX,
MSR_AMD64_DR0_ADDRESS_MASK,
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
{
case MSR_SPEC_CTRL:
case MSR_INTEL_MISC_FEATURES_ENABLES:
+ case MSR_TSC_AUX:
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
struct segment_register seg;
struct hvm_hw_cpu ctxt = {
.tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
- .msr_tsc_aux = hvm_msr_tsc_aux(v),
+ .msr_tsc_aux = v->arch.msrs->tsc_aux,
.rax = v->arch.user_regs.rax,
.rbx = v->arch.user_regs.rbx,
.rcx = v->arch.user_regs.rcx,
return -EINVAL;
}
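+ /* MSR_TSC_AUX is 32 bits wide; reject out-of-range values in the stream. */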
+ if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux )
+ {
+ printk(XENLOG_G_ERR "%pv: HVM restore: bad MSR_TSC_AUX %#"PRIx64"\n",
+ v, ctxt.msr_tsc_aux);
+ return -EINVAL;
+ }
+
/* Older Xen versions used to save the segment arbytes directly
* from the VMCS on Intel hosts. Detect this and rearrange them
* into the struct segment_register format. */
if ( hvm_funcs.tsc_scaling.setup )
hvm_funcs.tsc_scaling.setup(v);
- v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
+ v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
*msr_content = v->arch.hvm.msr_tsc_adjust;
break;
- case MSR_TSC_AUX:
- *msr_content = hvm_msr_tsc_aux(v);
- break;
-
case MSR_APIC_BASE:
*msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
break;
hvm_set_guest_tsc_adjust(v, msr_content);
break;
- case MSR_TSC_AUX:
- v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
- if ( cpu_has_rdtscp
- && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
- wrmsr_tsc_aux(msr_content);
- break;
-
case MSR_APIC_BASE:
return guest_wrmsr_apic_base(v, msr_content);
svm_lwp_load(v);
svm_tsc_ratio_load(v);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
static void noreturn svm_do_resume(struct vcpu *v)
enum instruction_index insn = rdtscp ? INSTR_RDTSCP : INSTR_RDTSC;
unsigned int inst_len;
- if ( rdtscp && !currd->arch.cpuid->extd.rdtscp &&
- currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+ if ( rdtscp && !currd->arch.cpuid->extd.rdtscp )
{
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
return;
__update_guest_eip(regs, inst_len);
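+ /* RDTSCP additionally returns MSR_TSC_AUX in %ecx. */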
if ( rdtscp )
- regs->rcx = hvm_msr_tsc_aux(curr);
+ regs->rcx = curr->arch.msrs->tsc_aux;
hvm_rdtsc_intercept(regs);
}
wrmsrl(MSR_LSTAR, v->arch.hvm.vmx.lstar);
wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
- if ( cpu_has_rdtscp )
- wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ if ( cpu_has_msr_tsc_aux )
+ wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
void vmx_update_cpu_exec_control(struct vcpu *v)
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
- if ( !currd->arch.cpuid->extd.rdtscp &&
- currd->arch.tsc_mode != TSC_MODE_PVRDTSCP )
+ if ( !currd->arch.cpuid->extd.rdtscp )
{
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
break;
}
- regs->rcx = hvm_msr_tsc_aux(v);
+ regs->rcx = v->arch.msrs->tsc_aux;
/* fall through */
case EXIT_REASON_RDTSC:
update_guest_eip(); /* Safe: RDTSC, RDTSCP */
ret = guest_rdmsr_xen(v, msr, val);
break;
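+ /* Readable only when RDTSCP or RDPID is exposed to the guest. */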
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+
+ *val = msrs->tsc_aux;
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext )
ret = guest_wrmsr_xen(v, msr, val);
break;
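+ /*
+ * Writable only when RDTSCP or RDPID is exposed to the guest. The MSR is
+ * 32 bits wide; attempts to set the upper bits fault. Keep hardware in
+ * sync when updating the vcpu currently in context.
+ */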
+ case MSR_TSC_AUX:
+ if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+ goto gp_fault;
+ if ( val != (uint32_t)val )
+ goto gp_fault;
+
+ msrs->tsc_aux = val;
+ if ( v == curr )
+ wrmsr_tsc_aux(val);
+ break;
+
case MSR_AMD64_DR0_ADDRESS_MASK:
case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
if ( !cp->extd.dbext || val != (uint32_t)val )
*val = currd->arch.vtsc ? pv_soft_rdtsc(curr, ctxt->regs) : rdtsc();
return X86EMUL_OKAY;
- case MSR_TSC_AUX:
- *val = 0;
- return X86EMUL_OKAY;
-
case MSR_EFER:
/* Hide unknown bits, and unconditionally hide SVME from guests. */
*val = read_efer() & EFER_KNOWN_MASK & ~EFER_SVME;
#define cpu_has_avx512bw boot_cpu_has(X86_FEATURE_AVX512BW)
#define cpu_has_avx512vl boot_cpu_has(X86_FEATURE_AVX512VL)
+/* CPUID level 0x00000007:0.ecx */
+#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID)
+
/* CPUID level 0x80000007.edx */
#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
#define cpu_has_lfence_dispatch boot_cpu_has(X86_FEATURE_LFENCE_DISPATCH)
#define cpu_has_xen_lbr boot_cpu_has(X86_FEATURE_XEN_LBR)
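+/* MSR_TSC_AUX is available if either RDTSCP or RDPID is enumerated. */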
+#define cpu_has_msr_tsc_aux (cpu_has_rdtscp || cpu_has_rdpid)
+
enum _cache_type {
CACHE_TYPE_NULL = 0,
CACHE_TYPE_DATA = 1,
#endif
}
-#define hvm_msr_tsc_aux(v) ({ \
- struct domain *__d = (v)->domain; \
- (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
- ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
-})
-
/*
* Nested HVM
*/
struct hvm_vcpu_asid n1asid;
- u32 msr_tsc_aux;
u64 msr_tsc_adjust;
u64 msr_xss;
};
} misc_features_enables;
+ /*
+ * 0xc0000103 - MSR_TSC_AUX
+ *
+ * Value is guest chosen, and always loaded into hardware while the vcpu is
+ * in context. Guests have no direct (unintercepted) MSR access, and the
+ * value is visible to guest userspace via the RDTSCP and RDPID instructions.
+ */
+ uint32_t tsc_aux;
+
/*
* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
*