if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
activate_debugregs(v);
- if ( (v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP) &&
- boot_cpu_has(X86_FEATURE_RDTSCP) )
- write_rdtscp_aux(v->domain->arch.incarnation);
+ if ( cpu_has_rdtscp )
+ wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
+ ? v->domain->arch.incarnation : 0);
}
/* Update per-VCPU guest runstate shared memory area (if registered). */
v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
if ( cpu_has_rdtscp
&& (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
- wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content);
+ wrmsr_tsc_aux(msr_content);
break;
case MSR_IA32_APICBASE:
svm_tsc_ratio_load(v);
if ( cpu_has_rdtscp )
- wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
+ wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
}
static void noreturn svm_do_resume(struct vcpu *v)
}
if ( cpu_has_rdtscp )
- wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
+ wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
}
void vmx_update_cpu_exec_control(struct vcpu *v)
#include <xen/sched.h>
#include <asm/msr.h>
+DEFINE_PER_CPU(uint32_t, tsc_aux);
+
struct msr_domain_policy __read_mostly raw_msr_domain_policy,
__read_mostly host_msr_domain_policy,
__read_mostly hvm_max_msr_domain_policy,
__write_tsc(val); \
})
-#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
-
#define rdpmc(counter,low,high) \
__asm__ __volatile__("rdpmc" \
: "=a" (low), "=d" (high) \
DECLARE_PER_CPU(u32, ler_msr);
+DECLARE_PER_CPU(uint32_t, tsc_aux);
+
+/*
+ * Lazy update of MSR_TSC_AUX: only perform the (relatively expensive)
+ * WRMSR when the requested value differs from the per-CPU cached copy,
+ * avoiding redundant MSR writes on hot context-switch paths.
+ *
+ * NOTE(review): correctness depends on every writer of MSR_TSC_AUX going
+ * through this helper — a direct wrmsr() elsewhere would leave the cached
+ * value stale.  Presumably the zero-initialised per-CPU cache matches the
+ * MSR's post-reset value, and the cache is re-synchronised across S3
+ * resume / CPU hotplug — TODO confirm.
+ */
+static inline void wrmsr_tsc_aux(uint32_t val)
+{
+    uint32_t *this_tsc_aux = &this_cpu(tsc_aux);
+
+    if ( *this_tsc_aux != val )
+    {
+        /* Value changed: update the hardware MSR, then refresh the cache. */
+        wrmsr(MSR_TSC_AUX, val, 0);
+        *this_tsc_aux = val;
+    }
+}
+
/* MSR policy object for shared per-domain MSRs */
struct msr_domain_policy
{