return 1;
}
+/*
+ * Compute the fixed-point multiplier that scales the host TSC frequency
+ * (cpu_khz) to the guest TSC frequency gtsc_khz, in the format declared
+ * by hvm_funcs.tsc_scaling (ratio_frac_bits fractional bits, capped at
+ * max_ratio).
+ *
+ * Returns 0 if TSC scaling is unavailable or the required ratio cannot
+ * be represented by the host CPU.  Otherwise, a non-zero ratio is
+ * returned.
+ */
+u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz)
+{
+    u8 ratio_frac_bits = hvm_funcs.tsc_scaling.ratio_frac_bits;
+    u64 max_ratio = hvm_funcs.tsc_scaling.max_ratio;
+    u64 ratio, dummy;
+
+    if ( !hvm_tsc_scaling_supported )
+        return 0;
+
+    /*
+     * Return early if the quotient is too large to fit in the integral
+     * part of TSC scaling ratio. This also avoids #DE from the following
+     * divq when the quotient can not fit in a 64-bit integer.
+     */
+    if ( gtsc_khz / cpu_khz > (max_ratio >> ratio_frac_bits) )
+        return 0;
+
+    /*
+     * ratio = (gtsc_khz << ratio_frac_bits) / cpu_khz
+     *
+     * The shifted dividend may exceed 64 bits, so build it in RDX:RAX
+     * for a 128-by-64-bit divq: shld moves the bits shifted out of the
+     * low word into the high word before the low word itself is shifted.
+     * The guard above guarantees the quotient fits in 64 bits, so no #DE.
+     */
+    asm ( "shldq %[frac],%[gkhz],%[zero] ; "
+          "shlq %[frac],%[gkhz] ; "
+          "divq %[hkhz] "
+          : "=d" (dummy), "=a" (ratio)
+          : [frac] "c" (ratio_frac_bits),
+            [gkhz] "a" ((u64) gtsc_khz),
+            [zero] "d" (0ULL),
+            [hkhz] "rm" ((u64) cpu_khz) );
+
+    /* Reject ratios whose integral part overflows the hardware field. */
+    return ratio > max_ratio ? 0 : ratio;
+}
+
+
void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
{
uint64_t tsc;
register_portio_handler(d, 0xe9, 1, hvm_print_line);
register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
+ if ( hvm_tsc_scaling_supported )
+ d->arch.hvm_domain.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
+
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
goto fail2;
{
ASSERT(cpu_has_tsc_ratio && !v->domain->arch.vtsc);
- return scale_tsc(tsc, vcpu_tsc_ratio(v));
+ return scale_tsc(tsc, hvm_tsc_scaling_ratio(v->domain));
}
static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
static inline void svm_tsc_ratio_load(struct vcpu *v)
{
+    /* Program the hardware TSC ratio MSR with the domain's precomputed ratio. */
    if ( cpu_has_tsc_ratio && !v->domain->arch.vtsc )
-        wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v));
+        wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain));
}
static void svm_ctxt_switch_from(struct vcpu *v)
*/
if ( tsc_mode == TSC_MODE_DEFAULT && host_tsc_is_safe() &&
(has_hvm_container_domain(d) ?
- d->arch.tsc_khz == cpu_khz || hvm_tsc_scaling_supported :
+ (d->arch.tsc_khz == cpu_khz ||
+ hvm_get_tsc_scaling_ratio(d->arch.tsc_khz)) :
incarnation == 0) )
{
case TSC_MODE_NEVER_EMULATE:
d->arch.vtsc = !boot_cpu_has(X86_FEATURE_RDTSCP) ||
!host_tsc_is_safe();
enable_tsc_scaling = has_hvm_container_domain(d) &&
- hvm_tsc_scaling_supported && !d->arch.vtsc;
+ !d->arch.vtsc &&
+ hvm_get_tsc_scaling_ratio(gtsc_khz ?: cpu_khz);
d->arch.tsc_khz = (enable_tsc_scaling && gtsc_khz) ? gtsc_khz : cpu_khz;
set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
d->arch.ns_to_vtsc = scale_reciprocal(d->arch.vtsc_to_ns);
d->arch.incarnation = incarnation + 1;
if ( has_hvm_container_domain(d) )
{
+ if ( hvm_tsc_scaling_supported && !d->arch.vtsc )
+ d->arch.hvm_domain.tsc_scaling_ratio =
+ hvm_get_tsc_scaling_ratio(d->arch.tsc_khz);
+
hvm_set_rdtsc_exiting(d, d->arch.vtsc);
if ( d->vcpu && d->vcpu[0] && incarnation == 0 )
{
*/
uint64_t sync_tsc;
+ uint64_t tsc_scaling_ratio;
+
unsigned long *io_bitmap;
/* List of permanently write-mapped pages. */
#define hvm_tsc_scaling_supported \
    (!!hvm_funcs.tsc_scaling.ratio_frac_bits)
+/*
+ * The ratio representing a 1:1 guest/host TSC frequency: integral part 1,
+ * fractional part 0 (i.e. 1 shifted into the integral field).
+ */
+#define hvm_default_tsc_scaling_ratio \
+    (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)
+
+/* Fetch the per-domain TSC scaling ratio cached in hvm_domain. */
+#define hvm_tsc_scaling_ratio(d) \
+    ((d)->arch.hvm_domain.tsc_scaling_ratio)
+
+u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
+
+
int hvm_set_mode(struct vcpu *v, int mode);
void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
/* TSC rate */
#define DEFAULT_TSC_RATIO 0x0000000100000000ULL
#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL
-#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \
- ~TSC_RATIO_RSVD_BITS )
-#define vcpu_tsc_ratio(v) TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz)
extern void svm_host_osvw_reset(void);
extern void svm_host_osvw_init(void);