return ratio > max_ratio ? 0 : ratio;
}
+/*
+ * Scale a TSC value by the domain's TSC scaling ratio.
+ *
+ * When the domain uses hvm_default_tsc_scaling_ratio the value is
+ * returned unchanged.  Otherwise the ratio is a fixed-point number
+ * with hvm_funcs.tsc_scaling.ratio_frac_bits fractional bits, so the
+ * scaled value is the high part of the 128-bit product
+ * (tsc * ratio) >> ratio_frac_bits.
+ */
+u64 hvm_scale_tsc(const struct domain *d, u64 tsc)
+{
+ u64 ratio = d->arch.hvm_domain.tsc_scaling_ratio;
+ u64 dummy;
+
+ if ( ratio == hvm_default_tsc_scaling_ratio )
+ return tsc;
+
+ /* tsc = (tsc * ratio) >> hvm_funcs.tsc_scaling.ratio_frac_bits */
+ /*
+ * mulq leaves the 128-bit product in rdx:rax; shrd then shifts the
+ * low ratio_frac_bits of rdx into rax ([tsc]).  "=&d" (dummy) marks
+ * rdx as an early-clobbered output so no input gets allocated there;
+ * the shift count must live in %cl, hence the "c" constraint.
+ */
+ asm ( "mulq %[ratio]; shrdq %[frac],%%rdx,%[tsc]"
+ : [tsc] "+a" (tsc), "=&d" (dummy)
+ : [frac] "c" (hvm_funcs.tsc_scaling.ratio_frac_bits),
+ [ratio] "rm" (ratio) );
+
+ return tsc;
+}
+
void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
{
uint64_t tsc;
{
tsc = at_tsc ?: rdtsc();
if ( hvm_tsc_scaling_supported )
- tsc = hvm_funcs.tsc_scaling.scale_tsc(v, tsc);
+ tsc = hvm_scale_tsc(v->domain, tsc);
}
delta_tsc = guest_tsc - tsc;
{
tsc = at_tsc ?: rdtsc();
if ( hvm_tsc_scaling_supported )
- tsc = hvm_funcs.tsc_scaling.scale_tsc(v, tsc);
+ tsc = hvm_scale_tsc(v->domain, tsc);
}
return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
return scaled_host_tsc;
}
-static uint64_t svm_scale_tsc(const struct vcpu *v, uint64_t tsc)
-{
- ASSERT(cpu_has_tsc_ratio && !v->domain->arch.vtsc);
-
- return scale_tsc(tsc, hvm_tsc_scaling_ratio(v->domain));
-}
-
static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
uint64_t ratio)
{
.tsc_scaling = {
.max_ratio = ~TSC_RATIO_RSVD_BITS,
- .scale_tsc = svm_scale_tsc,
},
};
{
if ( has_hvm_container_domain(d) && hvm_tsc_scaling_supported )
{
- tsc_stamp =
- hvm_funcs.tsc_scaling.scale_tsc(v, t->local_tsc_stamp);
+ tsc_stamp = hvm_scale_tsc(d, t->local_tsc_stamp);
_u.tsc_to_system_mul = d->arch.vtsc_to_ns.mul_frac;
_u.tsc_shift = d->arch.vtsc_to_ns.shift;
}
uint8_t ratio_frac_bits;
/* maximum-allowed TSC scaling ratio */
uint64_t max_ratio;
-
- uint64_t (*scale_tsc)(const struct vcpu *v, uint64_t tsc);
} tsc_scaling;
};
#define hvm_tsc_scaling_ratio(d) \
((d)->arch.hvm_domain.tsc_scaling_ratio)
+u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
int hvm_set_mode(struct vcpu *v, int mode);