#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/event.h>
#include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/svm/svm.h> /* for cpu_has_tsc_ratio */
#include <asm/altp2m.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
tsc = hvm_get_guest_time_fixed(v, at_tsc);
tsc = gtime_to_gtsc(v->domain, tsc);
}
- else if ( at_tsc )
- {
- tsc = at_tsc;
- }
else
{
- tsc = rdtsc();
+ tsc = at_tsc ?: rdtsc();
+ if ( cpu_has_tsc_ratio )
+ tsc = hvm_funcs.scale_tsc(v, tsc);
}
delta_tsc = guest_tsc - tsc;
tsc = hvm_get_guest_time_fixed(v, at_tsc);
tsc = gtime_to_gtsc(v->domain, tsc);
}
- else if ( at_tsc )
- {
- tsc = at_tsc;
- }
else
{
- tsc = rdtsc();
+ tsc = at_tsc ?: rdtsc();
+ if ( cpu_has_tsc_ratio )
+ tsc = hvm_funcs.scale_tsc(v, tsc);
}
return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
return scaled_host_tsc;
}
+/*
+ * Scale a raw TSC value by this vCPU's TSC ratio (hardware TSC scaling).
+ *
+ * Only valid when the CPU supports the TSC Ratio feature and the guest's
+ * TSC is not being software-emulated (!vtsc) -- callers in the vtsc path
+ * convert via gtime_to_gtsc() instead, so reaching here with vtsc set
+ * would indicate a caller bug; hence the ASSERT.
+ */
+static uint64_t svm_scale_tsc(const struct vcpu *v, uint64_t tsc)
+{
+    ASSERT(cpu_has_tsc_ratio && !v->domain->arch.vtsc);
+
+    return scale_tsc(tsc, vcpu_tsc_ratio(v));
+}
+
static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
uint64_t ratio)
{
.nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
.nhvm_intr_blocked = nsvm_intr_blocked,
.nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
+
+ .scale_tsc = svm_scale_tsc,
};
void svm_vmexit_handler(struct cpu_user_regs *regs)
void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
+
+ uint64_t (*scale_tsc)(const struct vcpu *v, uint64_t tsc);
};
extern struct hvm_function_table hvm_funcs;