x86/hvm: scale host TSC when setting/getting guest TSC
author    Haozhong Zhang <haozhong.zhang@intel.com>
          Tue, 19 Jan 2016 15:07:15 +0000 (16:07 +0100)
committer Jan Beulich <jbeulich@suse.com>
          Tue, 19 Jan 2016 15:07:15 +0000 (16:07 +0100)
The existing hvm_[set|get]_guest_tsc_fixed() calculate the guest TSC by
adding the TSC offset to the host TSC. When TSC scaling is enabled,
the host TSC should be scaled first. This patch adds the scaling logic
to those two functions.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
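
For context, a minimal sketch of the calculation this patch changes, with scale_fn standing in for hvm_funcs.scale_tsc and scaling_enabled for cpu_has_tsc_ratio (names chosen for illustration, not taken from the tree): the guest TSC is the host TSC, scaled first when scaling is in use, plus the cached TSC offset.

/* Illustrative sketch only -- not the actual Xen code.  It mirrors the
 * logic the patch introduces: scale the host TSC before applying the
 * guest's TSC offset. */
#include <stdint.h>

static uint64_t guest_tsc_from_host(uint64_t host_tsc, uint64_t tsc_offset,
                                    int scaling_enabled,
                                    uint64_t (*scale_fn)(uint64_t))
{
    uint64_t tsc = host_tsc;

    if ( scaling_enabled )      /* corresponds to cpu_has_tsc_ratio */
        tsc = scale_fn(tsc);    /* corresponds to hvm_funcs.scale_tsc() */

    return tsc + tsc_offset;    /* cache_tsc_offset in the real code */
}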
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/include/asm-x86/hvm/hvm.h

index f646c1ee7b37e6232add89675a1300cfc05542d8..e0cb82d2a39de0402bfcad74de1e3e2963a62310 100644 (file)
@@ -60,6 +60,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/event.h>
 #include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/svm/svm.h> /* for cpu_has_tsc_ratio */
 #include <asm/altp2m.h>
 #include <asm/mtrr.h>
 #include <asm/apic.h>
@@ -310,13 +311,11 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     delta_tsc = guest_tsc - tsc;
@@ -344,13 +343,11 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
         tsc = hvm_get_guest_time_fixed(v, at_tsc);
         tsc = gtime_to_gtsc(v->domain, tsc);
     }
-    else if ( at_tsc )
-    {
-        tsc = at_tsc;
-    }
     else
     {
-        tsc = rdtsc();
+        tsc = at_tsc ?: rdtsc();
+        if ( cpu_has_tsc_ratio )
+            tsc = hvm_funcs.scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
index a66d8544805dde6910d47eb6d02f79ece13eff11..a46bc98576175de0214bc7788374ec1e511bc354 100644 (file)
@@ -804,6 +804,13 @@ static uint64_t scale_tsc(uint64_t host_tsc, uint64_t ratio)
     return scaled_host_tsc;
 }
 
+static uint64_t svm_scale_tsc(const struct vcpu *v, uint64_t tsc)
+{
+    ASSERT(cpu_has_tsc_ratio && !v->domain->arch.vtsc);
+
+    return scale_tsc(tsc, vcpu_tsc_ratio(v));
+}
+
 static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
     uint64_t ratio)
 {
@@ -2272,6 +2279,8 @@ static struct hvm_function_table __initdata svm_function_table = {
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
     .nhvm_intr_blocked = nsvm_intr_blocked,
     .nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
+
+    .scale_tsc            = svm_scale_tsc,
 };
 
 void svm_vmexit_handler(struct cpu_user_regs *regs)
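
The body of scale_tsc() is not shown in this hunk; on AMD hardware the TSC Ratio MSR encodes the multiplier as an 8.32 fixed-point value (integer part in bits 39:32, fraction in bits 31:0), so scaling reduces to a widening multiply followed by a 32-bit right shift. A minimal sketch of that arithmetic, assuming GCC's __uint128_t is available (the in-tree helper keeps the intermediate product in 64-bit halves):

/* Sketch of the fixed-point multiply scale_tsc() performs, assuming the
 * AMD TSC Ratio MSR format described above.  Not the in-tree code. */
#include <stdint.h>

static uint64_t scale_tsc_sketch(uint64_t host_tsc, uint64_t ratio)
{
    /* scaled = host_tsc * (ratio / 2^32), keeping the high bits of the
     * 64x64-bit product. */
    return (uint64_t)(((__uint128_t)host_tsc * ratio) >> 32);
}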
index b9d893d1e28951e2063d94be7e79a40e939bd3d8..a87224b138f498b47e341dc26db9bd7ae2a5c0b8 100644 (file)
@@ -212,6 +212,8 @@ struct hvm_function_table {
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
+
+    uint64_t (*scale_tsc)(const struct vcpu *v, uint64_t tsc);
 };
 
 extern struct hvm_function_table hvm_funcs;
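
The new scale_tsc hook is optional from the table's point of view: only the SVM function table above installs it. A minimal usage sketch of the calling pattern the hvm.c hunk introduces, assuming Xen-internal context; read_scaled_host_tsc() is a hypothetical helper name, not a function in the tree.

/* Hypothetical helper illustrating the calling convention: check
 * cpu_has_tsc_ratio before dereferencing the hook, matching the
 * hvm.c changes above. */
static uint64_t read_scaled_host_tsc(const struct vcpu *v)
{
    uint64_t tsc = rdtsc();

    if ( cpu_has_tsc_ratio )
        tsc = hvm_funcs.scale_tsc(v, tsc);

    return tsc;
}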