ia64/xen-unstable
changeset 18933:ea0ad7b3ae41
x86: Simpler time handling when TSC is constant across all power saving states.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Signed-off-by: Gang Wei <gang.wei@intel.com>
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Tue Dec 16 11:59:22 2008 +0000 (2008-12-16) |
| parents | c3df4b8ea2fc |
| children | 86db039882ea |
| files | xen/arch/x86/time.c |
line diff
--- a/xen/arch/x86/time.c	Tue Dec 16 11:54:11 2008 +0000
+++ b/xen/arch/x86/time.c	Tue Dec 16 11:59:22 2008 +0000
@@ -69,9 +69,6 @@ static DEFINE_PER_CPU(struct cpu_time, c
 #define EPOCH MILLISECS(1000)
 static struct timer calibration_timer;
 
-/* TSC is invariant on C state entry? */
-static bool_t tsc_invariant;
-
 /*
  * We simulate a 32-bit platform timer from the 16-bit PIT ch2 counter.
  * Otherwise overflow happens too quickly (~50ms) for us to guarantee that
@@ -672,7 +669,7 @@ void cstate_restore_tsc(void)
     s_time_t stime_delta;
     u64 tsc_delta;
 
-    if ( tsc_invariant )
+    if ( boot_cpu_has(X86_FEATURE_NOSTOP_TSC) )
         return;
 
     stime_delta = read_platform_stime() - t->stime_master_stamp;
@@ -941,6 +938,18 @@ static void local_time_calibration(void)
     /* The overall calibration scale multiplier. */
     u32 calibration_mul_frac;
 
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+    {
+        /* Atomically read cpu_calibration struct and write cpu_time struct. */
+        local_irq_disable();
+        t->local_tsc_stamp = c->local_tsc_stamp;
+        t->stime_local_stamp = c->stime_master_stamp;
+        t->stime_master_stamp = c->stime_master_stamp;
+        local_irq_enable();
+        update_vcpu_system_time(current);
+        goto out;
+    }
+
     prev_tsc = t->local_tsc_stamp;
     prev_local_stime = t->stime_local_stamp;
     prev_master_stime = t->stime_master_stamp;
@@ -1059,6 +1068,7 @@ struct calibration_rendezvous {
     cpumask_t cpu_calibration_map;
     atomic_t nr_cpus;
     s_time_t master_stime;
+    u64 master_tsc_stamp;
 };
 
 static void time_calibration_rendezvous(void *_r)
@@ -1072,18 +1082,22 @@ static void time_calibration_rendezvous(
         while ( atomic_read(&r->nr_cpus) != (total_cpus - 1) )
             cpu_relax();
         r->master_stime = read_platform_stime();
-        mb(); /* write r->master_stime /then/ signal */
+        rdtscll(r->master_tsc_stamp);
+        mb(); /* write r->master_* /then/ signal */
         atomic_inc(&r->nr_cpus);
+        c->local_tsc_stamp = r->master_tsc_stamp;
     }
     else
     {
         atomic_inc(&r->nr_cpus);
         while ( atomic_read(&r->nr_cpus) != total_cpus )
             cpu_relax();
-        mb(); /* receive signal /then/ read r->master_stime */
+        mb(); /* receive signal /then/ read r->master_* */
+        if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+            wrmsrl(MSR_IA32_TSC, r->master_tsc_stamp);
+        rdtscll(c->local_tsc_stamp);
     }
 
-    rdtscll(c->local_tsc_stamp);
     c->stime_local_stamp = get_s_time();
     c->stime_master_stamp = r->master_stime;
 
@@ -1126,9 +1140,13 @@ void init_percpu_time(void)
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time(void)
 {
-    /* Is TSC invariant during deep C state? */
-    if ( cpuid_edx(0x80000007) & (1u<<8) )
-        tsc_invariant = 1;
+    /* If we have constant TSCs then scale factor can be shared. */
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+    {
+        int cpu;
+        for_each_cpu ( cpu )
+            per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+    }
 
     open_softirq(TIME_CALIBRATE_SOFTIRQ, local_time_calibration);