ia64/xen-unstable

changeset 19508:ce8f37efc992

x86: Make special TSC handling (which assumes all TSCs tick at exactly the
same rate) dependent on the command-line option 'consistent_tscs'.

Also clean up rendezvous logic.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Apr 06 14:26:29 2009 +0100 (2009-04-06)
parents 1f705f0a32e2
children ff9fdd6fce1e
files xen/arch/x86/time.c
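
The new flag is wired to the hypervisor command line via boolean_param(), so
the special handling is opt-in at boot; it is also forced off in
init_xen_time() on hardware lacking X86_FEATURE_CONSTANT_TSC (see the last
hunk below). As a purely illustrative example (the kernel paths, dom0 memory
size and companion options are hypothetical, and accepted option spellings
may vary by Xen version), a grub.conf stanza enabling it might read:

    title Xen (consistent TSCs)
        kernel /boot/xen.gz consistent_tscs dom0_mem=512M
        module /boot/vmlinuz-2.6.18.8-xen root=/dev/sda1 ro
        module /boot/initrd-2.6.18.8-xen.img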
--- a/xen/arch/x86/time.c	Mon Apr 06 13:56:35 2009 +0100
+++ b/xen/arch/x86/time.c	Mon Apr 06 14:26:29 2009 +0100
@@ -35,6 +35,13 @@
 static char opt_clocksource[10];
 string_param("clocksource", opt_clocksource);
 
+/*
+ * opt_consistent_tscs: All TSCs tick at the exact same rate, allowing
+ * simplified system time handling.
+ */
+static int opt_consistent_tscs;
+boolean_param("consistent_tscs", opt_consistent_tscs);
+
 unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
 DEFINE_SPINLOCK(rtc_lock);
 unsigned long pit0_ticks;
@@ -959,7 +966,7 @@ static void local_time_calibration(void)
     /* The overall calibration scale multiplier. */
     u32 calibration_mul_frac;
 
-    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+    if ( opt_consistent_tscs )
     {
         /* Atomically read cpu_calibration struct and write cpu_time struct. */
         local_irq_disable();
@@ -1087,64 +1094,53 @@ static void local_time_calibration(void)
  */
 struct calibration_rendezvous {
     cpumask_t cpu_calibration_map;
-    atomic_t count_start;
-    atomic_t count_end;
+    atomic_t semaphore;
     s_time_t master_stime;
     u64 master_tsc_stamp;
 };
 
-#define NR_LOOPS 5
-
-static void time_calibration_rendezvous(void *_r)
+static void time_calibration_tsc_rendezvous(void *_r)
 {
     int i;
     struct cpu_calibration *c = &this_cpu(cpu_calibration);
     struct calibration_rendezvous *r = _r;
     unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
 
-    /* 
-     * Loop is used here to get rid of the cache's side effect to enlarge
-     * the TSC difference among CPUs.
-     */
-    for ( i = 0; i < NR_LOOPS; i++ )
+    /* Loop to get rid of cache effects on TSC skew. */
+    for ( i = 4; i >= 0; i-- )
     {
         if ( smp_processor_id() == 0 )
         {
-            while ( atomic_read(&r->count_start) != (total_cpus - 1) )
+            while ( atomic_read(&r->semaphore) != (total_cpus - 1) )
                 mb();
-   
+
             if ( r->master_stime == 0 )
             {
                 r->master_stime = read_platform_stime();
-                if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
-                    rdtscll(r->master_tsc_stamp);
+                rdtscll(r->master_tsc_stamp);
             }
-            atomic_set(&r->count_end, 0);
-            wmb();
-            atomic_inc(&r->count_start);
-    
-            if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && 
-                 i == NR_LOOPS - 1 )
-                write_tsc((u32)r->master_tsc_stamp, (u32)(r->master_tsc_stamp >> 32));
-    
-            while (atomic_read(&r->count_end) != total_cpus - 1)
+            atomic_inc(&r->semaphore);
+
+            if ( i == 0 )
+                write_tsc((u32)r->master_tsc_stamp,
+                          (u32)(r->master_tsc_stamp >> 32));
+
+            while ( atomic_read(&r->semaphore) != (2*total_cpus - 1) )
                 mb();
-            atomic_set(&r->count_start, 0);
-            wmb();
-            atomic_inc(&r->count_end);
+            atomic_set(&r->semaphore, 0);
         }
         else
         {
-            atomic_inc(&r->count_start);
-            while ( atomic_read(&r->count_start) != total_cpus )
+            atomic_inc(&r->semaphore);
+            while ( atomic_read(&r->semaphore) < total_cpus )
                 mb();
-    
-            if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && 
-                 i == NR_LOOPS - 1 )
-                write_tsc((u32)r->master_tsc_stamp, (u32)(r->master_tsc_stamp >> 32));
-    
-            atomic_inc(&r->count_end);
-            while (atomic_read(&r->count_end) != total_cpus)
+
+            if ( i == 0 )
+                write_tsc((u32)r->master_tsc_stamp,
+                          (u32)(r->master_tsc_stamp >> 32));
+
+            atomic_inc(&r->semaphore);
+            while ( atomic_read(&r->semaphore) > total_cpus )
                 mb();
         }
     }
@@ -1156,18 +1152,48 @@ static void time_calibration_rendezvous(
     raise_softirq(TIME_CALIBRATE_SOFTIRQ);
 }
 
+static void time_calibration_std_rendezvous(void *_r)
+{
+    struct cpu_calibration *c = &this_cpu(cpu_calibration);
+    struct calibration_rendezvous *r = _r;
+    unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+
+    if ( smp_processor_id() == 0 )
+    {
+        while ( atomic_read(&r->semaphore) != (total_cpus - 1) )
+            cpu_relax();
+        r->master_stime = read_platform_stime();
+        mb(); /* write r->master_stime /then/ signal */
+        atomic_inc(&r->semaphore);
+    }
+    else
+    {
+        atomic_inc(&r->semaphore);
+        while ( atomic_read(&r->semaphore) != total_cpus )
+            cpu_relax();
+        mb(); /* receive signal /then/ read r->master_stime */
+    }
+
+    rdtscll(c->local_tsc_stamp);
+    c->stime_local_stamp = get_s_time();
+    c->stime_master_stamp = r->master_stime;
+
+    raise_softirq(TIME_CALIBRATE_SOFTIRQ);
+}
+
 static void time_calibration(void *unused)
 {
     struct calibration_rendezvous r = {
         .cpu_calibration_map = cpu_online_map,
-        .count_start = ATOMIC_INIT(0),
-        .count_end = ATOMIC_INIT(0),
-        .master_stime = 0
+        .semaphore = ATOMIC_INIT(0)
     };
 
     /* @wait=1 because we must wait for all cpus before freeing @r. */
     on_selected_cpus(r.cpu_calibration_map,
-                     time_calibration_rendezvous, &r, 0, 1);
+                     opt_consistent_tscs
+                     ? time_calibration_tsc_rendezvous
+                     : time_calibration_std_rendezvous,
+                     &r, 0, 1);
 }
 
 void init_percpu_time(void)
@@ -1194,8 +1220,11 @@ void init_percpu_time(void)
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time(void)
 {
+    if ( !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+        opt_consistent_tscs = 0;
+
     /* If we have constant TSCs then scale factor can be shared. */
-    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
+    if ( opt_consistent_tscs )
    {
         int cpu;
         for_each_cpu ( cpu )
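
To make the rendezvous cleanup easier to follow outside the hypervisor,
below is a minimal user-space sketch of both schemes. It is an illustration
under stated assumptions, not Xen code: C11 atomics and pthreads stand in
for Xen's atomic_t, mb(), cpu_relax() and the on_selected_cpus() IPI
machinery, and NCPUS, NLOOPS, master_stamp, master_stime and the thread
harness are invented for the demo. Build with e.g. 'cc -std=c11 -pthread'.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS  4
#define NLOOPS 5

static atomic_uint semaphore;   /* one counter replaces count_start/count_end */
static uint64_t master_stamp;   /* stand-in for r->master_tsc_stamp */
static uint64_t master_stime;   /* stand-in for r->master_stime */

/*
 * Mirrors time_calibration_tsc_rendezvous(): a two-phase barrier, looped
 * to damp cache effects. Phase 1 counts the semaphore up to NCPUS, phase 2
 * up to 2*NCPUS-1; the master's reset to zero releases everyone into the
 * next iteration.
 */
static void tsc_style_rendezvous(unsigned int id)
{
    for ( int i = NLOOPS - 1; i >= 0; i-- )
    {
        if ( id == 0 )
        {
            while ( atomic_load(&semaphore) != NCPUS - 1 )
                ;                               /* all slaves checked in */
            master_stamp = 0x123456789ULL;      /* master-side work */
            atomic_fetch_add(&semaphore, 1);    /* counter hits NCPUS */

            while ( atomic_load(&semaphore) != 2 * NCPUS - 1 )
                ;                               /* second round of incs */
            atomic_store(&semaphore, 0);        /* release into next loop */
        }
        else
        {
            atomic_fetch_add(&semaphore, 1);
            while ( atomic_load(&semaphore) < NCPUS )
                ;                               /* wait for master's inc */
            /* ... slave-side work, e.g. latching master_stamp ... */
            atomic_fetch_add(&semaphore, 1);
            while ( atomic_load(&semaphore) > NCPUS )
                ;                               /* wait for master's reset */
        }
    }
}

/*
 * Mirrors time_calibration_std_rendezvous(): publish master_stime, then
 * signal. The release increment pairs with the acquire load, playing the
 * role of the two mb() barriers in the patch.
 */
static void std_style_rendezvous(unsigned int id)
{
    if ( id == 0 )
    {
        while ( atomic_load_explicit(&semaphore, memory_order_relaxed)
                != NCPUS - 1 )
            ;
        master_stime = 987654321ULL;    /* read_platform_stime() stand-in */
        atomic_fetch_add_explicit(&semaphore, 1, memory_order_release);
    }
    else
    {
        atomic_fetch_add_explicit(&semaphore, 1, memory_order_relaxed);
        while ( atomic_load_explicit(&semaphore, memory_order_acquire)
                != NCPUS )
            ;
        /* master_stime is guaranteed visible here */
    }
}

static void *thread_fn(void *arg)
{
    unsigned int id = (unsigned int)(uintptr_t)arg;
    tsc_style_rendezvous(id);
    /* The barrier leaves the semaphore at zero, so it can be reused. */
    std_style_rendezvous(id);
    return NULL;
}

int main(void)
{
    pthread_t t[NCPUS];
    for ( unsigned int id = 0; id < NCPUS; id++ )
        pthread_create(&t[id], NULL, thread_fn, (void *)(uintptr_t)id);
    for ( unsigned int id = 0; id < NCPUS; id++ )
        pthread_join(t[id], NULL);
    printf("stamp=%#llx stime=%llu\n",
           (unsigned long long)master_stamp,
           (unsigned long long)master_stime);
    return 0;
}

The single counter works because the two phases occupy disjoint value
ranges: phase-1 increments take it to NCPUS and phase-2 increments from
NCPUS+1 to 2*NCPUS-1, so the master's reset to zero is unambiguous. In the
std variant the release/acquire pair expresses the same ordering the patch
documents with its two mb() calls: write master_stime, then signal; receive
the signal, then read master_stime.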