ia64/xen-unstable

changeset 11006:7ce412dde1be

[XEN] Place per-cpu timer info in PER_CPU space.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 08 12:04:46 2006 +0100 (2006-08-08)
parents f3fb7727cb9a
children 5e8c254c9dcd
files xen/common/timer.c
line diff
     1.1 --- a/xen/common/timer.c	Tue Aug 08 11:45:37 2006 +0100
     1.2 +++ b/xen/common/timer.c	Tue Aug 08 12:04:46 2006 +0100
     1.3 @@ -32,7 +32,7 @@ struct timers {
     1.4      struct timer  *running;
     1.5  } __cacheline_aligned;
     1.6  
     1.7 -struct timers timers[NR_CPUS];
     1.8 +static DEFINE_PER_CPU(struct timers, timers);
     1.9  
    1.10  extern int reprogram_timer(s_time_t timeout);
    1.11  
    1.12 @@ -149,7 +149,7 @@ static int add_entry(struct timer ***phe
    1.13  static inline void __add_timer(struct timer *timer)
    1.14  {
    1.15      int cpu = timer->cpu;
    1.16 -    if ( add_entry(&timers[cpu].heap, timer) )
    1.17 +    if ( add_entry(&per_cpu(timers, cpu).heap, timer) )
    1.18          cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
    1.19  }
    1.20  
    1.21 @@ -157,7 +157,7 @@ static inline void __add_timer(struct ti
    1.22  static inline void __stop_timer(struct timer *timer)
    1.23  {
    1.24      int cpu = timer->cpu;
    1.25 -    if ( remove_entry(timers[cpu].heap, timer) )
    1.26 +    if ( remove_entry(per_cpu(timers, cpu).heap, timer) )
    1.27          cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
    1.28  }
    1.29  
    1.30 @@ -168,10 +168,10 @@ static inline void timer_lock(struct tim
    1.31      for ( ; ; )
    1.32      {
    1.33          cpu = timer->cpu;
    1.34 -        spin_lock(&timers[cpu].lock);
    1.35 +        spin_lock(&per_cpu(timers, cpu).lock);
    1.36          if ( likely(timer->cpu == cpu) )
    1.37              break;
    1.38 -        spin_unlock(&timers[cpu].lock);
    1.39 +        spin_unlock(&per_cpu(timers, cpu).lock);
    1.40      }
    1.41  }
    1.42  
    1.43 @@ -182,7 +182,7 @@ static inline void timer_lock(struct tim
    1.44  
    1.45  static inline void timer_unlock(struct timer *timer)
    1.46  {
    1.47 -        spin_unlock(&timers[timer->cpu].lock);
    1.48 +        spin_unlock(&per_cpu(timers, timer->cpu).lock);
    1.49  }
    1.50  
    1.51  #define timer_unlock_irq(t) \
    1.52 @@ -234,20 +234,20 @@ void migrate_timer(struct timer *timer, 
    1.53  
    1.54          if ( old_cpu < new_cpu )
    1.55          {
    1.56 -            spin_lock_irqsave(&timers[old_cpu].lock, flags);
    1.57 -            spin_lock(&timers[new_cpu].lock);
    1.58 +            spin_lock_irqsave(&per_cpu(timers, old_cpu).lock, flags);
    1.59 +            spin_lock(&per_cpu(timers, new_cpu).lock);
    1.60          }
    1.61          else
    1.62          {
    1.63 -            spin_lock_irqsave(&timers[new_cpu].lock, flags);
    1.64 -            spin_lock(&timers[old_cpu].lock);
    1.65 +            spin_lock_irqsave(&per_cpu(timers, new_cpu).lock, flags);
    1.66 +            spin_lock(&per_cpu(timers, old_cpu).lock);
    1.67          }
    1.68  
    1.69          if ( likely(timer->cpu == old_cpu) )
    1.70               break;
    1.71  
    1.72 -        spin_unlock(&timers[old_cpu].lock);
    1.73 -        spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
    1.74 +        spin_unlock(&per_cpu(timers, old_cpu).lock);
    1.75 +        spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    1.76      }
    1.77  
    1.78      if ( active_timer(timer) )
    1.79 @@ -261,8 +261,8 @@ void migrate_timer(struct timer *timer, 
    1.80          timer->cpu = new_cpu;
    1.81      }
    1.82  
    1.83 -    spin_unlock(&timers[old_cpu].lock);
    1.84 -    spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
    1.85 +    spin_unlock(&per_cpu(timers, old_cpu).lock);
    1.86 +    spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
    1.87  }
    1.88  
    1.89  
    1.90 @@ -271,7 +271,7 @@ void kill_timer(struct timer *timer)
    1.91      int           cpu;
    1.92      unsigned long flags;
    1.93  
    1.94 -    BUG_ON(timers[smp_processor_id()].running == timer);
    1.95 +    BUG_ON(this_cpu(timers).running == timer);
    1.96  
    1.97      timer_lock_irqsave(timer, flags);
    1.98  
    1.99 @@ -282,23 +282,25 @@ void kill_timer(struct timer *timer)
   1.100      timer_unlock_irqrestore(timer, flags);
   1.101  
   1.102      for_each_online_cpu ( cpu )
   1.103 -        while ( timers[cpu].running == timer )
   1.104 +        while ( per_cpu(timers, cpu).running == timer )
   1.105              cpu_relax();
   1.106  }
   1.107  
   1.108  
   1.109  static void timer_softirq_action(void)
   1.110  {
   1.111 -    int           cpu = smp_processor_id();
   1.112 -    struct timer *t, **heap;
   1.113 -    s_time_t      now;
   1.114 -    void        (*fn)(void *);
   1.115 -    void         *data;
   1.116 +    struct timer  *t, **heap;
   1.117 +    struct timers *ts;
   1.118 +    s_time_t       now;
   1.119 +    void         (*fn)(void *);
   1.120 +    void          *data;
   1.121  
   1.122 -    spin_lock_irq(&timers[cpu].lock);
   1.123 +    ts = &this_cpu(timers);
   1.124 +
   1.125 +    spin_lock_irq(&ts->lock);
   1.126      
   1.127      do {
   1.128 -        heap = timers[cpu].heap;
   1.129 +        heap = ts->heap;
   1.130          now  = NOW();
   1.131  
   1.132          while ( (GET_HEAP_SIZE(heap) != 0) &&
   1.133 @@ -306,24 +308,24 @@ static void timer_softirq_action(void)
   1.134          {
   1.135              remove_entry(heap, t);
   1.136  
   1.137 -            timers[cpu].running = t;
   1.138 +            ts->running = t;
   1.139  
   1.140              fn   = t->function;
   1.141              data = t->data;
   1.142  
   1.143 -            spin_unlock_irq(&timers[cpu].lock);
   1.144 +            spin_unlock_irq(&ts->lock);
   1.145              (*fn)(data);
   1.146 -            spin_lock_irq(&timers[cpu].lock);
   1.147 +            spin_lock_irq(&ts->lock);
   1.148  
   1.149              /* Heap may have grown while the lock was released. */
   1.150 -            heap = timers[cpu].heap;
   1.151 +            heap = ts->heap;
   1.152          }
   1.153  
   1.154 -        timers[cpu].running = NULL;
   1.155 +        ts->running = NULL;
   1.156      }
   1.157      while ( !reprogram_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
   1.158  
   1.159 -    spin_unlock_irq(&timers[cpu].lock);
   1.160 +    spin_unlock_irq(&ts->lock);
   1.161  }
   1.162  
   1.163  
   1.164 @@ -338,25 +340,28 @@ void process_pending_timers(void)
   1.165  
   1.166  static void dump_timerq(unsigned char key)
   1.167  {
   1.168 -    struct timer *t;
   1.169 -    unsigned long flags; 
   1.170 -    s_time_t      now = NOW();
   1.171 -    int           i, j;
   1.172 +    struct timer  *t;
   1.173 +    struct timers *ts;
   1.174 +    unsigned long  flags; 
   1.175 +    s_time_t       now = NOW();
   1.176 +    int            i, j;
   1.177  
   1.178      printk("Dumping timer queues: NOW=0x%08X%08X\n",
   1.179             (u32)(now>>32), (u32)now); 
   1.180  
   1.181      for_each_online_cpu( i )
   1.182      {
   1.183 +        ts = &per_cpu(timers, i);
   1.184 +
   1.185          printk("CPU[%02d] ", i);
   1.186 -        spin_lock_irqsave(&timers[i].lock, flags);
   1.187 -        for ( j = 1; j <= GET_HEAP_SIZE(timers[i].heap); j++ )
   1.188 +        spin_lock_irqsave(&ts->lock, flags);
   1.189 +        for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
   1.190          {
   1.191 -            t = timers[i].heap[j];
   1.192 +            t = ts->heap[j];
   1.193              printk ("  %d : %p ex=0x%08X%08X %p\n",
   1.194                      j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
   1.195          }
   1.196 -        spin_unlock_irqrestore(&timers[i].lock, flags);
   1.197 +        spin_unlock_irqrestore(&ts->lock, flags);
   1.198          printk("\n");
   1.199      }
   1.200  }
   1.201 @@ -378,8 +383,8 @@ void __init timer_init(void)
   1.202  
   1.203      for ( i = 0; i < NR_CPUS; i++ )
   1.204      {
   1.205 -        spin_lock_init(&timers[i].lock);
   1.206 -        timers[i].heap = &dummy_heap;
   1.207 +        spin_lock_init(&per_cpu(timers, i).lock);
   1.208 +        per_cpu(timers, i).heap = &dummy_heap;
   1.209      }
   1.210  
   1.211      register_keyhandler('a', dump_timerq, "dump timer queues");