ia64/xen-unstable

changeset 8800:974ed9f73641

Fix the Xen timer interface to allow migration of timers
among CPUs (using the new migrate_timer() call). Fix the
locking protocol in light of this addition.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Feb 08 17:27:32 2006 +0100 (2006-02-08)
parents b246f429f683
children 65127e18d821
files xen/common/timer.c xen/include/xen/timer.h
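
Why the locking protocol changes: with migrate_timer() a timer's owning CPU can
change while another CPU is trying to lock it, so the new timer_lock() helper
snapshots timer->cpu, takes that CPU's lock, and re-checks ownership before
proceeding, retrying if it lost the race. Below is a minimal standalone sketch
of the same lock-and-recheck pattern, using pthread mutexes and made-up names
(per_cpu_lock, struct obj) rather than Xen's timers[] array and spinlocks:

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* One lock per CPU, standing in for Xen's timers[cpu].lock. */
    static pthread_mutex_t per_cpu_lock[NR_CPUS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    struct obj {
        volatile unsigned int cpu;   /* owning CPU; may change concurrently */
    };

    static void obj_lock(struct obj *o)
    {
        unsigned int cpu;

        for ( ; ; )
        {
            cpu = o->cpu;                        /* snapshot the current owner */
            pthread_mutex_lock(&per_cpu_lock[cpu]);
            if ( o->cpu == cpu )                 /* still owned by that CPU?   */
                break;                           /* yes: we hold the right lock */
            /* no: a concurrent migration moved it; drop the lock and retry */
            pthread_mutex_unlock(&per_cpu_lock[cpu]);
        }
    }

    static void obj_unlock(struct obj *o)
    {
        pthread_mutex_unlock(&per_cpu_lock[o->cpu]);
    }

    int main(void)
    {
        struct obj o = { .cpu = 0 };

        obj_lock(&o);
        printf("locked while owned by CPU %u\n", o.cpu);
        obj_unlock(&o);
        return 0;
    }

migrate_timer() itself must hold both the old and the new CPU's lock; the patch
takes them in ascending CPU-index order so that two concurrent migrations
cannot deadlock against each other.
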
line diff
     1.1 --- a/xen/common/timer.c	Wed Feb 08 17:26:20 2006 +0100
     1.2 +++ b/xen/common/timer.c	Wed Feb 08 17:27:32 2006 +0100
     1.3 @@ -161,46 +161,122 @@ static inline void __stop_timer(struct t
     1.4          cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
     1.5  }
     1.6  
     1.7 +static inline void timer_lock(struct timer *timer)
     1.8 +{
     1.9 +    unsigned int cpu;
    1.10 +
    1.11 +    for ( ; ; )
    1.12 +    {
    1.13 +        cpu = timer->cpu;
    1.14 +        spin_lock(&timers[cpu].lock);
    1.15 +        if ( likely(timer->cpu == cpu) )
    1.16 +            break;
    1.17 +        spin_unlock(&timers[cpu].lock);
    1.18 +    }
    1.19 +}
    1.20 +
    1.21 +#define timer_lock_irq(t) \
    1.22 +    do { local_irq_disable(); timer_lock(t); } while ( 0 )
    1.23 +#define timer_lock_irqsave(t, flags) \
    1.24 +    do { local_irq_save(flags); timer_lock(t); } while ( 0 )
    1.25 +
    1.26 +static inline void timer_unlock(struct timer *timer)
    1.27 +{
     1.28 +    spin_unlock(&timers[timer->cpu].lock);
    1.29 +}
    1.30 +
    1.31 +#define timer_unlock_irq(t) \
    1.32 +    do { timer_unlock(t); local_irq_enable(); } while ( 0 )
    1.33 +#define timer_unlock_irqrestore(t, flags) \
    1.34 +    do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )
    1.35 +
    1.36  
    1.37  void set_timer(struct timer *timer, s_time_t expires)
    1.38  {
    1.39 -    int           cpu = timer->cpu;
    1.40      unsigned long flags;
    1.41  
    1.42 -    spin_lock_irqsave(&timers[cpu].lock, flags);
    1.43 +    timer_lock_irqsave(timer, flags);
    1.44 +
    1.45      if ( active_timer(timer) )
    1.46          __stop_timer(timer);
    1.47 +
    1.48      timer->expires = expires;
    1.49 +
    1.50      if ( likely(!timer->killed) )
    1.51          __add_timer(timer);
    1.52 -    spin_unlock_irqrestore(&timers[cpu].lock, flags);
    1.53 +
    1.54 +    timer_unlock_irqrestore(timer, flags);
    1.55  }
    1.56  
    1.57  
    1.58  void stop_timer(struct timer *timer)
    1.59  {
    1.60 -    int           cpu = timer->cpu;
    1.61 +    unsigned long flags;
    1.62 +
    1.63 +    timer_lock_irqsave(timer, flags);
    1.64 +
    1.65 +    if ( active_timer(timer) )
    1.66 +        __stop_timer(timer);
    1.67 +
    1.68 +    timer_unlock_irqrestore(timer, flags);
    1.69 +}
    1.70 +
    1.71 +
    1.72 +void migrate_timer(struct timer *timer, unsigned int new_cpu)
    1.73 +{
    1.74 +    int           old_cpu;
    1.75      unsigned long flags;
    1.76  
    1.77 -    spin_lock_irqsave(&timers[cpu].lock, flags);
    1.78 +    for ( ; ; )
    1.79 +    {
    1.80 +        if ( (old_cpu = timer->cpu) == new_cpu )
    1.81 +            return;
    1.82 +
    1.83 +        if ( old_cpu < new_cpu )
    1.84 +        {
    1.85 +            spin_lock_irqsave(&timers[old_cpu].lock, flags);
    1.86 +            spin_lock(&timers[new_cpu].lock);
    1.87 +        }
    1.88 +        else
    1.89 +        {
    1.90 +            spin_lock_irqsave(&timers[new_cpu].lock, flags);
    1.91 +            spin_lock(&timers[old_cpu].lock);
    1.92 +        }
    1.93 +
    1.94 +        if ( likely(timer->cpu == old_cpu) )
     1.95 +            break;
    1.96 +
    1.97 +        spin_unlock(&timers[old_cpu].lock);
    1.98 +        spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
    1.99 +    }
   1.100 +
   1.101      if ( active_timer(timer) )
   1.102          __stop_timer(timer);
   1.103 -    spin_unlock_irqrestore(&timers[cpu].lock, flags);
   1.104 +
   1.105 +    timer->cpu = new_cpu;
   1.106 +
   1.107 +    if ( likely(!timer->killed) )
   1.108 +        __add_timer(timer);
   1.109 +
   1.110 +    spin_unlock(&timers[old_cpu].lock);
   1.111 +    spin_unlock_irqrestore(&timers[new_cpu].lock, flags);
   1.112  }
   1.113  
   1.114  
   1.115  void kill_timer(struct timer *timer)
   1.116  {
   1.117 -    int           cpu = timer->cpu;
   1.118 +    int           cpu;
   1.119      unsigned long flags;
   1.120  
   1.121 -    BUG_ON(timers[cpu].running == timer);
   1.122 +    BUG_ON(timers[smp_processor_id()].running == timer);
   1.123  
   1.124 -    spin_lock_irqsave(&timers[cpu].lock, flags);
   1.125 +    timer_lock_irqsave(timer, flags);
   1.126 +
   1.127      if ( active_timer(timer) )
   1.128          __stop_timer(timer);
   1.129      timer->killed = 1;
   1.130 -    spin_unlock_irqrestore(&timers[cpu].lock, flags);
   1.131 +
   1.132 +    timer_unlock_irqrestore(timer, flags);
   1.133  
   1.134      for_each_online_cpu ( cpu )
   1.135          while ( timers[cpu].running == timer )
     2.1 --- a/xen/include/xen/timer.h	Wed Feb 08 17:26:20 2006 +0100
     2.2 +++ b/xen/include/xen/timer.h	Wed Feb 08 17:27:32 2006 +0100
     2.3 @@ -66,6 +66,12 @@ extern void set_timer(struct timer *time
     2.4  extern void stop_timer(struct timer *timer);
     2.5  
     2.6  /*
     2.7 + * Migrate a timer to a different CPU. The timer must have been previously
     2.8 + * initialised by init_timer(). The timer may be active.
     2.9 + */
    2.10 +extern void migrate_timer(struct timer *timer, unsigned int new_cpu);
    2.11 +
    2.12 +/*
    2.13   * Deactivate a timer and prevent it from being re-set (future calls to
    2.14   * set_timer will silently fail). When this function returns it is guaranteed
    2.15   * that the timer callback handler is not running on any CPU.
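
For illustration only, a hypothetical caller of the new interface (the
callback, the CPU argument and the one-second expiry are invented; init_timer(),
set_timer(), NOW() and SECONDS() are assumed to be the existing Xen timer/time
helpers):

    #include <xen/timer.h>
    #include <xen/time.h>

    static struct timer example_timer;

    static void example_timer_fn(void *data)
    {
        /* timer callback body */
    }

    static void example(unsigned int new_cpu)
    {
        /* Bind the timer to CPU 0 and arm it one second from now. */
        init_timer(&example_timer, example_timer_fn, NULL, 0);
        set_timer(&example_timer, NOW() + SECONDS(1));

        /* Later, e.g. when the timer's owner is rescheduled elsewhere,
         * re-home it. Per the new header comment, the timer may still be
         * active when migrate_timer() is called. */
        migrate_timer(&example_timer, new_cpu);
    }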