ia64/xen-unstable

changeset 330:74c24fb522be

bitkeeper revision 1.147 (3e78b275x-BoAnufenNxz28sriWdcQ)

interrupt.h, ac_timer.h, softirq.c, schedule.c, ac_timer.c, time.c:
Fixes to timer and softirq code. Can now update timers on remote CPUs.
author kaf24@scramble.cl.cam.ac.uk
date Wed Mar 19 18:09:57 2003 +0000 (2003-03-19)
parents 79e370880245
children 03dc7864109b
files xen/arch/i386/time.c xen/common/ac_timer.c xen/common/schedule.c xen/common/softirq.c xen/include/xeno/ac_timer.h xen/include/xeno/interrupt.h
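
The shape of the change: struct ac_timer gains a cpu field, init_ac_timer() takes the owning CPU, and add/rem/mod_ac_timer() return void, with the new AC_TIMER_SOFTIRQ doing any reprogramming. A minimal usage sketch of the interface after this changeset; the timer and handler names are made up, and the rearm pattern mirrors update_time() in xen/arch/i386/time.c:

    #include <xeno/ac_timer.h>
    #include <xeno/time.h>

    static struct ac_timer example_timer;        /* hypothetical client timer */

    static void example_handler(unsigned long unused)
    {
        /* ... periodic work ... */

        /* Rearm. add_ac_timer() is void now: a timeout already in the past
         * is handled via the AC_TIMER_SOFTIRQ path, so the old "goto again"
         * retry loops disappear from every caller. */
        example_timer.expires = NOW() + MILLISECS(200);
        add_ac_timer(&example_timer);
    }

    void example_init(void)
    {
        init_ac_timer(&example_timer, 0);        /* bind to CPU 0 */
        example_timer.function = &example_handler;
        example_timer.expires  = NOW() + MILLISECS(200);
        add_ac_timer(&example_timer);
    }
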
line diff
     1.1 --- a/xen/arch/i386/time.c	Mon Mar 17 18:57:15 2003 +0000
     1.2 +++ b/xen/arch/i386/time.c	Wed Mar 19 18:09:57 2003 +0000
     1.3 @@ -364,10 +364,8 @@ static void update_time(unsigned long fo
     1.4                 wall_clock_time.tv_usec));
     1.5  
     1.6      /* Reload the timer. */
     1.7 - again:
     1.8      update_timer.expires  = new_st + MILLISECS(200);
     1.9 -    if(add_ac_timer(&update_timer) == 1)
    1.10 -        goto again;
    1.11 +    add_ac_timer(&update_timer);
    1.12  }
    1.13  
    1.14  /***************************************************************************
    1.15 @@ -402,7 +400,7 @@ int __init init_xeno_time()
    1.16      wctime_st = NOW();
    1.17  
    1.18      /* start timer to update time periodically */
    1.19 -    init_ac_timer(&update_timer);
    1.20 +    init_ac_timer(&update_timer, 0);
    1.21      update_timer.function = &update_time;
    1.22      update_time(0);
    1.23  
     2.1 --- a/xen/common/ac_timer.c	Mon Mar 17 18:57:15 2003 +0000
     2.2 +++ b/xen/common/ac_timer.c	Wed Mar 19 18:09:57 2003 +0000
     2.3 @@ -24,13 +24,11 @@
     2.4  #include <xeno/sched.h>
     2.5  #include <xeno/lib.h>
     2.6  #include <xeno/smp.h>
     2.7 -
     2.8  #include <xeno/perfc.h>
     2.9 -
    2.10  #include <xeno/time.h>
    2.11 +#include <xeno/interrupt.h>
    2.12  #include <xeno/ac_timer.h>
    2.13  #include <xeno/keyhandler.h>
    2.14 -
    2.15  #include <asm/system.h>
    2.16  #include <asm/desc.h>
    2.17  
    2.18 @@ -55,47 +53,24 @@ typedef struct ac_timers_st
    2.19  } __cacheline_aligned ac_timers_t;
    2.20  static ac_timers_t ac_timers[NR_CPUS];
    2.21  
    2.22 -/* local prototypes */
    2.23 -static int  detach_ac_timer(struct ac_timer *timer);
    2.24 -
    2.25  
    2.26  /*****************************************************************************
    2.27   * add a timer.
    2.28 - * return value:
    2.29 - *  0: success
    2.30 - *  1: failure, timer in the past or timeout value to small
    2.31 - * -1: failure, timer uninitialised
    2.32 - * fail
    2.33 + * return value: CPU mask of remote processors to send an event to
    2.34   *****************************************************************************/
    2.35 -int add_ac_timer(struct ac_timer *timer)
    2.36 +static inline unsigned long __add_ac_timer(struct ac_timer *timer)
    2.37  {
    2.38 -    int              cpu = smp_processor_id();
    2.39 -    unsigned long    flags;
    2.40 -    s_time_t         now;
    2.41 -
    2.42 -    /* make sure timeout value is in the future */
    2.43 -    
    2.44 -    now = NOW();
    2.45 -    if (timer->expires <= now) {    
    2.46 -        TRC(printk("ACT[%02d] add_ac_timer:now=0x%08X%08X>expire=0x%08X%08X\n",
    2.47 -                   cpu, (u32)(now>>32), (u32)now,
    2.48 -                   (u32)(timer->expires>>32), (u32)timer->expires));
    2.49 -        return 1;
    2.50 -    }
    2.51 -
    2.52 -    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
    2.53 +    int cpu = timer->cpu;
    2.54  
    2.55      /*
    2.56 -     * Add timer to the list. If it gets added to the front we have to
    2.57 -     * reprogramm the timer
     2.58 +     * Add timer to the list. If it gets added to the front, we schedule
     2.59 +     * a softirq. This will reprogram the timer, or handle the timer event
     2.60 +     * immediately, depending on whether the alarm is sufficiently far in
     2.61 +     * the future.
    2.62       */
    2.63      if (list_empty(&ac_timers[cpu].timers)) {
    2.64 -        if (!reprogram_ac_timer(timer->expires)) {
    2.65 -            printk("ACT[%02d] add at head failed\n", cpu);
    2.66 -            spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
    2.67 -            return 1; /* failed */
    2.68 -        }
    2.69          list_add(&timer->timer_list, &ac_timers[cpu].timers);
    2.70 +        goto send_softirq;
    2.71      } else {
    2.72          struct list_head *pos;
    2.73          struct ac_timer  *t;
    2.74 @@ -105,88 +80,122 @@ int add_ac_timer(struct ac_timer *timer)
    2.75              if (t->expires > timer->expires)
    2.76                  break;
    2.77          }
    2.78 -        list_add (&(timer->timer_list), pos->prev);
    2.79 +        list_add(&(timer->timer_list), pos->prev);
    2.80 +
    2.81 +        if (timer->timer_list.prev == &ac_timers[cpu].timers)
    2.82 +            goto send_softirq;
    2.83 +    }
    2.84 +
    2.85 +    return 0;
    2.86 +
    2.87 + send_softirq:
    2.88 +    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
    2.89 +    return (cpu != smp_processor_id()) ? 1<<cpu : 0;
    2.90 +}
    2.91  
    2.92 -        if (timer->timer_list.prev == &ac_timers[cpu].timers) {
    2.93 -            /* added at head */
    2.94 -            if (!reprogram_ac_timer(timer->expires)) {
    2.95 -                printk("ACT[%02d] add at head failed\n", cpu);
    2.96 -                detach_ac_timer(timer);
    2.97 -                spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
    2.98 -                return 1; /* failed */
    2.99 -            }
   2.100 -        }
   2.101 -    }
   2.102 +void add_ac_timer(struct ac_timer *timer) 
   2.103 +{
   2.104 +    int           cpu = timer->cpu;
   2.105 +    unsigned long flags, cpu_mask;
   2.106 +
   2.107 +    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.108 +    ASSERT(timer != NULL);
   2.109 +    ASSERT(!active_ac_timer(timer));
   2.110 +    cpu_mask = __add_ac_timer(timer);
   2.111      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.112 -    return 0;
   2.113 +
   2.114 +    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
   2.115  }
   2.116  
   2.117 +
   2.118  /*****************************************************************************
   2.119   * detach a timer (no locking)
    2.120 - * return values:
    2.121 - *  0: success
    2.122 - * -1: bogus timer
   2.123   *****************************************************************************/
   2.124 -static int detach_ac_timer(struct ac_timer *timer)
   2.125 +static inline void detach_ac_timer(struct ac_timer *timer)
   2.126  {  
   2.127      TRC(printk("ACT  [%02d] detach(): \n", cpu));
   2.128      list_del(&timer->timer_list);
   2.129      timer->timer_list.next = NULL;
   2.130 -    return 0;
   2.131  }
   2.132  
   2.133 +
   2.134  /*****************************************************************************
   2.135   * remove a timer
   2.136 - * return values:
   2.137 - *  0: success
   2.138 - * -1: bogus timer
   2.139 + * return values: CPU mask of remote processors to send an event to
   2.140   *****************************************************************************/
   2.141 -int rem_ac_timer(struct ac_timer *timer)
   2.142 +static inline unsigned long __rem_ac_timer(struct ac_timer *timer)
   2.143  {
   2.144 -    int           cpu = smp_processor_id();
   2.145 -    int           res = 0;
   2.146 -    unsigned long flags;
   2.147 +    int cpu = timer->cpu;
   2.148  
   2.149      TRC(printk("ACT  [%02d] remove(): timo=%lld \n", cpu, timer->expires));
   2.150 -    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.151 -    if (timer->timer_list.next) {
   2.152 -        res = detach_ac_timer(timer);
   2.153 +    ASSERT(timer->timer_list.next);
   2.154 +
   2.155 +    detach_ac_timer(timer);
   2.156 +    
   2.157 +    if (timer->timer_list.prev == &ac_timers[cpu].timers) {
   2.158 +        /* just removed the head */
   2.159 +        if (list_empty(&ac_timers[cpu].timers)) {
   2.160 +            goto send_softirq;
   2.161 +        } else {
   2.162 +            timer = list_entry(ac_timers[cpu].timers.next,
   2.163 +                               struct ac_timer, timer_list);
   2.164 +            if ( timer->expires > (NOW() + TIMER_SLOP) )
   2.165 +                goto send_softirq;
   2.166 +        }
   2.167 +    }
   2.168  
   2.169 -        if (timer->timer_list.prev == &ac_timers[cpu].timers) {
   2.170 -            /* just removed the head */
   2.171 -            if (list_empty(&ac_timers[cpu].timers)) {
   2.172 -                reprogram_ac_timer((s_time_t) 0);
   2.173 -            } else {
   2.174 -                timer = list_entry(ac_timers[cpu].timers.next,
   2.175 -                                   struct ac_timer, timer_list);
   2.176 -                if ( timer->expires > (NOW() + TIMER_SLOP) )
   2.177 -                    reprogram_ac_timer(timer->expires);
   2.178 -            }
   2.179 -        }
   2.180 -    } else
   2.181 -        res = -1;
   2.182 +    return 0;
   2.183 +
   2.184 + send_softirq:
   2.185 +    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
   2.186 +    return (cpu != smp_processor_id()) ? 1<<cpu : 0;
   2.187 +}
   2.188  
   2.189 +void rem_ac_timer(struct ac_timer *timer)
   2.190 +{
   2.191 +    int           cpu = timer->cpu;
   2.192 +    unsigned long flags, cpu_mask = 0;
   2.193 +
   2.194 +    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.195 +    ASSERT(timer != NULL);
   2.196 +    if ( active_ac_timer(timer) )
   2.197 +        cpu_mask = __rem_ac_timer(timer);
   2.198      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.199  
   2.200 -    return res;
   2.201 +    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
   2.202  }
   2.203  
   2.204 +
   2.205  /*****************************************************************************
   2.206   * modify a timer, i.e., set a new timeout value
    2.207 - * return value:
    2.208 - *  0: sucess
    2.209 - * -1: error
   2.212   *****************************************************************************/
   2.213 -int mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
   2.214 +void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
   2.215  {
   2.216 -    if (rem_ac_timer(timer) != 0)
   2.217 -        return -1;
   2.218 +    int           cpu = timer->cpu;
   2.219 +    unsigned long flags, cpu_mask = 0;
   2.220 +
   2.221 +    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.222 +
   2.223 +    ASSERT(timer != NULL);
   2.224 +
   2.225 +    if ( active_ac_timer(timer) )
   2.226 +        cpu_mask = __rem_ac_timer(timer);
   2.227      timer->expires = new_time;
   2.228 -    if (add_ac_timer(timer) != 0)
   2.229 -        return -1;
   2.230 -    return 0;
   2.231 +    cpu_mask |= __add_ac_timer(timer);
   2.232 +
   2.233 +    spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.234 +
   2.235 +    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
   2.236  }
   2.237  
   2.238 +
   2.239  /*****************************************************************************
   2.240   * do_ac_timer
   2.241   * deal with timeouts and run the handlers
   2.242 @@ -202,15 +211,10 @@ void do_ac_timer(void)
   2.243      spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.244  
   2.245   do_timer_again:
   2.246 -
   2.247      TRC(printk("ACT  [%02d] do(): now=%lld\n", cpu, NOW()));
   2.248          
   2.249      /* Sanity: is the timer list empty? */
   2.250 -    if ( list_empty(&ac_timers[cpu].timers) ) {
   2.251 -        /* This does sometimes happen: race condition in resetting timeout? */
   2.252 -        spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.253 -        return;
   2.254 -    }
   2.255 +    if ( list_empty(&ac_timers[cpu].timers) ) goto out;
   2.256  
   2.257      /* Handle all timeouts in the near future. */
   2.258      while ( !list_empty(&ac_timers[cpu].timers) )
   2.259 @@ -218,6 +222,8 @@ void do_ac_timer(void)
   2.260          t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
   2.261          if ( t->expires > (NOW() + TIMER_SLOP) ) break;
   2.262  
   2.263 +        ASSERT(t->cpu == cpu);
   2.264 +
   2.265          /* do some stats */
   2.266          diff = (now - t->expires);
   2.267          if (diff > 0x7fffffff) diff =  0x7fffffff; /* THIS IS BAD! */
   2.268 @@ -234,23 +240,22 @@ void do_ac_timer(void)
   2.269      if ( !list_empty(&ac_timers[cpu].timers) )
   2.270      {
   2.271          t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
   2.272 -        if ( t->expires > 0 )
   2.273 +        TRC(printk("ACT  [%02d] do(): reprog timo=%lld\n",cpu,t->expires));
   2.274 +        if ( !reprogram_ac_timer(t->expires) )
   2.275          {
   2.276 -            TRC(printk("ACT  [%02d] do(): reprog timo=%lld\n",cpu,t->expires));
   2.277 -            if ( !reprogram_ac_timer(t->expires) )
   2.278 -            {
   2.279 -                TRC(printk("ACT  [%02d] do(): again\n", cpu));
   2.280 -                goto do_timer_again;
   2.281 -            }
   2.282 +            TRC(printk("ACT  [%02d] do(): again\n", cpu));
   2.283 +            goto do_timer_again;
   2.284          }
   2.285      } else {
   2.286          reprogram_ac_timer((s_time_t) 0);
   2.287      }
   2.288  
   2.289 + out:
   2.290      spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.291      TRC(printk("ACT  [%02d] do(): end\n", cpu));
   2.292  }
   2.293  
   2.294 +
   2.295  /*****************************************************************************
   2.296   * debug dump_queue
   2.297   * arguments: queue head, name of queue
   2.298 @@ -274,6 +279,40 @@ static void dump_tqueue(struct list_head
   2.299      return; 
   2.300  }
   2.301  
   2.302 +
   2.303 +static void ac_timer_softirq_action(struct softirq_action *a)
   2.304 +{
   2.305 +    int           cpu = smp_processor_id();
   2.306 +    unsigned long flags;
   2.307 +    struct ac_timer *t;
   2.308 +    struct list_head *tlist;
   2.309 +
   2.310 +    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
   2.311 +    
   2.312 +    tlist = &ac_timers[cpu].timers;
   2.313 +    if ( list_empty(tlist) ) 
   2.314 +    {
   2.315 +        reprogram_ac_timer((s_time_t)0);
   2.316 +        spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
   2.317 +        return;
   2.318 +    }
   2.319 +
    2.320 +    t = list_entry(tlist->next, struct ac_timer, timer_list);
   2.321 +
    2.322 +    if ( (t->expires >= (NOW() + TIMER_SLOP)) &&
    2.323 +         reprogram_ac_timer(t->expires) )
    2.324 +    {
    2.325 +        spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
    2.326 +        return;  /* hardware timer reprogrammed: we are done */
    2.327 +    }
    2.328 +    /* Timer handler needs protecting from local APIC interrupts, but it
    2.329 +     * takes the spinlock itself, so we release that before calling in. */
    2.330 +    spin_unlock(&ac_timers[cpu].lock);
    2.331 +    do_ac_timer();
    2.332 +    local_irq_restore(flags);
    2.333 +}
   2.334 +
   2.335 +
   2.336  void dump_timerq(u_char key, void *dev_id, struct pt_regs *regs)
   2.337  {
   2.338      u_long   flags; 
   2.339 @@ -299,6 +338,8 @@ void __init ac_timer_init(void)
   2.340  
   2.341      printk ("ACT: Initialising Accurate timers\n");
   2.342  
   2.343 +    open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action, NULL);
   2.344 +
   2.345      for (i = 0; i < NR_CPUS; i++)
   2.346      {
   2.347          INIT_LIST_HEAD(&ac_timers[i].timers);
   2.348 @@ -306,6 +347,7 @@ void __init ac_timer_init(void)
   2.349      }
   2.350  }
   2.351  
   2.352 +
   2.353  /*****************************************************************************
   2.354   * GRAVEYARD
   2.355   *****************************************************************************/
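
What the rework in ac_timer.c boils down to: the per-CPU queue is kept sorted by expiry, only an operation that changes the queue head raises AC_TIMER_SOFTIRQ, and the __-prefixed helpers hand back a CPU mask so smp_send_event_check_mask() can be called after the lock is dropped. A standalone host-side model of the head-change test (stand-in types, not the xen ones):

    #include <stdio.h>

    struct timer { long long expires; struct timer *next; };

    static struct timer *queue;    /* models ac_timers[cpu].timers */

    /* Insert in expiry order; return 1 if the new timer became the head,
     * i.e. the case in which __add_ac_timer() raises the softirq so the
     * hardware timer gets reprogrammed. */
    static int add_timer(struct timer *t)
    {
        struct timer **pp = &queue;
        while (*pp && (*pp)->expires <= t->expires)
            pp = &(*pp)->next;
        t->next = *pp;
        *pp = t;
        return pp == &queue;
    }

    int main(void)
    {
        struct timer a = { 100, 0 }, b = { 50, 0 }, c = { 200, 0 };
        printf("%d\n", add_timer(&a));   /* 1: first entry          */
        printf("%d\n", add_timer(&b));   /* 1: new earliest expiry  */
        printf("%d\n", add_timer(&c));   /* 0: inserted behind head */
        return 0;
    }
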
     3.1 --- a/xen/common/schedule.c	Mon Mar 17 18:57:15 2003 +0000
     3.2 +++ b/xen/common/schedule.c	Wed Mar 19 18:09:57 2003 +0000
     3.3 @@ -369,13 +369,6 @@ asmlinkage void schedule(void)
     3.4   sched_done:
     3.5      ASSERT(r_time >= ctx_allow);
     3.6  
     3.7 -#ifndef NDEBUG
     3.8 -    if (r_time < ctx_allow) {
     3.9 -        printk("[%02d]: %lx\n", this_cpu, r_time);
    3.10 -        dump_rqueue(&schedule_data[this_cpu].runqueue, "foo");
    3.11 -    }
    3.12 -#endif
    3.13 -
    3.14      prev->has_cpu = 0;
    3.15      next->has_cpu = 1;
    3.16  
    3.17 @@ -385,14 +378,8 @@ asmlinkage void schedule(void)
    3.18      next->lastschd = now;
    3.19  
    3.20      /* reprogramm the timer */
    3.21 - timer_redo:
    3.22      schedule_data[this_cpu].s_timer.expires  = now + r_time;
    3.23 -    if (add_ac_timer(&schedule_data[this_cpu].s_timer) == 1) {
    3.24 -        printk("SCHED[%02d]: timeout already happened! r_time=%u\n",
    3.25 -               this_cpu, r_time);
    3.26 -        now = NOW();
    3.27 -        goto timer_redo;
    3.28 -    }
    3.29 +    add_ac_timer(&schedule_data[this_cpu].s_timer);
    3.30  
    3.31      spin_unlock_irq(&schedule_data[this_cpu].lock);
    3.32  
    3.33 @@ -450,7 +437,6 @@ static void virt_timer(unsigned long foo
    3.34      unsigned long cpu_mask = 0;
    3.35      struct task_struct *p;
    3.36      s_time_t now;
    3.37 -    int res;
    3.38  
    3.39      /* send virtual timer interrupt */
    3.40      read_lock(&tasklist_lock);
    3.41 @@ -463,12 +449,9 @@ static void virt_timer(unsigned long foo
    3.42      read_unlock(&tasklist_lock);
    3.43      guest_event_notify(cpu_mask);
    3.44  
    3.45 -    again:
    3.46      now = NOW();
    3.47      v_timer.expires  = now + MILLISECS(10);
    3.48 -    res=add_ac_timer(&v_timer);
    3.49 -    if (res==1)
    3.50 -        goto again;
    3.51 +    add_ac_timer(&v_timer);
    3.52  }
    3.53  
    3.54  /*
    3.55 @@ -488,12 +471,12 @@ void __init scheduler_init(void)
    3.56          schedule_data[i].curr = &idle0_task;
    3.57          
    3.58          /* a timer for each CPU  */
    3.59 -        init_ac_timer(&schedule_data[i].s_timer);
    3.60 +        init_ac_timer(&schedule_data[i].s_timer, i);
    3.61          schedule_data[i].s_timer.function = &sched_timer;
    3.62  
    3.63      }
    3.64      schedule_data[0].idle = &idle0_task; /* idle on CPU 0 is special */
    3.65 -    init_ac_timer(&v_timer);
    3.66 +    init_ac_timer(&v_timer, 0);
    3.67      v_timer.function = &virt_timer;
    3.68  }
    3.69  
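
The scheduler side shows why timers now carry a CPU: each schedule_data[i].s_timer is bound to CPU i at init, so it lives on that CPU's queue no matter which processor touches it. A sketch of what this permits, as it might sit inside xen/common/schedule.c (hypothetical helper name):

    /* Retarget CPU `cpu`'s scheduler timer from any processor.  Before
     * this changeset, add/rem/mod only operated on the queue of the CPU
     * that happened to be running them; now mod_ac_timer() takes the
     * owner's queue lock and sends an event-check IPI if needed. */
    static void retarget_sched_timer(int cpu, s_time_t when)
    {
        mod_ac_timer(&schedule_data[cpu].s_timer, when);
    }
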
     4.1 --- a/xen/common/softirq.c	Mon Mar 17 18:57:15 2003 +0000
     4.2 +++ b/xen/common/softirq.c	Wed Mar 19 18:09:57 2003 +0000
     4.3 @@ -48,44 +48,42 @@ static struct softirq_action softirq_vec
     4.4  
     4.5  asmlinkage void do_softirq()
     4.6  {
     4.7 -	int cpu = smp_processor_id();
     4.8 -	__u32 pending;
     4.9 -	long flags;
    4.10 -
    4.11 -	if (in_interrupt())
    4.12 -		return;
    4.13 -
    4.14 -	local_irq_save(flags);
    4.15 +    int cpu = smp_processor_id();
    4.16 +    struct softirq_action *h;
    4.17 +    __u32 pending;
    4.18 +    long flags;
    4.19  
    4.20 -	pending = softirq_pending(cpu);
    4.21 +    if (in_interrupt())
    4.22 +        return;
    4.23  
    4.24 -	while (pending) {
    4.25 -		struct softirq_action *h;
    4.26 +    local_irq_save(flags);
    4.27  
    4.28 -		local_bh_disable();
    4.29 -restart:
    4.30 -		/* Reset the pending bitmask before enabling irqs */
    4.31 -		softirq_pending(cpu) = 0;
    4.32 +    pending = xchg(&softirq_pending(cpu), 0);
    4.33 +    if ( !pending ) goto out;
    4.34 +
    4.35 +    local_bh_disable();
    4.36  
    4.37 -		local_irq_enable();
    4.38 -
    4.39 -		h = softirq_vec;
    4.40 +    do {
    4.41 +        local_irq_enable();
    4.42 +        
    4.43 +        h = softirq_vec;
    4.44 +        
    4.45 +        do {
    4.46 +            if (pending & 1)
    4.47 +                h->action(h);
    4.48 +            h++;
    4.49 +            pending >>= 1;
    4.50 +        } while (pending);
    4.51 +        
    4.52 +        local_irq_disable();
    4.53 +        
    4.54 +        pending = xchg(&softirq_pending(cpu), 0);
    4.55 +    } while ( pending );
    4.56  
    4.57 -		do {
    4.58 -			if (pending & 1)
    4.59 -				h->action(h);
    4.60 -			h++;
    4.61 -			pending >>= 1;
    4.62 -		} while (pending);
    4.63 +    __local_bh_enable();
    4.64  
    4.65 -		local_irq_disable();
    4.66 -
    4.67 -		pending = softirq_pending(cpu);
    4.68 -		if (pending) goto restart;
    4.69 -		__local_bh_enable();
    4.70 -	}
    4.71 -
    4.72 -	local_irq_restore(flags);
    4.73 +out:
    4.74 +    local_irq_restore(flags);
    4.75  }
    4.76  
    4.77  /*
    4.78 @@ -93,27 +91,27 @@ restart:
    4.79   */
    4.80  inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
    4.81  {
    4.82 -	__cpu_raise_softirq(cpu, nr);
    4.83 +    __cpu_raise_softirq(cpu, nr);
    4.84  
    4.85  #ifdef CONFIG_SMP
    4.86 -        if ( cpu != smp_processor_id() )
    4.87 -            smp_send_event_check_cpu(cpu);
    4.88 +    if ( cpu != smp_processor_id() )
    4.89 +        smp_send_event_check_cpu(cpu);
    4.90  #endif
    4.91  }
    4.92  
    4.93  void raise_softirq(unsigned int nr)
    4.94  {
    4.95 -	long flags;
    4.96 +    long flags;
    4.97  
    4.98 -	local_irq_save(flags);
    4.99 -	cpu_raise_softirq(smp_processor_id(), nr);
   4.100 -	local_irq_restore(flags);
   4.101 +    local_irq_save(flags);
   4.102 +    cpu_raise_softirq(smp_processor_id(), nr);
   4.103 +    local_irq_restore(flags);
   4.104  }
   4.105  
   4.106  void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
   4.107  {
   4.108 -	softirq_vec[nr].data = data;
   4.109 -	softirq_vec[nr].action = action;
   4.110 +    softirq_vec[nr].data = data;
   4.111 +    softirq_vec[nr].action = action;
   4.112  }
   4.113  
   4.114  
   4.115 @@ -124,119 +122,119 @@ struct tasklet_head tasklet_hi_vec[NR_CP
   4.116  
   4.117  void __tasklet_schedule(struct tasklet_struct *t)
   4.118  {
   4.119 -	int cpu = smp_processor_id();
   4.120 -	unsigned long flags;
   4.121 +    int cpu = smp_processor_id();
   4.122 +    unsigned long flags;
   4.123  
   4.124 -	local_irq_save(flags);
   4.125 -	t->next = tasklet_vec[cpu].list;
   4.126 -	tasklet_vec[cpu].list = t;
   4.127 -	cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
   4.128 -	local_irq_restore(flags);
   4.129 +    local_irq_save(flags);
   4.130 +    t->next = tasklet_vec[cpu].list;
   4.131 +    tasklet_vec[cpu].list = t;
   4.132 +    cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
   4.133 +    local_irq_restore(flags);
   4.134  }
   4.135  
   4.136  void __tasklet_hi_schedule(struct tasklet_struct *t)
   4.137  {
   4.138 -	int cpu = smp_processor_id();
   4.139 -	unsigned long flags;
   4.140 +    int cpu = smp_processor_id();
   4.141 +    unsigned long flags;
   4.142  
   4.143 -	local_irq_save(flags);
   4.144 -	t->next = tasklet_hi_vec[cpu].list;
   4.145 -	tasklet_hi_vec[cpu].list = t;
   4.146 -	cpu_raise_softirq(cpu, HI_SOFTIRQ);
   4.147 -	local_irq_restore(flags);
   4.148 +    local_irq_save(flags);
   4.149 +    t->next = tasklet_hi_vec[cpu].list;
   4.150 +    tasklet_hi_vec[cpu].list = t;
   4.151 +    cpu_raise_softirq(cpu, HI_SOFTIRQ);
   4.152 +    local_irq_restore(flags);
   4.153  }
   4.154  
   4.155  static void tasklet_action(struct softirq_action *a)
   4.156  {
   4.157 -	int cpu = smp_processor_id();
   4.158 -	struct tasklet_struct *list;
   4.159 +    int cpu = smp_processor_id();
   4.160 +    struct tasklet_struct *list;
   4.161  
   4.162 -	local_irq_disable();
   4.163 -	list = tasklet_vec[cpu].list;
   4.164 -	tasklet_vec[cpu].list = NULL;
   4.165 -	local_irq_enable();
   4.166 +    local_irq_disable();
   4.167 +    list = tasklet_vec[cpu].list;
   4.168 +    tasklet_vec[cpu].list = NULL;
   4.169 +    local_irq_enable();
   4.170  
   4.171 -	while (list) {
   4.172 -		struct tasklet_struct *t = list;
   4.173 +    while (list) {
   4.174 +        struct tasklet_struct *t = list;
   4.175  
   4.176 -		list = list->next;
   4.177 +        list = list->next;
   4.178  
   4.179 -		if (tasklet_trylock(t)) {
   4.180 -			if (!atomic_read(&t->count)) {
   4.181 -				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   4.182 -					BUG();
   4.183 -				t->func(t->data);
   4.184 -			}
   4.185 -			tasklet_unlock(t);
   4.186 -			continue;
   4.187 -		}
   4.188 +        if (tasklet_trylock(t)) {
   4.189 +            if (!atomic_read(&t->count)) {
   4.190 +                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   4.191 +                    BUG();
   4.192 +                t->func(t->data);
   4.193 +            }
   4.194 +            tasklet_unlock(t);
   4.195 +            continue;
   4.196 +        }
   4.197  
   4.198 -		local_irq_disable();
   4.199 -		t->next = tasklet_vec[cpu].list;
   4.200 -		tasklet_vec[cpu].list = t;
   4.201 -		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
   4.202 -		local_irq_enable();
   4.203 -	}
   4.204 +        local_irq_disable();
   4.205 +        t->next = tasklet_vec[cpu].list;
   4.206 +        tasklet_vec[cpu].list = t;
   4.207 +        __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
   4.208 +        local_irq_enable();
   4.209 +    }
   4.210  }
   4.211  
   4.212  static void tasklet_hi_action(struct softirq_action *a)
   4.213  {
   4.214 -	int cpu = smp_processor_id();
   4.215 -	struct tasklet_struct *list;
   4.216 +    int cpu = smp_processor_id();
   4.217 +    struct tasklet_struct *list;
   4.218  
   4.219 -	local_irq_disable();
   4.220 -	list = tasklet_hi_vec[cpu].list;
   4.221 -	tasklet_hi_vec[cpu].list = NULL;
   4.222 -	local_irq_enable();
   4.223 +    local_irq_disable();
   4.224 +    list = tasklet_hi_vec[cpu].list;
   4.225 +    tasklet_hi_vec[cpu].list = NULL;
   4.226 +    local_irq_enable();
   4.227  
   4.228 -	while (list) {
   4.229 -		struct tasklet_struct *t = list;
   4.230 +    while (list) {
   4.231 +        struct tasklet_struct *t = list;
   4.232  
   4.233 -		list = list->next;
   4.234 +        list = list->next;
   4.235  
   4.236 -		if (tasklet_trylock(t)) {
   4.237 -			if (!atomic_read(&t->count)) {
   4.238 -				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   4.239 -					BUG();
   4.240 -				t->func(t->data);
   4.241 -			}
   4.242 -			tasklet_unlock(t);
   4.243 -			continue;
   4.244 -		}
   4.245 +        if (tasklet_trylock(t)) {
   4.246 +            if (!atomic_read(&t->count)) {
   4.247 +                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   4.248 +                    BUG();
   4.249 +                t->func(t->data);
   4.250 +            }
   4.251 +            tasklet_unlock(t);
   4.252 +            continue;
   4.253 +        }
   4.254  
   4.255 -		local_irq_disable();
   4.256 -		t->next = tasklet_hi_vec[cpu].list;
   4.257 -		tasklet_hi_vec[cpu].list = t;
   4.258 -		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
   4.259 -		local_irq_enable();
   4.260 -	}
   4.261 +        local_irq_disable();
   4.262 +        t->next = tasklet_hi_vec[cpu].list;
   4.263 +        tasklet_hi_vec[cpu].list = t;
   4.264 +        __cpu_raise_softirq(cpu, HI_SOFTIRQ);
   4.265 +        local_irq_enable();
   4.266 +    }
   4.267  }
   4.268  
   4.269  
   4.270  void tasklet_init(struct tasklet_struct *t,
   4.271  		  void (*func)(unsigned long), unsigned long data)
   4.272  {
   4.273 -	t->next = NULL;
   4.274 -	t->state = 0;
   4.275 -	atomic_set(&t->count, 0);
   4.276 -	t->func = func;
   4.277 -	t->data = data;
   4.278 +    t->next = NULL;
   4.279 +    t->state = 0;
   4.280 +    atomic_set(&t->count, 0);
   4.281 +    t->func = func;
   4.282 +    t->data = data;
   4.283  }
   4.284  
   4.285  void tasklet_kill(struct tasklet_struct *t)
   4.286  {
   4.287 -	if (in_interrupt())
   4.288 -		printk("Attempt to kill tasklet from interrupt\n");
   4.289 +    if (in_interrupt())
   4.290 +        printk("Attempt to kill tasklet from interrupt\n");
   4.291  
   4.292 -	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
   4.293 -		set_current_state(TASK_RUNNING);
   4.294 -		do {
   4.295 -			current->policy |= SCHED_YIELD;
   4.296 -			schedule();
   4.297 -		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
   4.298 -	}
   4.299 -	tasklet_unlock_wait(t);
   4.300 -	clear_bit(TASKLET_STATE_SCHED, &t->state);
   4.301 +    while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
   4.302 +        set_current_state(TASK_RUNNING);
   4.303 +        do {
   4.304 +            current->policy |= SCHED_YIELD;
   4.305 +            schedule();
   4.306 +        } while (test_bit(TASKLET_STATE_SCHED, &t->state));
   4.307 +    }
   4.308 +    tasklet_unlock_wait(t);
   4.309 +    clear_bit(TASKLET_STATE_SCHED, &t->state);
   4.310  }
   4.311  
   4.312  
   4.313 @@ -259,74 +257,74 @@ spinlock_t global_bh_lock = SPIN_LOCK_UN
   4.314  
   4.315  static void bh_action(unsigned long nr)
   4.316  {
   4.317 -	int cpu = smp_processor_id();
   4.318 +    int cpu = smp_processor_id();
   4.319  
   4.320 -	if (!spin_trylock(&global_bh_lock))
   4.321 -		goto resched;
   4.322 +    if (!spin_trylock(&global_bh_lock))
   4.323 +        goto resched;
   4.324  
   4.325 -	if (!hardirq_trylock(cpu))
   4.326 -		goto resched_unlock;
   4.327 +    if (!hardirq_trylock(cpu))
   4.328 +        goto resched_unlock;
   4.329  
   4.330 -	if (bh_base[nr])
   4.331 -		bh_base[nr]();
   4.332 +    if (bh_base[nr])
   4.333 +        bh_base[nr]();
   4.334  
   4.335 -	hardirq_endlock(cpu);
   4.336 -	spin_unlock(&global_bh_lock);
   4.337 -	return;
   4.338 +    hardirq_endlock(cpu);
   4.339 +    spin_unlock(&global_bh_lock);
   4.340 +    return;
   4.341  
   4.342 -resched_unlock:
   4.343 -	spin_unlock(&global_bh_lock);
   4.344 -resched:
   4.345 -	mark_bh(nr);
   4.346 + resched_unlock:
   4.347 +    spin_unlock(&global_bh_lock);
   4.348 + resched:
   4.349 +    mark_bh(nr);
   4.350  }
   4.351  
   4.352  void init_bh(int nr, void (*routine)(void))
   4.353  {
   4.354 -	bh_base[nr] = routine;
   4.355 -	mb();
   4.356 +    bh_base[nr] = routine;
   4.357 +    mb();
   4.358  }
   4.359  
   4.360  void remove_bh(int nr)
   4.361  {
   4.362 -	tasklet_kill(bh_task_vec+nr);
   4.363 -	bh_base[nr] = NULL;
   4.364 +    tasklet_kill(bh_task_vec+nr);
   4.365 +    bh_base[nr] = NULL;
   4.366  }
   4.367  
   4.368  void __init softirq_init()
   4.369  {
   4.370 -	int i;
   4.371 +    int i;
   4.372  
   4.373 -	for (i=0; i<32; i++)
   4.374 -		tasklet_init(bh_task_vec+i, bh_action, i);
   4.375 +    for (i=0; i<32; i++)
   4.376 +        tasklet_init(bh_task_vec+i, bh_action, i);
   4.377  
   4.378 -	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
   4.379 -	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
   4.380 +    open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
   4.381 +    open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
   4.382  }
   4.383  
   4.384  void __run_task_queue(task_queue *list)
   4.385  {
   4.386 -	struct list_head head, *next;
   4.387 -	unsigned long flags;
   4.388 +    struct list_head head, *next;
   4.389 +    unsigned long flags;
   4.390  
   4.391 -	spin_lock_irqsave(&tqueue_lock, flags);
   4.392 -	list_add(&head, list);
   4.393 -	list_del_init(list);
   4.394 -	spin_unlock_irqrestore(&tqueue_lock, flags);
   4.395 +    spin_lock_irqsave(&tqueue_lock, flags);
   4.396 +    list_add(&head, list);
   4.397 +    list_del_init(list);
   4.398 +    spin_unlock_irqrestore(&tqueue_lock, flags);
   4.399  
   4.400 -	next = head.next;
   4.401 -	while (next != &head) {
   4.402 -		void (*f) (void *);
   4.403 -		struct tq_struct *p;
   4.404 -		void *data;
   4.405 +    next = head.next;
   4.406 +    while (next != &head) {
   4.407 +        void (*f) (void *);
   4.408 +        struct tq_struct *p;
   4.409 +        void *data;
   4.410  
   4.411 -		p = list_entry(next, struct tq_struct, list);
   4.412 -		next = next->next;
   4.413 -		f = p->routine;
   4.414 -		data = p->data;
   4.415 -		wmb();
   4.416 -		p->sync = 0;
   4.417 -		if (f)
   4.418 -			f(data);
   4.419 -	}
   4.420 +        p = list_entry(next, struct tq_struct, list);
   4.421 +        next = next->next;
   4.422 +        f = p->routine;
   4.423 +        data = p->data;
   4.424 +        wmb();
   4.425 +        p->sync = 0;
   4.426 +        if (f)
   4.427 +            f(data);
   4.428 +    }
   4.429  }
   4.430  
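
The do_softirq() rewrite is more than a reindent from tabs to spaces: the pending mask is now fetched and cleared in a single xchg(), and the outer loop repeats until the handlers stop raising new bits. A host-side model of that loop, with C11 stdatomic standing in for the kernel's xchg() and made-up handler names:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int pending;
    static void (*softirq_vec[3])(void);   /* models the handler table */

    static void hi(void)    { puts("HI_SOFTIRQ");       }
    static void actim(void) { puts("AC_TIMER_SOFTIRQ"); }
    static void tlet(void)  { puts("TASKLET_SOFTIRQ");  }

    static void do_softirq_model(void)
    {
        unsigned int p;
        /* Atomically take ownership of every currently pending bit. */
        while ((p = atomic_exchange(&pending, 0)) != 0) {
            void (**h)(void) = softirq_vec;
            do {                    /* run one handler per set bit */
                if (p & 1)
                    (*h)();
                h++;
                p >>= 1;
            } while (p);
        }   /* re-check: handlers may have raised new softirqs meanwhile */
    }

    int main(void)
    {
        softirq_vec[0] = hi; softirq_vec[1] = actim; softirq_vec[2] = tlet;
        atomic_fetch_or(&pending, (1u << 0) | (1u << 1));
        do_softirq_model();
        return 0;
    }
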
     5.1 --- a/xen/include/xeno/ac_timer.h	Mon Mar 17 18:57:15 2003 +0000
     5.2 +++ b/xen/include/xeno/ac_timer.h	Wed Mar 19 18:09:57 2003 +0000
     5.3 @@ -47,16 +47,23 @@ struct ac_timer {
     5.4      s_time_t         expires;   /* system time time out value */
     5.5      unsigned long    data;
     5.6      void             (*function)(unsigned long);
     5.7 +    int              cpu;
     5.8  };
     5.9  
    5.10  /* interface for "clients" */
    5.11 -extern int add_ac_timer(struct ac_timer *timer);
    5.12 -extern int rem_ac_timer(struct ac_timer *timer);
    5.13 -extern int mod_ac_timer(struct ac_timer *timer, s_time_t new_time);
    5.14 -static inline void init_ac_timer(struct ac_timer *timer)
    5.15 +extern void add_ac_timer(struct ac_timer *timer);
    5.16 +extern void rem_ac_timer(struct ac_timer *timer);
    5.17 +extern void mod_ac_timer(struct ac_timer *timer, s_time_t new_time);
    5.18 +static __inline__ void init_ac_timer(struct ac_timer *timer, int cpu)
    5.19  {
    5.20 +    timer->cpu = cpu;
    5.21      timer->timer_list.next = NULL;
    5.22  }
    5.23 +/* check if ac_timer is active, i.e., on the list */
    5.24 +static __inline__ int active_ac_timer(struct ac_timer *timer)
    5.25 +{
    5.26 +    return (timer->timer_list.next != NULL);
    5.27 +}
    5.28  
    5.29  /* interface used by programmable timer, implemented hardware dependent */
    5.30  extern int  reprogram_ac_timer(s_time_t timeout);
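
The header encodes exactly one liveness invariant: a timer is on a queue iff timer_list.next != NULL, and both init_ac_timer() and detach_ac_timer() reset that pointer. A life-cycle sketch under that invariant (hypothetical handler; NOW()/MILLISECS() as used in time.c; a real caller would not queue a stack timer it might not outlive):

    static void demo_handler(unsigned long unused) { /* ... */ }

    static void sentinel_demo(void)
    {
        struct ac_timer t;

        init_ac_timer(&t, 2);           /* timer_list.next == NULL    */
        ASSERT(!active_ac_timer(&t));   /* inactive: safe to add      */

        t.function = &demo_handler;
        t.expires  = NOW() + MILLISECS(1);
        add_ac_timer(&t);               /* linked in: next != NULL    */
        ASSERT(active_ac_timer(&t));

        rem_ac_timer(&t);               /* detach resets next to NULL */
        ASSERT(!active_ac_timer(&t));
        rem_ac_timer(&t);               /* rem on an inactive timer   */
    }                                   /* is now a harmless no-op    */
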
     6.1 --- a/xen/include/xeno/interrupt.h	Mon Mar 17 18:57:15 2003 +0000
     6.2 +++ b/xen/include/xeno/interrupt.h	Wed Mar 19 18:09:57 2003 +0000
     6.3 @@ -3,7 +3,7 @@
     6.4  #define _LINUX_INTERRUPT_H
     6.5  
     6.6  #include <linux/config.h>
     6.7 -//#include <linux/kernel.h>
     6.8 +#include <linux/lib.h>
     6.9  #include <linux/smp.h>
    6.10  #include <linux/cache.h>
    6.11  
    6.12 @@ -45,6 +45,7 @@ enum {
    6.13  enum
    6.14  {
    6.15  	HI_SOFTIRQ=0,
    6.16 +	AC_TIMER_SOFTIRQ,
    6.17  	TASKLET_SOFTIRQ
    6.18  };
    6.19  
    6.20 @@ -61,7 +62,7 @@ struct softirq_action
    6.21  asmlinkage void do_softirq(void);
    6.22  extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
    6.23  extern void softirq_init(void);
    6.24 -#define __cpu_raise_softirq(cpu, nr) do { softirq_pending(cpu) |= 1UL << (nr); } while (0)
    6.25 +#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
    6.26  extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
    6.27  extern void FASTCALL(raise_softirq(unsigned int nr));
    6.28
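
The last hunk is the SMP-correctness piece: the old macro raised a softirq with a plain |=, a non-atomic read-modify-write that can drop a concurrently raised bit now that remote CPUs (e.g. the ac_timer code above) set softirq bits on other processors; set_bit() is the locked, atomic bit operation. A host-side illustration, with GCC __atomic builtins standing in for set_bit():

    #include <pthread.h>
    #include <stdio.h>

    static unsigned long pending_mask;     /* models softirq_pending(cpu) */

    static void *raiser(void *arg)
    {
        unsigned long bit = 1UL << (long)arg;
        int i;
        for (i = 0; i < 100000; i++) {
            /* Atomic OR: what set_bit(nr, &softirq_pending(cpu)) gives us. */
            __atomic_fetch_or(&pending_mask, bit, __ATOMIC_RELAXED);
            /* The old form, "pending_mask |= bit", compiles to a separate
             * load, OR and store; a store from another CPU landing between
             * the load and the store is silently overwritten, and that
             * CPU's softirq may never be noticed. */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, raiser, (void *)0L);
        pthread_create(&b, NULL, raiser, (void *)1L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("pending = 0x%lx\n", pending_mask);   /* 0x3: no bit lost */
        return 0;
    }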