ia64/xen-unstable

changeset 982:433e0c504cbe

bitkeeper revision 1.635 (3fc28bc1lCtZ5EmJM-IipOV5JESnng)

softirq.h, softirq.c:
Cleanups.

author   kaf24@scramble.cl.cam.ac.uk
date     Mon Nov 24 22:52:49 2003 +0000 (2003-11-24)
parents  88ef6048499a
children c6cfb98dc402
files    xen/common/softirq.c xen/include/asm-i386/softirq.h
line diff
     1.1 --- a/xen/common/softirq.c	Mon Nov 24 20:18:29 2003 +0000
     1.2 +++ b/xen/common/softirq.c	Mon Nov 24 22:52:49 2003 +0000
     1.3 @@ -1,73 +1,53 @@
     1.4 -/*
     1.5 - *	linux/kernel/softirq.c
     1.6 - *
     1.7 - *	Copyright (C) 1992 Linus Torvalds
     1.8 - *
     1.9 - * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
    1.10 - * due bh_mask_count not atomic handling. Copyright (C) 1998  Andrea Arcangeli
    1.11 - *
    1.12 - * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
    1.13 +/******************************************************************************
    1.14 + * common/softirq.c
    1.15 + * 
    1.16 + * Modified from the Linux original. Softirqs in Xen are only executed in
    1.17 + * an outermost activation (e.g., never within an interrupt activation).
    1.18 + * This simplifies some things and generally seems a good thing.
    1.19 + * 
    1.20 + * Copyright (c) 2003, K A Fraser
    1.21 + * 
    1.22 + * Copyright (C) 1992 Linus Torvalds
    1.23   */
    1.24  
    1.25 -#include <linux/config.h>
    1.26 -#include <linux/mm.h>
    1.27 -#include <linux/sched.h>
    1.28 -#include <linux/interrupt.h>
    1.29 -#include <linux/init.h>
    1.30 -#include <linux/tqueue.h>
    1.31 -
    1.32 -/*
    1.33 -   - No shared variables, all the data are CPU local.
    1.34 -   - If a softirq needs serialization, let it serialize itself
    1.35 -     by its own spinlocks.
    1.36 -   - Even if softirq is serialized, only local cpu is marked for
    1.37 -     execution. Hence, we get something sort of weak cpu binding.
    1.38 -     Though it is still not clear, will it result in better locality
    1.39 -     or will not.
    1.40 -   - These softirqs are not masked by global cli() and start_bh_atomic()
    1.41 -     (by clear reasons). Hence, old parts of code still using global locks
    1.42 -     MUST NOT use softirqs, but insert interfacing routines acquiring
    1.43 -     global locks. F.e. look at BHs implementation.
    1.44 -
    1.45 -   Examples:
    1.46 -   - NET RX softirq. It is multithreaded and does not require
    1.47 -     any global serialization.
    1.48 -   - NET TX softirq. It kicks software netdevice queues, hence
    1.49 -     it is logically serialized per device, but this serialization
    1.50 -     is invisible to common code.
    1.51 -   - Tasklets: serialized wrt itself.
    1.52 -   - Bottom halves: globally serialized, grr...
    1.53 - */
    1.54 +#include <xeno/config.h>
    1.55 +#include <xeno/mm.h>
    1.56 +#include <xeno/sched.h>
    1.57 +#include <xeno/interrupt.h>
    1.58 +#include <xeno/init.h>
    1.59 +#include <xeno/tqueue.h>
    1.60  
    1.61  irq_cpustat_t irq_stat[NR_CPUS];
    1.62  
    1.63  static struct softirq_action softirq_vec[32] __cacheline_aligned;
    1.64  
    1.65 -
    1.66  asmlinkage void do_softirq()
    1.67  {
    1.68 -    int cpu = smp_processor_id();
    1.69 +    unsigned int pending, cpu = smp_processor_id();
    1.70      struct softirq_action *h;
    1.71 -    __u32 pending;
    1.72  
    1.73 -    if ( in_interrupt() )
    1.74 +    if ( unlikely(in_interrupt()) )
    1.75          BUG();
    1.76  
    1.77 -    local_bh_disable();
    1.78 +    /*
    1.79 +     * XEN: This isn't real mutual-exclusion: it just ensures that in_softirq()
    1.80 +     * and in_interrupt() are both TRUE, allowing checks for erroneous reentry.
    1.81 +     */
    1.82 +    cpu_bh_disable(cpu);
    1.83  
    1.84      while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
    1.85      {
    1.86          h = softirq_vec;
    1.87          while ( pending )
    1.88          {
    1.89 -            if (pending & 1)
    1.90 +            if ( pending & 1 )
    1.91                  h->action(h);
    1.92              h++;
    1.93              pending >>= 1;
    1.94          }
    1.95      }
    1.96  
    1.97 -    __local_bh_enable();
    1.98 +    cpu_bh_enable(cpu);
    1.99  }
   1.100  
   1.101  inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
   1.102 @@ -130,14 +110,18 @@ static void tasklet_action(struct softir
   1.103      tasklet_vec[cpu].list = NULL;
   1.104      local_irq_enable();
   1.105  
   1.106 -    while (list) {
   1.107 +    while ( list != NULL )
   1.108 +    {
   1.109          struct tasklet_struct *t = list;
   1.110  
   1.111          list = list->next;
   1.112  
   1.113 -        if (tasklet_trylock(t)) {
   1.114 -            if (!atomic_read(&t->count)) {
   1.115 -                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   1.116 +        if ( likely(tasklet_trylock(t)) )
   1.117 +        {
   1.118 +            if ( likely(!atomic_read(&t->count)) )
   1.119 +            {
   1.120 +                if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, 
   1.121 +                                                  &t->state)) )
   1.122                      BUG();
   1.123                  t->func(t->data);
   1.124              }
   1.125 @@ -163,14 +147,18 @@ static void tasklet_hi_action(struct sof
   1.126      tasklet_hi_vec[cpu].list = NULL;
   1.127      local_irq_enable();
   1.128  
   1.129 -    while (list) {
   1.130 +    while ( list != NULL )
   1.131 +    {
   1.132          struct tasklet_struct *t = list;
   1.133  
   1.134          list = list->next;
   1.135  
   1.136 -        if (tasklet_trylock(t)) {
   1.137 -            if (!atomic_read(&t->count)) {
   1.138 -                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
   1.139 +        if ( likely(tasklet_trylock(t)) )
   1.140 +        {
   1.141 +            if ( likely(!atomic_read(&t->count)) )
   1.142 +            {
   1.143 +                if ( unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, 
   1.144 +                                                  &t->state)) )
   1.145                      BUG();
   1.146                  t->func(t->data);
   1.147              }
   1.148 @@ -199,10 +187,10 @@ void tasklet_init(struct tasklet_struct 
   1.149  
   1.150  void tasklet_kill(struct tasklet_struct *t)
   1.151  {
   1.152 -    if (in_interrupt())
   1.153 +    if ( in_interrupt() )
   1.154          BUG();
   1.155 -    while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
   1.156 -        while (test_bit(TASKLET_STATE_SCHED, &t->state))
   1.157 +    while ( test_and_set_bit(TASKLET_STATE_SCHED, &t->state) )
   1.158 +        while ( test_bit(TASKLET_STATE_SCHED, &t->state) )
   1.159              do_softirq();
   1.160      tasklet_unlock_wait(t);
   1.161      clear_bit(TASKLET_STATE_SCHED, &t->state);
   1.162 @@ -215,28 +203,19 @@ void tasklet_kill(struct tasklet_struct 
   1.163  static void (*bh_base[32])(void);
   1.164  struct tasklet_struct bh_task_vec[32];
   1.165  
   1.166 -/* BHs are serialized by spinlock global_bh_lock.
   1.167 -
   1.168 -   It is still possible to make synchronize_bh() as
   1.169 -   spin_unlock_wait(&global_bh_lock). This operation is not used
   1.170 -   by kernel now, so that this lock is not made private only
   1.171 -   due to wait_on_irq().
   1.172 -
   1.173 -   It can be removed only after auditing all the BHs.
   1.174 - */
   1.175  spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
   1.176  
   1.177  static void bh_action(unsigned long nr)
   1.178  {
   1.179      int cpu = smp_processor_id();
   1.180  
   1.181 -    if (!spin_trylock(&global_bh_lock))
   1.182 +    if ( !spin_trylock(&global_bh_lock) )
   1.183          goto resched;
   1.184  
   1.185 -    if (!hardirq_trylock(cpu))
   1.186 +    if ( !hardirq_trylock(cpu) )
   1.187          goto resched_unlock;
   1.188  
   1.189 -    if (bh_base[nr])
   1.190 +    if ( likely(bh_base[nr] != NULL) )
   1.191          bh_base[nr]();
   1.192  
   1.193      hardirq_endlock(cpu);
   1.194 @@ -265,7 +244,7 @@ void __init softirq_init()
   1.195  {
   1.196      int i;
   1.197  
   1.198 -    for (i=0; i<32; i++)
    1.199 +    for ( i = 0; i < 32; i++ )
   1.200          tasklet_init(bh_task_vec+i, bh_action, i);
   1.201  
   1.202      open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
   1.203 @@ -274,8 +253,11 @@ void __init softirq_init()
   1.204  
   1.205  void __run_task_queue(task_queue *list)
   1.206  {
   1.207 -    struct list_head head, *next;
   1.208 -    unsigned long flags;
   1.209 +    struct list_head  head, *next;
   1.210 +    unsigned long     flags;
   1.211 +    void              (*f) (void *);
   1.212 +    struct tq_struct *p;
   1.213 +    void             *data;
   1.214  
   1.215      spin_lock_irqsave(&tqueue_lock, flags);
   1.216      list_add(&head, list);
   1.217 @@ -283,18 +265,15 @@ void __run_task_queue(task_queue *list)
   1.218      spin_unlock_irqrestore(&tqueue_lock, flags);
   1.219  
   1.220      next = head.next;
   1.221 -    while (next != &head) {
   1.222 -        void (*f) (void *);
   1.223 -        struct tq_struct *p;
   1.224 -        void *data;
   1.225 -
   1.226 +    while ( next != &head )
   1.227 +    {
   1.228          p = list_entry(next, struct tq_struct, list);
   1.229          next = next->next;
   1.230          f = p->routine;
   1.231          data = p->data;
   1.232          wmb();
   1.233          p->sync = 0;
   1.234 -        if (f)
   1.235 +        if ( likely(f != NULL) )
   1.236              f(data);
   1.237      }
   1.238  }
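
A minimal usage sketch of the interface above (not part of this changeset): MY_SOFTIRQ, my_softirq_action(), my_subsys_init() and my_subsys_kick() are hypothetical names, and the softirq number is assumed to be a free slot in softirq_vec[32]. open_softirq(), cpu_raise_softirq() and smp_processor_id() are the real calls visible in the diff.

    #include <xeno/interrupt.h>

    #define MY_SOFTIRQ 4  /* hypothetical: any unused slot in softirq_vec[32] */

    /* Handler signature matches softirq_vec[]: invoked as h->action(h). */
    static void my_softirq_action(struct softirq_action *h)
    {
        /* Deferred work runs here -- per the new header comment, always
           in an outermost activation, never nested inside an interrupt. */
    }

    void __init my_subsys_init(void)
    {
        open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
    }

    /* Typically called from interrupt context: marks the softirq pending;
       do_softirq() runs it once the outermost activation unwinds. */
    void my_subsys_kick(void)
    {
        cpu_raise_softirq(smp_processor_id(), MY_SOFTIRQ);
    }

This is the same pattern softirq_init() uses above for tasklets: it opens TASKLET_SOFTIRQ with tasklet_action(), so tasklet_schedule() amounts to raising that softirq on the local CPU.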
     2.1 --- a/xen/include/asm-i386/softirq.h	Mon Nov 24 20:18:29 2003 +0000
     2.2 +++ b/xen/include/asm-i386/softirq.h	Mon Nov 24 22:52:49 2003 +0000
     2.3 @@ -4,14 +4,13 @@
     2.4  #include <asm/atomic.h>
     2.5  #include <asm/hardirq.h>
     2.6  
     2.7 -#define __cpu_bh_enable(cpu) \
     2.8 +#define cpu_bh_enable(cpu) \
     2.9  		do { barrier(); local_bh_count(cpu)--; } while (0)
    2.10  #define cpu_bh_disable(cpu) \
    2.11  		do { local_bh_count(cpu)++; barrier(); } while (0)
    2.12  
    2.13  #define local_bh_disable()  cpu_bh_disable(smp_processor_id())
    2.14 -#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
    2.15 -#define local_bh_enable()   __local_bh_enable()
    2.16 +#define local_bh_enable()   cpu_bh_enable(smp_processor_id())
    2.17  
    2.18  #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
    2.19
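
For reference, the "pseudo mutual-exclusion" comment added to do_softirq() relies only on the counting behaviour of this pair. A minimal sketch of that behaviour, assuming local_bh_count() is the per-CPU counter from <asm/hardirq.h> and that in_interrupt() also tests it:

    static void bh_count_demo(void)
    {
        int cpu = smp_processor_id();

        if ( in_softirq() )      /* counter is zero outside a handler     */
            BUG();
        cpu_bh_disable(cpu);     /* local_bh_count(cpu): 0 -> 1           */
        if ( !in_softirq() )     /* in_softirq() and in_interrupt() now   */
            BUG();               /* TRUE, so erroneous reentry is caught  */
        cpu_bh_enable(cpu);      /* local_bh_count(cpu): 1 -> 0           */
    }

Note there is no atomicity or locking here: the counter is per-CPU and only guards against reentry on the same CPU, which is why the changeset's comment is careful not to call it real mutual exclusion.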