ia64/xen-unstable

view xen/common/softirq.c @ 945:db2e1ea917df

bitkeeper revision 1.596.1.3 (3fb3b41eWUoRU0H8A0jEX5roXjxKkA)

Many files:
Greatly simplified Xen softirqs. They are now only executed in outermost Xen activation; they are never called within an irq context.
author kaf24@scramble.cl.cam.ac.uk
date Thu Nov 13 16:41:02 2003 +0000 (2003-11-13)
parents d666e315c859
children 7a554cbf0f58
/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due bh_mask_count not atomic handling. Copyright (C) 1998 Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tqueue.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (by clear reasons). Hence, old parts of code still using global locks
     MUST NOT use softirqs, but insert interfacing routines acquiring
     global locks. F.e. look at BHs implementation.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
   - Bottom halves: globally serialized, grr...
 */
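
/*
 * A minimal usage sketch of the softirq interfaces defined further down in
 * this file: a handler is registered once with open_softirq() and later
 * kicked with raise_softirq().  EXAMPLE_SOFTIRQ and example_action() are
 * hypothetical names used only for illustration; real softirq numbers are
 * assigned in the interrupt headers.
 */
#if 0
static void example_action(struct softirq_action *h)
{
    /* Deferred work: runs from do_softirq(), never in irq context. */
}

static void example_init(void)
{
    /* Register the handler for this softirq number at start of day. */
    open_softirq(EXAMPLE_SOFTIRQ, example_action, NULL);
}

static void example_interrupt(void)
{
    /* Mark the softirq pending on this cpu; it runs at the next do_softirq(). */
    raise_softirq(EXAMPLE_SOFTIRQ);
}
#endif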

irq_cpustat_t irq_stat[NR_CPUS];

static struct softirq_action softirq_vec[32] __cacheline_aligned;

asmlinkage void do_softirq()
{
    int cpu = smp_processor_id();
    struct softirq_action *h;
    __u32 pending;

    /* Softirqs run only from the outermost activation, never in irq context. */
    if ( in_interrupt() )
        BUG();

    local_bh_disable();

    /* Atomically grab and clear this cpu's pending mask; repeat until empty. */
    while ( (pending = xchg(&softirq_pending(cpu), 0)) != 0 )
    {
        h = softirq_vec;
        while ( pending )
        {
            /* Run the handler for each bit set in the pending mask. */
            if (pending & 1)
                h->action(h);
            h++;
            pending >>= 1;
        }
    }

    __local_bh_enable();
}

inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
    __cpu_raise_softirq(cpu, nr);
#ifdef CONFIG_SMP
    if ( cpu != smp_processor_id() )
        smp_send_event_check_cpu(cpu);
#endif
}

void raise_softirq(unsigned int nr)
{
    cpu_raise_softirq(smp_processor_id(), nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
    softirq_vec[nr].data = data;
    softirq_vec[nr].action = action;
}

/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;

void __tasklet_schedule(struct tasklet_struct *t)
{
    int cpu = smp_processor_id();
    unsigned long flags;

    local_irq_save(flags);
    t->next = tasklet_vec[cpu].list;
    tasklet_vec[cpu].list = t;
    cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
    local_irq_restore(flags);
}

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
    int cpu = smp_processor_id();
    unsigned long flags;

    local_irq_save(flags);
    t->next = tasklet_hi_vec[cpu].list;
    tasklet_hi_vec[cpu].list = t;
    cpu_raise_softirq(cpu, HI_SOFTIRQ);
    local_irq_restore(flags);
}

static void tasklet_action(struct softirq_action *a)
{
    int cpu = smp_processor_id();
    struct tasklet_struct *list;

    /* Detach this cpu's tasklet list with interrupts disabled. */
    local_irq_disable();
    list = tasklet_vec[cpu].list;
    tasklet_vec[cpu].list = NULL;
    local_irq_enable();

    while (list) {
        struct tasklet_struct *t = list;

        list = list->next;

        if (tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {
                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                    BUG();
                t->func(t->data);
            }
            tasklet_unlock(t);
            continue;
        }

        /* Tasklet is running elsewhere: requeue it and re-raise the softirq. */
        local_irq_disable();
        t->next = tasklet_vec[cpu].list;
        tasklet_vec[cpu].list = t;
        __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
        local_irq_enable();
    }
}

static void tasklet_hi_action(struct softirq_action *a)
{
    int cpu = smp_processor_id();
    struct tasklet_struct *list;

    local_irq_disable();
    list = tasklet_hi_vec[cpu].list;
    tasklet_hi_vec[cpu].list = NULL;
    local_irq_enable();

    while (list) {
        struct tasklet_struct *t = list;

        list = list->next;

        if (tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {
                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                    BUG();
                t->func(t->data);
            }
            tasklet_unlock(t);
            continue;
        }

        local_irq_disable();
        t->next = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = t;
        __cpu_raise_softirq(cpu, HI_SOFTIRQ);
        local_irq_enable();
    }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
    t->next = NULL;
    t->state = 0;
    atomic_set(&t->count, 0);
    t->func = func;
    t->data = data;
}

void tasklet_kill(struct tasklet_struct *t)
{
    if (in_interrupt())
        BUG();
    /* Wait for any pending schedule to be consumed, then for the handler to finish. */
    while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
        while (test_bit(TASKLET_STATE_SCHED, &t->state))
            do_softirq();
    tasklet_unlock_wait(t);
    clear_bit(TASKLET_STATE_SCHED, &t->state);
}
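
/*
 * A minimal tasklet lifecycle sketch using the primitives above, assuming
 * the usual tasklet_schedule() wrapper from the interrupt header and a
 * hypothetical handler example_tasklet_fn().
 */
#if 0
static void example_tasklet_fn(unsigned long data)
{
    /* Runs from tasklet_action(); serialized against itself. */
}

static struct tasklet_struct example_tasklet;

static void example_setup(void)
{
    tasklet_init(&example_tasklet, example_tasklet_fn, 0);
}

static void example_trigger(void)
{
    tasklet_schedule(&example_tasklet);   /* queue on this cpu's list */
}

static void example_teardown(void)
{
    tasklet_kill(&example_tasklet);       /* wait out any pending run */
}
#endif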

/* Old style BHs */

static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];

/* BHs are serialized by spinlock global_bh_lock.

   It is still possible to make synchronize_bh() as
   spin_unlock_wait(&global_bh_lock). This operation is not used
   by kernel now, so that this lock is not made private only
   due to wait_on_irq().

   It can be removed only after auditing all the BHs.
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
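
/*
 * A minimal old-style BH sketch, assuming a spare BH number EXAMPLE_BH and
 * the mark_bh() helper from the interrupt header; both names are
 * illustrative only.  init_bh() and remove_bh() are defined below.
 */
#if 0
static void example_bh(void)
{
    /* Runs via bh_action() with global_bh_lock held: one BH at a time. */
}

static void example_bh_setup(void)
{
    init_bh(EXAMPLE_BH, example_bh);
}

static void example_bh_trigger(void)
{
    mark_bh(EXAMPLE_BH);       /* schedules bh_task_vec[EXAMPLE_BH] */
}

static void example_bh_teardown(void)
{
    remove_bh(EXAMPLE_BH);
}
#endif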

static void bh_action(unsigned long nr)
{
    int cpu = smp_processor_id();

    /* BHs are globally serialized: only one may run at a time, system-wide. */
    if (!spin_trylock(&global_bh_lock))
        goto resched;

    if (!hardirq_trylock(cpu))
        goto resched_unlock;

    if (bh_base[nr])
        bh_base[nr]();

    hardirq_endlock(cpu);
    spin_unlock(&global_bh_lock);
    return;

 resched_unlock:
    spin_unlock(&global_bh_lock);
 resched:
    /* Could not run now; mark the BH again so it is retried later. */
    mark_bh(nr);
}

void init_bh(int nr, void (*routine)(void))
{
    bh_base[nr] = routine;
    mb();
}

void remove_bh(int nr)
{
    tasklet_kill(bh_task_vec+nr);
    bh_base[nr] = NULL;
}

void __init softirq_init()
{
    int i;

    for (i=0; i<32; i++)
        tasklet_init(bh_task_vec+i, bh_action, i);

    open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
    open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

void __run_task_queue(task_queue *list)
{
    struct list_head head, *next;
    unsigned long flags;

    /* Splice the whole queue onto a private list under the lock. */
    spin_lock_irqsave(&tqueue_lock, flags);
    list_add(&head, list);
    list_del_init(list);
    spin_unlock_irqrestore(&tqueue_lock, flags);

    next = head.next;
    while (next != &head) {
        void (*f) (void *);
        struct tq_struct *p;
        void *data;

        p = list_entry(next, struct tq_struct, list);
        next = next->next;
        f = p->routine;
        data = p->data;
        wmb();
        /* Clear the sync flag before the call so the element can be requeued. */
        p->sync = 0;
        if (f)
            f(data);
    }
}
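
/*
 * A minimal task-queue sketch, assuming the DECLARE_TASK_QUEUE() macro and
 * the queue_task()/run_task_queue() helpers from the tqueue header; the
 * example_work() routine is hypothetical.
 */
#if 0
static void example_work(void *data)
{
    /* Deferred routine; its sync flag is cleared just before this call. */
}

static DECLARE_TASK_QUEUE(example_queue);
static struct tq_struct example_tq = { routine: example_work, data: NULL };

static void example_defer(void)
{
    queue_task(&example_tq, &example_queue);   /* no-op if already queued */
}

static void example_flush(void)
{
    run_task_queue(&example_queue);            /* drains via __run_task_queue() */
}
#endif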