ia64/xen-unstable

view xen/include/asm-ia64/linux-xen/linux/interrupt.h @ 6457:d34925e4144b

Still more cleanup and moving to 2.6.13 base
author djm@kirby.fc.hp.com
date Thu Sep 01 11:09:27 2005 -0600 (2005-09-01)
parents 9312a3e8a6f8
children b2f4823b6ff0
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x code can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle it.
 * IRQ_HANDLED means that we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two, depending on whether x is
 * non-zero (non-zero means handled).
 */
typedef int irqreturn_t;

#define IRQ_NONE	(0)
#define IRQ_HANDLED	(1)
#define IRQ_RETVAL(x)	((x) != 0)
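
/*
 * Example (editor's sketch, not part of the original header): a minimal
 * 2.6.13-era three-argument interrupt handler that uses IRQ_RETVAL() to
 * report whether its device really raised the interrupt.  struct my_dev
 * and my_dev_irq_pending() are hypothetical.
 */
static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_dev *dev = dev_id;
	int handled = 0;

	/* Only claim the interrupt if our hardware flagged it; this
	 * matters on shared lines, where every handler gets polled. */
	if (my_dev_irq_pending(dev)) {
		/* ... acknowledge and service the device here ... */
		handled = 1;
	}
	return IRQ_RETVAL(handled);	/* IRQ_HANDLED or IRQ_NONE */
}
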
#ifndef XEN
struct irqaction {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
#endif
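
/*
 * Example (editor's sketch): registering and releasing the handler above
 * through this header's request_irq()/free_irq().  SA_SHIRQ (the 2.6.13-era
 * shared-line flag) is an assumption for illustration; on a shared line the
 * dev_id cookie must be unique so free_irq() can identify our handler.
 */
static int my_attach(struct my_dev *dev, unsigned int irq)
{
	int ret = request_irq(irq, my_handler, SA_SHIRQ, "my_dev", dev);
	if (ret)
		return ret;		/* line busy or irq invalid */
	dev->irq = irq;
	return 0;
}

static void my_detach(struct my_dev *dev)
{
	free_irq(dev->irq, dev);	/* dev matches the dev_id we registered */
}
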
#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
	local_irq_disable();
}
static inline void __deprecated sti(void)
{
	local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
	local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
	local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
	local_irq_save(*x);
}
#define save_and_cli(x)	save_and_cli(&x)
#endif /* CONFIG_SMP */
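
/*
 * Example (editor's sketch): what "fixed" code should use instead of the
 * deprecated UP-only helpers above.  local_irq_save()/local_irq_restore()
 * operate on the local CPU only and remain correct on SMP; my_count is
 * hypothetical.
 */
static int my_count;

static void my_update(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* instead of save_and_cli(flags) */
	my_count++;			/* interrupt-free critical section */
	local_irq_restore(flags);	/* instead of restore_flags(flags) */
}
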
/* SoftIRQ primitives. */
#define local_bh_disable() \
		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);
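
/*
 * Example (editor's sketch): guarding process-context access to data that a
 * softirq or tasklet also touches.  local_bh_disable() keeps softirqs from
 * running on this CPU until the matching local_bh_enable(); the spinlock is
 * still needed against other CPUs (spin_lock_bh() combines both steps).
 * my_list and my_list_lock are hypothetical and assume <linux/list.h> and
 * <linux/spinlock.h>.
 */
static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_list_lock);

static void my_add_entry(struct list_head *entry)
{
	local_bh_disable();		/* no softirqs on this CPU now */
	spin_lock(&my_list_lock);
	list_add(entry, &my_list);
	spin_unlock(&my_list_lock);
	local_bh_enable();		/* may run pending softirqs */
}
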
/* Please avoid allocating new softirqs unless you really need very
   high-frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et al.
   should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	SCSI_SOFTIRQ,
	TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
#ifndef XEN
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
#endif
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));
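
/*
 * Example (editor's sketch): wiring up a softirq with the API above.
 * MY_SOFTIRQ stands for a hypothetical extra entry in the enum above (per
 * the comment above, prefer a tasklet; this only illustrates the calls).
 * open_softirq() installs the action once at init time; raise_softirq()
 * marks it pending so do_softirq() runs it on this CPU, typically on the
 * return path from an interrupt.
 */
static void my_action(struct softirq_action *a)
{
	/* keep this short: softirq context, interrupts enabled, no sleeping */
}

static void my_softirq_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_action, NULL);
}

/* later, e.g. from an interrupt handler: raise_softirq(MY_SOFTIRQ); */
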
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not with
     respect to other tasklets.  If the client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
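
/*
 * Example (editor's sketch): a statically declared tasklet.  DECLARE_TASKLET
 * starts enabled (count == 0); DECLARE_TASKLET_DISABLED starts with
 * count == 1 and needs tasklet_enable() before it will run.  my_work is
 * hypothetical; its argument is the tasklet's 'data' field.
 */
static void my_work(unsigned long data)
{
	/* deferred work: runs in softirq context, may not sleep */
}

static DECLARE_TASKLET(my_tasklet, my_work, 0);

/* from an interrupt handler: tasklet_schedule(&my_tasklet); (see below) */
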
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
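
/*
 * Example (editor's sketch): the dynamic tasklet lifecycle with the
 * functions above.  struct my_rx_dev and my_rx_work() are hypothetical.
 */
struct my_rx_dev {
	struct tasklet_struct rx_tasklet;
	/* ... device state ... */
};

static void my_rx_work(unsigned long data)
{
	struct my_rx_dev *dev = (struct my_rx_dev *)data;
	/* drain dev's receive ring; softirq context, may not sleep */
}

static void my_rx_init(struct my_rx_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, my_rx_work, (unsigned long)dev);
}

static void my_rx_pause(struct my_rx_dev *dev)
{
	tasklet_disable(&dev->rx_tasklet);	/* returns once it is not running */
	/* ... reconfigure the device ... */
	tasklet_enable(&dev->rx_tasklet);
}

static void my_rx_teardown(struct my_rx_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);	/* waits out any scheduled/running instance */
}
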
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
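
/*
 * Example (editor's sketch): the eight-step probing recipe from the comment
 * above, condensed.  my_dev_mask_irq(), my_dev_trigger_irq() and
 * my_dev_ack_irq() are hypothetical device hooks; mdelay() (from
 * <linux/delay.h>) gives the interrupt time to arrive.
 */
static int my_probe_irq(void)
{
	unsigned long irqs;
	int irq;

	my_dev_mask_irq();		/* step 1: quiesce the device */
	irqs = probe_irq_on();		/* step 3: take over idle IRQs */
	my_dev_trigger_irq();		/* step 4: make the device interrupt */
	mdelay(10);			/* step 5: non-intrusive wait */
	irq = probe_irq_off(irqs);	/* step 6: 0 = none, negative = multiple */
	my_dev_ack_irq();		/* step 7: clear the pending interrupt */

	return irq;			/* step 8: caller may loop if paranoid */
}
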
#endif