ia64/xen-unstable: linux-2.4.26-xen-sparse/arch/xen/kernel/evtchn.c @ 1774:131c48baa117

bitkeeper revision 1.1071.1.5 (40f41ae00utn5d2f3tlNLcvG_QhiBA)

Fairly major fixes to the network frontend driver. Much saner now.

author   kaf24@scramble.cl.cam.ac.uk
date     Tue Jul 13 17:24:48 2004 +0000
parents  f3123052268f
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 */

#include <linux/config.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <asm/ctrl_if.h>
#include <asm/hypervisor.h>
#include <asm/hypervisor-ifs/event_channel.h>
#include <asm/hypervisor-ifs/physdev.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
static int virq_to_irq[NR_VIRQS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Upcall to generic IRQ layer. */
extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);

#define VALID_EVTCHN(_chn) ((_chn) != -1)

void evtchn_do_upcall(struct pt_regs *regs)
{
    unsigned long  l1, l2;
    unsigned int   l1i, l2i, port;
    int            irq;
    unsigned long  flags;
    shared_info_t *s = HYPERVISOR_shared_info;

    local_irq_save(flags);

    while ( s->vcpu_data[0].evtchn_upcall_pending )
    {
        s->vcpu_data[0].evtchn_upcall_pending = 0;
        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xchg(&s->evtchn_pending_sel, 0);
        while ( (l1i = ffs(l1)) != 0 )
        {
            l1i--;
            l1 &= ~(1 << l1i);

            l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
            while ( (l2i = ffs(l2)) != 0 )
            {
                l2i--;
                l2 &= ~(1 << l2i);

                port = (l1i << 5) + l2i;
                if ( (irq = evtchn_to_irq[port]) != -1 )
                    do_IRQ(irq, regs);
                else
                    evtchn_device_upcall(port);
            }
        }
    }

    local_irq_restore(flags);
}
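
/*
 * Note on the decode above: pending state is kept as a two-level bitmap.
 * Each set bit l1i of evtchn_pending_sel selects one 32-bit word of
 * s->evtchn_pending[], and each set bit l2i within that word (whose mask bit
 * is clear) names one event-channel port, hence port = (l1i << 5) + l2i.
 * Worked example: port 37 is reported as selector bit 1 plus bit 5 of
 * evtchn_pending[1], since 1 * 32 + 5 = 37.
 */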
static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = virq_to_irq[virq]) == -1 )
    {
        op.cmd              = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq]    = evtchn;

        virq_to_irq[virq] = irq;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int irq    = virq_to_irq[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd          = EVTCHNOP_close;
        op.u.close.dom  = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq]    = -1;
        virq_to_irq[virq]     = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irq(int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq]    = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_evtchn_from_irq(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq]    = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
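
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * split-device frontend that has learnt its remote port number -- the
 * variable 'evtchn' below is hypothetical, as are 'my_handler' and 'my_dev'
 * -- would bind that port to a dynamic IRQ and then attach an ordinary
 * Linux handler to it:
 *
 *     int irq = bind_evtchn_to_irq(evtchn);
 *     if ( request_irq(irq, my_handler, 0, "my-device", my_dev) != 0 )
 *         unbind_evtchn_from_irq(evtchn);
 *
 * Teardown is the reverse: free_irq(irq, my_dev) followed by
 * unbind_evtchn_from_irq(evtchn). Masking and unmasking of the underlying
 * event channel is handled by the dynirq_type callbacks below.
 */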
/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    unmask_evtchn(irq_to_evtchn[irq]);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    mask_evtchn(irq_to_evtchn[irq]);
}

static void enable_dynirq(unsigned int irq)
{
    unmask_evtchn(irq_to_evtchn[irq]);
}

static void disable_dynirq(unsigned int irq)
{
    mask_evtchn(irq_to_evtchn[irq]);
}

static void ack_dynirq(unsigned int irq)
{
    mask_evtchn(irq_to_evtchn[irq]);
    clear_evtchn(irq_to_evtchn[irq]);
}

static void end_dynirq(unsigned int irq)
{
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(irq_to_evtchn[irq]);
}
static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",      /* typename */
    startup_dynirq,     /* startup */
    shutdown_dynirq,    /* shutdown */
    enable_dynirq,      /* enable */
    disable_dynirq,     /* disable */
    ack_dynirq,         /* ack */
    end_dynirq,         /* end */
    NULL                /* set_affinity */
};
static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd                    = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
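
/*
 * The two helpers above implement the unmask-notification protocol for
 * physical IRQs: pirq_query_unmask() asks Xen, via
 * PHYSDEVOP_IRQ_STATUS_QUERY, whether unmasking this PIRQ must be
 * accompanied by an explicit notification, and caches the answer in
 * pirq_needs_unmask_notify[]. pirq_unmask_notify() then issues
 * PHYSDEVOP_IRQ_UNMASK_NOTIFY only for PIRQs that were flagged, so the
 * common unmask path avoids the extra hypercall.
 */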
/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd               = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq  = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq]    = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}
static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd          = EVTCHNOP_close;
    op.u.close.dom  = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq]    = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}
static struct hw_interrupt_type pirq_type = {
    "Phys-irq",         /* typename */
    startup_pirq,       /* startup */
    shutdown_pirq,      /* shutdown */
    enable_pirq,        /* enable */
    disable_pirq,       /* disable */
    ack_pirq,           /* ack */
    end_pirq,           /* end */
    NULL                /* set_affinity */
};
static void misdirect_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    /* nothing */
}

static struct irqaction misdirect_action = {
    misdirect_interrupt,    /* handler */
    SA_INTERRUPT,           /* flags */
    0,                      /* mask */
    "misdirect",            /* name */
    NULL,                   /* dev_id */
    NULL                    /* next */
};
void irq_suspend(void)
{
    int virq, irq, evtchn;

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = virq_to_irq[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq]    = -1;
    }

    /*
     * We should now be unbound from all event channels. Stale bindings to
     * PIRQs and/or inter-domain event channels will cause us to barf here.
     */
    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        if ( evtchn_to_irq[evtchn] != -1 )
            panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int         virq, irq, evtchn;

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = virq_to_irq[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd              = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq]    = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}
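
/*
 * The expectation encoded in irq_suspend() above is that individual drivers
 * tear down their PIRQ and inter-domain bindings themselves before a
 * suspend; only VIRQ bindings survive the suspend/resume cycle, and
 * irq_resume() re-establishes them by requesting fresh ports from Xen and
 * unmasking each one once the new mapping has been recorded.
 */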
void __init init_IRQ(void)
{
    int i;

    spin_lock_init(&irq_mapping_update_lock);

    /* No VIRQ -> IRQ mappings. */
    for ( i = 0; i < NR_VIRQS; i++ )
        virq_to_irq[i] = -1;

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action  = 0;
        irq_desc[dynirq_to_irq(i)].depth   = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action  = 0;
        irq_desc[pirq_to_irq(i)].depth   = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }

    /* Bind the misdirect VIRQ to its (do-nothing) handler. */
    (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);

    /* This needs to be done early, but after the IRQ subsystem is alive. */
    ctrl_if_init();
}