ia64/xen-unstable: linux-2.6.10-xen-sparse/arch/xen/kernel/evtchn.c @ 3889:6184c07ee434

bitkeeper revision 1.1230.2.5 (421afdf6gp9lAUBMAFeH4-ZMa0jMpg)

Manual merge.

author   kaf24@scramble.cl.cam.ac.uk
date     Tue Feb 22 09:40:06 2005 +0000
parents  0a4b76b6b5a0 606f04db7033
children afd4d93e8891

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
EXPORT_SYMBOL(force_evtchn_callback);
EXPORT_SYMBOL(evtchn_do_upcall);
#endif

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* evtchn <-> IPI mapping. */
#ifndef NR_IPIS // XXX SMH: temp fix for 2.4
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
#define do_IRQ(irq, regs) do {                  \
    (regs)->orig_eax = (irq);                   \
    do_IRQ((regs));                             \
} while (0)
#endif
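
/*
 * NB. The wrapper above stuffs the Xen-assigned IRQ number into orig_eax in
 * the saved register frame, so that the architecture's real do_IRQ() can
 * recover it there, much as a hardware interrupt-gate stub would have left it.
 */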

#define VALID_EVTCHN(_chn) ((_chn) >= 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    (void)HYPERVISOR_xen_version(0);
}
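
/*
 * NB. force_evtchn_callback() is typically invoked right after clearing
 * evtchn_upcall_mask (for example from the local_irq_enable() replacement in
 * the hypervisor interface headers), so that events which arrived while the
 * mask was set are delivered immediately rather than at the next hypercall
 * or interrupt.
 */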

asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    unsigned long  l1, l2;
    unsigned int   l1i, l2i, port;
    int            irq;
    unsigned long  flags;
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t   *vcpu_info = &s->vcpu_data[smp_processor_id()];

    local_irq_save(flags);

    while ( vcpu_info->evtchn_upcall_pending )
    {
        vcpu_info->evtchn_upcall_pending = 0;
        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
        while ( (l1i = ffs(l1)) != 0 )
        {
            l1i--;
            l1 &= ~(1 << l1i);

            for ( ;; )
            {
                l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
                l2i = ffs(l2);
                if ( l2i == 0 )
                    break;
                l2i--;
                l2 &= ~(1 << l2i);

                port = (l1i << 5) + l2i;
                if ( (irq = evtchn_to_irq[port]) != -1 )
                    do_IRQ(irq, regs);
                else
                    evtchn_device_upcall(port);
            }
        }
    }

    local_irq_restore(flags);
}
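
/*
 * NB. The loop above walks a two-level pending bitmap: evtchn_pending_sel
 * selects which 32-bit word of s->evtchn_pending[] may contain set bits, and
 * the inner loop scans that word with the corresponding mask word applied,
 * so the port number is (word index << 5) + bit index.
 */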

static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
    {
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(virq_to_irq, cpu)[virq] = irq;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int cpu    = smp_processor_id();
    int irq    = per_cpu(virq_to_irq, cpu)[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(virq_to_irq, cpu)[virq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
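
/*
 * Illustrative usage of the VIRQ interface (a sketch only, not code from this
 * file; the handler name is hypothetical):
 *
 *     int irq = bind_virq_to_irq(VIRQ_TIMER);
 *     if ( request_irq(irq, my_timer_interrupt, SA_INTERRUPT, "timer", NULL) )
 *         BUG();
 *     ...
 *     free_irq(irq, NULL);
 *     unbind_virq_from_irq(VIRQ_TIMER);
 */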

int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn, irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
    {
        op.cmd = EVTCHNOP_bind_ipi;
        op.u.bind_ipi.ipi_edom = cpu;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
        evtchn = op.u.bind_ipi.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
    } else
        irq = evtchn_to_irq[evtchn];

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
    int irq    = evtchn_to_irq[evtchn]; /* NB. evtchn -> irq lookup, not irq -> evtchn. */

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
    }

    spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irq(int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_evtchn_from_irq(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
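
/*
 * Inter-domain event channels use the same interface: a frontend driver that
 * has been told its event-channel port can bind it and register a handler.
 * This is only an illustrative sketch (handler and device names are
 * hypothetical, not taken from this file):
 *
 *     int irq = bind_evtchn_to_irq(evtchn);
 *     if ( request_irq(irq, my_frontend_interrupt, SA_SAMPLE_RANDOM,
 *                      "my-frontend", dev) )
 *         printk(KERN_WARNING "request_irq failed\n");
 *     ...
 *     free_irq(irq, dev);
 *     unbind_evtchn_from_irq(evtchn);
 */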

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return 0;
    unmask_evtchn(evtchn);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",
    startup_dynirq,
    shutdown_dynirq,
    enable_dynirq,
    disable_dynirq,
    ack_dynirq,
    end_dynirq,
    NULL
};

static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
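
/*
 * NB. Some physical IRQs (typically level-triggered ones) require Xen to be
 * notified when the guest unmasks the event channel, so the interrupt can be
 * re-enabled at the machine interrupt controller as well. pirq_query_unmask()
 * caches that per-PIRQ property in pirq_needs_unmask_notify[], and
 * pirq_unmask_notify() issues the notification when the bit is set.
 */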

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq] = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}

static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd = EVTCHNOP_close;
    op.u.close.dom = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq] = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}

static struct hw_interrupt_type pirq_type = {
    "Phys-irq",
    startup_pirq,
    shutdown_pirq,
    enable_pirq,
    disable_pirq,
    ack_pirq,
    end_pirq,
    NULL
};
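
/*
 * NB. The Linux IRQ number space here is split between a PIRQ range
 * (pirq_to_irq(), statically bound 1:1 to physical IRQs and driven by
 * pirq_type) and a dynamic range (dynirq_to_irq(), handed out on demand for
 * VIRQs, IPIs and inter-domain event channels and driven by dynirq_type);
 * see init_IRQ() below.
 */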

void irq_suspend(void)
{
    int pirq, virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    /* Check that no PIRQs are still bound. */
    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
                  pirq, evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}

void __init init_IRQ(void)
{
    int i;
    int cpu;

    irq_ctx_init(0);

    spin_lock_init(&irq_mapping_update_lock);

    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
        /* No VIRQ -> IRQ mappings. */
        for ( i = 0; i < NR_VIRQS; i++ )
            per_cpu(virq_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action  = 0;
        irq_desc[dynirq_to_irq(i)].depth   = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action  = 0;
        irq_desc[pirq_to_irq(i)].depth   = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }

    /* This needs to be done early, but after the IRQ subsystem is alive. */
    ctrl_if_init();
}