ia64/xen-unstable

view linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c @ 5443:085461ee5cd6

bitkeeper revision 1.1713.1.3 (42ad95fdAjhhuDM4A-oDzz_3fc_elA)

export evtchn irq symbols
author ach61@arcadians.cl.cam.ac.uk
date Mon Jun 13 14:19:41 2005 +0000 (2005-06-13)
parents e982699aa8cd
children 999293916aa7

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm-xen/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
EXPORT_SYMBOL(force_evtchn_callback);
EXPORT_SYMBOL(evtchn_do_upcall);
EXPORT_SYMBOL(bind_evtchn_to_irq);
EXPORT_SYMBOL(unbind_evtchn_from_irq);
#endif
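
/*
 * [Editorial sketch -- not part of the original file.] The symbols exported
 * above are the interface a split driver uses to receive events. A minimal,
 * hypothetical usage pattern (the example_* names and the port argument are
 * illustrative, not defined anywhere in the tree) is: bind the event-channel
 * port to a dynamic IRQ, attach an ordinary Linux handler to that IRQ, and
 * tear both down in reverse order.
 */
#if 0 /* illustrative only -- not compiled */
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id,
                                            struct pt_regs *regs)
{
    /* The event was masked and cleared by ack_dynirq() before we run. */
    return IRQ_HANDLED;
}

static int example_connect(unsigned int port, void *dev)
{
    int irq = bind_evtchn_to_irq(port);      /* evtchn -> dynamic IRQ     */
    int err = request_irq(irq, example_evtchn_interrupt, 0,
                          "example-evtchn", dev);
    if ( err != 0 )
        unbind_evtchn_from_irq(port);        /* drop the binding on error */
    return err;
}

static void example_disconnect(unsigned int port, int irq, void *dev)
{
    free_irq(irq, dev);
    unbind_evtchn_from_irq(port);
}
#endif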

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* evtchn <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8  cpu_evtchn[NR_EVENT_CHANNELS];
static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     cpu_evtchn_mask[cpu][idx] &                \
     ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    cpu_evtchn[chn] = cpu;
}

#else

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     ~(sh)->evtchn_mask[idx])

#define bind_evtchn_to_cpu(chn,cpu) ((void)0)

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
#if defined (__i386__)
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {                  \
    (regs)->IRQ_REG = (irq);                    \
    do_IRQ((regs));                             \
} while (0)
#endif

#define VALID_EVTCHN(_chn) ((_chn) >= 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    (void)HYPERVISOR_xen_version(0);
}

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    u32 l1, l2;
    unsigned int l1i, l2i, port;
    int irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];

    vcpu_info->evtchn_upcall_pending = 0;

    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    while ( l1 != 0 )
    {
        l1i = __ffs(l1);
        l1 &= ~(1 << l1i);

        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
        {
            l2i = __ffs(l2);
            l2 &= ~(1 << l2i);

            port = (l1i << 5) + l2i;
            if ( (irq = evtchn_to_irq[port]) != -1 )
                do_IRQ(irq, regs);
            else
                evtchn_device_upcall(port);
        }
    }
}
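
/*
 * [Editorial note -- not part of the original file.] The selector word used
 * above is the first level of a two-level lookup: bit l1i of
 * evtchn_pending_sel means "some port in 32-bit word l1i of the shared
 * evtchn_pending[] array may be pending", and l2i is the bit within that
 * word, so port = l1i * 32 + l2i. For example, l1i = 2 and l2i = 5 name
 * port (2 << 5) + 5 = 69.
 */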

static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
    {
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(virq_to_irq, cpu)[virq] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int irq = per_cpu(virq_to_irq, cpu)[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(virq_to_irq, cpu)[virq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
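
/*
 * [Editorial sketch -- not part of the original file.] Hypothetical use of
 * the VIRQ interface above: bind the virtual IRQ on the current CPU, then
 * install a handler on the returned Linux IRQ. VIRQ_DEBUG and the example_*
 * names are illustrative stand-ins.
 */
#if 0 /* illustrative only -- not compiled */
static irqreturn_t example_virq_interrupt(int irq, void *dev_id,
                                          struct pt_regs *regs)
{
    return IRQ_HANDLED;
}

static int example_bind_debug_virq(void)
{
    int irq = bind_virq_to_irq(VIRQ_DEBUG);
    return request_irq(irq, example_virq_interrupt, 0, "example-virq", NULL);
}
#endif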

int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn, irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
    {
        op.cmd = EVTCHNOP_bind_ipi;
        op.u.bind_ipi.ipi_vcpu = cpu;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
        evtchn = op.u.bind_ipi.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    else
    {
        irq = evtchn_to_irq[evtchn];
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
    }

    spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irq(int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_evtchn_from_irq(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return 0;
    unmask_evtchn(evtchn);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",
    startup_dynirq,
    shutdown_dynirq,
    enable_dynirq,
    disable_dynirq,
    ack_dynirq,
    end_dynirq,
    NULL
};

static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq] = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}

static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd = EVTCHNOP_close;
    op.u.close.dom = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq] = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}

static struct hw_interrupt_type pirq_type = {
    "Phys-irq",
    startup_pirq,
    shutdown_pirq,
    enable_pirq,
    disable_pirq,
    ack_pirq,
    end_pirq,
    NULL
};

void irq_suspend(void)
{
    int pirq, virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    /* Check that no PIRQs are still bound. */
    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
                  pirq, evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}

void __init init_IRQ(void)
{
    int i;
    int cpu;

    irq_ctx_init(0);

    spin_lock_init(&irq_mapping_update_lock);

#ifdef CONFIG_SMP
    /* By default all event channels notify CPU#0. */
    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
#endif

    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
        /* No VIRQ -> IRQ mappings. */
        for ( i = 0; i < NR_VIRQS; i++ )
            per_cpu(virq_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action  = 0;
        irq_desc[dynirq_to_irq(i)].depth   = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action  = 0;
        irq_desc[pirq_to_irq(i)].depth   = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }

    /* This needs to be done early, but after the IRQ subsystem is alive. */
    ctrl_if_init();
}