ia64/xen-unstable

linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c @ 6780:e17161930711

synch_bitops.h is an arch-specific header file.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Sep 13 10:40:29 2005 +0000 (2005-09-13)
parents f752e0c873a6
children 4d899a738d59 8ca0f98ba8e2

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* evtchn <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     cpu_evtchn_mask[cpu][idx] &                \
     ~(sh)->evtchn_mask[idx])

void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    cpu_evtchn[chn] = cpu;
}

#else

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     ~(sh)->evtchn_mask[idx])

void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
#if defined (__i386__)
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {                  \
    (regs)->IRQ_REG = (irq);                    \
    do_IRQ((regs));                             \
} while (0)
#endif

#define VALID_EVTCHN(_chn) ((_chn) >= 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    (void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL(force_evtchn_callback);
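
/*
 * Illustrative sketch, not part of the original file: force_evtchn_callback()
 * is typically paired with re-enabling event delivery, as the comment above
 * describes. The vcpu_data/evtchn_upcall_* field names follow the usage
 * elsewhere in this file and the Xen public headers of this era, and the
 * helper name is hypothetical.
 */
#if 0
static inline void example_enable_events(void)
{
    vcpu_info_t *vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];

    vcpu->evtchn_upcall_mask = 0;        /* clear the callback mask */
    barrier();                           /* mask must clear before the check */
    if ( vcpu->evtchn_upcall_pending )   /* anything arrived while masked? */
        force_evtchn_callback();         /* trap into Xen to deliver it */
}
#endif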

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    u32 l1, l2;
    unsigned int l1i, l2i, port;
    int irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];

    vcpu_info->evtchn_upcall_pending = 0;

    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    while ( l1 != 0 )
    {
        l1i = __ffs(l1);
        l1 &= ~(1 << l1i);

        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
        {
            l2i = __ffs(l2);
            l2 &= ~(1 << l2i);

            port = (l1i << 5) + l2i;
            if ( (irq = evtchn_to_irq[port]) != -1 ) {
                do_IRQ(irq, regs);
            } else
                evtchn_device_upcall(port);
        }
    }
}
EXPORT_SYMBOL(evtchn_do_upcall);

static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
    {
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(virq_to_irq, cpu)[virq] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
EXPORT_SYMBOL(bind_virq_to_irq);
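
/*
 * Illustrative sketch, not part of the original file: a virtual IRQ such as
 * VIRQ_TIMER is typically bound with bind_virq_to_irq() and then hooked up
 * through the normal Linux IRQ machinery. The handler and function names
 * below are hypothetical.
 */
#if 0
static irqreturn_t example_timer_interrupt(int irq, void *dev_id,
                                           struct pt_regs *regs)
{
    /* Per-tick work would go here. */
    return IRQ_HANDLED;
}

static void example_bind_timer_virq(void)
{
    int irq = bind_virq_to_irq(VIRQ_TIMER);
    (void)request_irq(irq, example_timer_interrupt, 0, "timer", NULL);
}
#endif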

void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int irq = per_cpu(virq_to_irq, cpu)[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        /*
         * This is a slight hack. Interdomain ports can be allocated directly
         * by userspace, and at that point they get bound by Xen to vcpu 0. We
         * therefore need to make sure that if we get an event on an event
         * channel we don't know about, vcpu 0 handles it. Binding channels to
         * vcpu 0 when closing them achieves this.
         */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(virq_to_irq, cpu)[virq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_virq_from_irq);

int bind_ipi_to_irq(int ipi)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
    {
        op.cmd = EVTCHNOP_bind_ipi;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
        evtchn = op.u.bind_ipi.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    else
    {
        irq = evtchn_to_irq[evtchn];
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
EXPORT_SYMBOL(bind_ipi_to_irq);

void unbind_ipi_from_irq(int ipi)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);

        /* See comments in unbind_virq_from_irq */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
    }

    spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_ipi_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
EXPORT_SYMBOL(bind_evtchn_to_irq);

void unbind_evtchn_from_irq(unsigned int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_evtchn_from_irq);

int bind_evtchn_to_irqhandler(
    unsigned int evtchn,
    irqreturn_t (*handler)(int, void *, struct pt_regs *),
    unsigned long irqflags,
    const char *devname,
    void *dev_id)
{
    unsigned int irq;
    int retval;

    irq = bind_evtchn_to_irq(evtchn);
    retval = request_irq(irq, handler, irqflags, devname, dev_id);
    if ( retval != 0 )
        unbind_evtchn_from_irq(evtchn);

    return retval;
}
EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
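
/*
 * Illustrative sketch, not part of the original file: a driver that already
 * owns an event-channel port binds it to a handler with the helper above and
 * tears the binding down symmetrically. All names below are hypothetical.
 */
#if 0
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id,
                                            struct pt_regs *regs)
{
    /* Service the device behind this event channel. */
    return IRQ_HANDLED;
}

static int example_connect(unsigned int evtchn, void *dev)
{
    /* Returns 0 on success; on failure the evtchn<->IRQ binding is undone. */
    return bind_evtchn_to_irqhandler(evtchn, example_evtchn_interrupt,
                                     0, "example-dev", dev);
}

static void example_disconnect(unsigned int evtchn, void *dev)
{
    unbind_evtchn_from_irqhandler(evtchn, dev);
}
#endif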

void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
{
    unsigned int irq = evtchn_to_irq[evtchn];
    free_irq(irq, dev_id);
    unbind_evtchn_from_irq(evtchn);
}
EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);

#ifdef CONFIG_SMP
static void do_nothing_function(void *ign)
{
}
#endif

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
    evtchn_op_t op;
    int evtchn;

    spin_lock(&irq_mapping_update_lock);
    evtchn = irq_to_evtchn[irq];
    if (!VALID_EVTCHN(evtchn)) {
        spin_unlock(&irq_mapping_update_lock);
        return;
    }

    /* Tell Xen to send future instances of this interrupt to other vcpu. */
    op.cmd = EVTCHNOP_bind_vcpu;
    op.u.bind_vcpu.port = evtchn;
    op.u.bind_vcpu.vcpu = tcpu;

    /*
     * If this fails, it usually just indicates that we're dealing with a virq
     * or IPI channel, which don't actually need to be rebound. Ignore it,
     * but don't do the xenlinux-level rebind in that case.
     */
    if (HYPERVISOR_event_channel_op(&op) >= 0)
        bind_evtchn_to_cpu(evtchn, tcpu);

    spin_unlock(&irq_mapping_update_lock);

    /*
     * Now send the new target processor a NOP IPI. When this returns, it
     * will check for any pending interrupts, and so service any that got
     * delivered to the wrong processor by mistake.
     *
     * XXX: The only time this is called with interrupts disabled is from the
     * hotplug/hotunplug path. In that case, all cpus are stopped with
     * interrupts disabled, and the missed interrupts will be picked up when
     * they start again. This is kind of a hack.
     */
    if (!irqs_disabled())
        smp_call_function(do_nothing_function, NULL, 0, 0);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
    unsigned tcpu = first_cpu(dest);
    rebind_irq_to_cpu(irq, tcpu);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return 0;
    unmask_evtchn(evtchn);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",
    startup_dynirq,
    shutdown_dynirq,
    enable_dynirq,
    disable_dynirq,
    ack_dynirq,
    end_dynirq,
    set_affinity_irq
};

static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    bind_evtchn_to_cpu(evtchn, 0);
    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq] = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}

static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd = EVTCHNOP_close;
    op.u.close.dom = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    bind_evtchn_to_cpu(evtchn, 0);
    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq] = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}

static struct hw_interrupt_type pirq_type = {
    "Phys-irq",
    startup_pirq,
    shutdown_pirq,
    enable_pirq,
    disable_pirq,
    ack_pirq,
    end_pirq,
    set_affinity_irq
};

void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
    int evtchn = irq_to_evtchn[i];
    shared_info_t *s = HYPERVISOR_shared_info;
    if ( !VALID_EVTCHN(evtchn) )
        return;
    BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
    synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void irq_suspend(void)
{
    int pirq, virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    /* Check that no PIRQs are still bound. */
    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
                  pirq, evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}

void __init init_IRQ(void)
{
    int i;
    int cpu;

    irq_ctx_init(0);

    spin_lock_init(&irq_mapping_update_lock);

#ifdef CONFIG_SMP
    /* By default all event channels notify CPU#0. */
    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
#endif

    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
        /* No VIRQ -> IRQ mappings. */
        for ( i = 0; i < NR_VIRQS; i++ )
            per_cpu(virq_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action = 0;
        irq_desc[dynirq_to_irq(i)].depth = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action = 0;
        irq_desc[pirq_to_irq(i)].depth = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }
}