ia64/xen-unstable: linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c @ 5707:05b63285047c

Merge.
author    sos22@douglas.cl.cam.ac.uk
date      Fri Jul 08 17:38:38 2005 +0000 (2005-07-08)
parents   04d15727e6e8 9b73afea874e
children  215d8b2f3d94
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm-xen/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
EXPORT_SYMBOL(force_evtchn_callback);
EXPORT_SYMBOL(evtchn_do_upcall);
EXPORT_SYMBOL(bind_evtchn_to_irq);
EXPORT_SYMBOL(unbind_evtchn_from_irq);
#endif
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* evtchn <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8  cpu_evtchn[NR_EVENT_CHANNELS];
static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     cpu_evtchn_mask[cpu][idx] &                \
     ~(sh)->evtchn_mask[idx])

void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    cpu_evtchn[chn] = cpu;
}

#else

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     ~(sh)->evtchn_mask[idx])

void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

#endif
/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
#if defined (__i386__)
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {                  \
    (regs)->IRQ_REG = (irq);                    \
    do_IRQ((regs));                             \
} while (0)
#endif

#define VALID_EVTCHN(_chn) ((_chn) >= 0)
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    (void)HYPERVISOR_xen_version(0);
}

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    u32 l1, l2;
    unsigned int l1i, l2i, port;
    int irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];

    vcpu_info->evtchn_upcall_pending = 0;

    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    while ( l1 != 0 )
    {
        l1i = __ffs(l1);
        l1 &= ~(1 << l1i);

        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
        {
            l2i = __ffs(l2);
            l2 &= ~(1 << l2i);

            port = (l1i << 5) + l2i;
            if ( (irq = evtchn_to_irq[port]) != -1 )
                do_IRQ(irq, regs);
            else
                evtchn_device_upcall(port);
        }
    }
}
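
/*
 * Worked example of the two-level decode above: each set bit in
 * evtchn_pending_sel names a 32-bit word of s->evtchn_pending, and each set
 * bit in the corresponding active_evtchns() word names one port within it,
 * so port == (word_index << 5) + bit_index.  If word 3 has bit 7 active, for
 * instance, the upcall dispatches port (3 << 5) + 7 == 103, either to its
 * bound Linux IRQ or, if unbound, to the event-channel device via
 * evtchn_device_upcall().
 */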
static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
    {
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(virq_to_irq, cpu)[virq] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
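
/*
 * Usage sketch (illustrative only; my_virq_handler and the SA_SHIRQ flag are
 * examples, not defined or required by this file): a caller that wants VIRQ
 * events on the current CPU would typically bind the VIRQ and then register
 * an ordinary Linux handler on the returned IRQ, e.g.
 *
 *     int irq = bind_virq_to_irq(VIRQ_DEBUG);
 *     if ( request_irq(irq, my_virq_handler, SA_SHIRQ, "my-virq", NULL) != 0 )
 *         unbind_virq_from_irq(VIRQ_DEBUG);
 *
 * Teardown is the reverse: free_irq() on the IRQ, then unbind_virq_from_irq(),
 * which drops the reference count taken above and closes the event channel
 * when it reaches zero.
 */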
void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int irq = per_cpu(virq_to_irq, cpu)[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        /* This is a slight hack.  Interdomain ports can be allocated
           directly by userspace, and at that point they get bound by
           Xen to vcpu 0.  We therefore need to make sure that if we
           get an event on an event channel we don't know about, vcpu 0
           handles it.  Binding channels to vcpu 0 when closing them
           achieves this. */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(virq_to_irq, cpu)[virq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
int bind_ipi_on_cpu_to_irq(int ipi)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
    {
        op.cmd = EVTCHNOP_bind_ipi;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
        evtchn = op.u.bind_ipi.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    else
    {
        irq = evtchn_to_irq[evtchn];
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
void unbind_ipi_from_irq(int ipi)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);

        /* See comments in unbind_virq_from_irq */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
    }

    spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irq(int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
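
/*
 * Usage sketch (illustrative; 'port', 'my_evtchn_handler' and 'dev' are
 * hypothetical names): a split-driver backend that has been handed an
 * already-established interdomain event-channel port would typically attach
 * a handler like this:
 *
 *     int irq = bind_evtchn_to_irq(port);
 *     if ( request_irq(irq, my_evtchn_handler, SA_SHIRQ, "blkback", dev) != 0 )
 *         unbind_evtchn_from_irq(port);
 *
 * Unlike the VIRQ and IPI paths, no EVTCHNOP hypercall is issued here: the
 * port already exists, so binding only wires it into the dynamic-IRQ space
 * and bumps the reference count.
 */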
void unbind_evtchn_from_irq(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}
static void do_nothing_function(void *ign)
{
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
    evtchn_op_t op;
    int evtchn;

    spin_lock(&irq_mapping_update_lock);
    evtchn = irq_to_evtchn[irq];
    if (!VALID_EVTCHN(evtchn)) {
        spin_unlock(&irq_mapping_update_lock);
        return;
    }

    /* Tell Xen to send future instances of this interrupt to the
       other vcpu */
    op.cmd = EVTCHNOP_bind_vcpu;
    op.u.bind_vcpu.port = evtchn;
    op.u.bind_vcpu.vcpu = tcpu;

    /* If this fails, it usually just indicates that we're dealing
       with a virq or IPI channel, which don't actually need to be
       rebound.  Ignore it, but don't do the xenlinux-level rebind
       in that case. */
    if (HYPERVISOR_event_channel_op(&op) >= 0)
        bind_evtchn_to_cpu(evtchn, tcpu);

    spin_unlock(&irq_mapping_update_lock);

    /* Now send the new target processor a NOP IPI.  When this
       returns, it will check for any pending interrupts, and so
       service any that got delivered to the wrong processor by
       mistake. */
    /* XXX: The only time this is called with interrupts disabled is
       from the hotplug/hotunplug path.  In that case, all cpus are
       stopped with interrupts disabled, and the missed interrupts
       will be picked up when they start again.  This is kind of a
       hack. */
    if (!irqs_disabled()) {
        smp_call_function(do_nothing_function, NULL, 0, 0);
    }
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
    unsigned tcpu = first_cpu(dest);
    rebind_irq_to_cpu(irq, tcpu);
}
/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return 0;
    unmask_evtchn(evtchn);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",
    startup_dynirq,
    shutdown_dynirq,
    enable_dynirq,
    disable_dynirq,
    ack_dynirq,
    end_dynirq,
    set_affinity_irq
};
static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    bind_evtchn_to_cpu(evtchn, 0);
    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq] = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}
static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd = EVTCHNOP_close;
    op.u.close.dom = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    bind_evtchn_to_cpu(evtchn, 0);
    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq] = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}

static struct hw_interrupt_type pirq_type = {
    "Phys-irq",
    startup_pirq,
    shutdown_pirq,
    enable_pirq,
    disable_pirq,
    ack_pirq,
    end_pirq,
    set_affinity_irq
};
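
/*
 * Note on the physical-IRQ path (illustrative; my_disk_handler and 'dev' are
 * hypothetical names): PIRQs are statically bound 1:1 into the IRQ space by
 * init_IRQ() below, so a driver simply requests the IRQ corresponding to its
 * PIRQ, e.g.
 *
 *     if ( request_irq(pirq_to_irq(14), my_disk_handler, SA_SHIRQ, "disk", dev) )
 *         ...;
 *
 * The generic IRQ layer then calls startup_pirq(), which performs the
 * EVTCHNOP_bind_pirq hypercall, unmasks the resulting event channel, and
 * notifies Xen if the PIRQ needs an unmask notification.
 */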
void irq_suspend(void)
{
    int pirq, virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    /* Check that no PIRQs are still bound. */
    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
                  pirq, evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}
void __init init_IRQ(void)
{
    int i;
    int cpu;

    irq_ctx_init(0);

    spin_lock_init(&irq_mapping_update_lock);

#ifdef CONFIG_SMP
    /* By default all event channels notify CPU#0. */
    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
#endif

    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
        /* No VIRQ -> IRQ mappings. */
        for ( i = 0; i < NR_VIRQS; i++ )
            per_cpu(virq_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action  = 0;
        irq_desc[dynirq_to_irq(i)].depth   = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action  = 0;
        irq_desc[pirq_to_irq(i)].depth   = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }

    /* This needs to be done early, but after the IRQ subsystem is alive. */
    ctrl_if_init();
}