ia64/xen-unstable

view linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c @ 5691:88c2d410979f

I updated the vcpu_to_cpu string creation to include a field separator,
which gets rid of the -1 -> # hack and works for cpus > 9.
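
For illustration, this is roughly the kind of formatting the separator change
implies. The vcpu_to_cpu string itself is built in the xend/xm tools rather
than in this file, and the '|' separator and helper name below are assumptions,
not the actual implementation:

    #include <stdio.h>

    /* Hedged sketch: join a vcpu -> cpu map with an explicit separator so
     * that "-1" (offline) and cpu numbers above 9 need no special encoding. */
    static void format_vcpu_to_cpu(const int *map, int nr_vcpus,
                                   char *buf, int len)
    {
        int i, used = 0;

        buf[0] = '\0';
        for ( i = 0; i < nr_vcpus && used < len; i++ )
            used += snprintf(buf + used, len - used, "%s%d",
                             (i == 0) ? "" : "|", map[i]);
    }

For example, a map of {0, -1, 12} would produce "0|-1|12", which parses
unambiguously without the old '#' substitution.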

I ran into some issues with stale vcpu_to_cpu lists when running the
hotplug subprogram. I would take a vcpu offline and then issue the
command to bring it back, and the vcpu_to_cpu list would not have changed
to indicate that the vcpu had actually gone down. If I injected an xm list -v
(which always showed the correct mapping), then subsequent hotplug
commands would see the state change and fire off the hotplug request. I
don't know that skipping the event when the state hasn't changed saves
much work, so I took the state check out and now just send the hotplug
event directly.

> Also the whole hotplug stuff is still missing interrupt re-routing
> when a vcpu is taken down. To do this, we need an evtchn operation to
> change the vcpu affinity of a port by changing notify_vcpu_id.

I don't fully understand all of the mappings that are happening, so this
part of the patch might be way off. In any case, I've added a new
evtchn op to set the notify_vcpu_id field of a channel. I updated the
HOTPLUG_CPU code to use the new routines when bringing cpus up and down.
When taking a cpu down, I route its IPI irq channels to CPU 0; when the
cpu comes back up, the channels are re-routed to the awakened CPU.
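
To make the intended usage concrete, here is a rough sketch of how the
HOTPLUG_CPU paths could drive the rerouting with the helpers added in this
file. rebind_evtchn_from_ipi() is real and defined below; the IPI slot names
and wrapper functions are placeholders, and the real call sites live in the
arch SMP code rather than here:

    /* Hypothetical wrappers; the IPI indices are placeholders. */
    #define RESCHEDULE_IPI    0
    #define CALL_FUNCTION_IPI 1

    static void reroute_ipis_for_cpu_down(int cpu)
    {
        /* Deliver the offlined CPU's IPI notifications to CPU 0. */
        rebind_evtchn_from_ipi(cpu, 0, RESCHEDULE_IPI);
        rebind_evtchn_from_ipi(cpu, 0, CALL_FUNCTION_IPI);
    }

    static void reroute_ipis_for_cpu_up(int cpu)
    {
        /* Point the channels back at the awakened CPU. */
        rebind_evtchn_from_ipi(cpu, cpu, RESCHEDULE_IPI);
        rebind_evtchn_from_ipi(cpu, cpu, CALL_FUNCTION_IPI);
    }

The event channels themselves stay bound; only the notify_vcpu_id on the Xen
side changes, via the new EVTCHNOP_rebind operation.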

From: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Wed Jul 06 22:23:18 2005 +0000 (2005-07-06)
parents 999293916aa7
children 707fcf42a5ae 579d1e771025 c1a7ed266c7e
line source
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm-xen/synch_bitops.h>
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
EXPORT_SYMBOL(force_evtchn_callback);
EXPORT_SYMBOL(evtchn_do_upcall);
EXPORT_SYMBOL(bind_evtchn_to_irq);
EXPORT_SYMBOL(unbind_evtchn_from_irq);
#endif

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int irq_to_evtchn[NR_IRQS];

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* evtchn <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP
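/*
 * cpu_evtchn[] records which CPU each event channel currently notifies;
 * cpu_evtchn_mask[] is the per-CPU bitmap consulted by active_evtchns()
 * below, so each CPU only services the channels bound to it.
 */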
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     cpu_evtchn_mask[cpu][idx] &                \
     ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
    clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
    set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
    cpu_evtchn[chn] = cpu;
}

#else

#define active_evtchns(cpu,sh,idx)              \
    ((sh)->evtchn_pending[idx] &                \
     ~(sh)->evtchn_mask[idx])

#define bind_evtchn_to_cpu(chn,cpu) ((void)0)

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#else
extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
#endif
#if defined (__i386__)
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#define IRQ_REG orig_rax
#endif
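/*
 * The architecture's do_IRQ() picks the IRQ number up from the saved
 * orig_eax/orig_rax slot of the register frame, so this wrapper stores the
 * number there before chaining to the real handler.
 */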
#define do_IRQ(irq, regs) do {                  \
    (regs)->IRQ_REG = (irq);                    \
    do_IRQ((regs));                             \
} while (0)
#endif

#define VALID_EVTCHN(_chn) ((_chn) >= 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    (void)HYPERVISOR_xen_version(0);
}

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
    u32 l1, l2;
    unsigned int l1i, l2i, port;
    int irq, cpu = smp_processor_id();
    shared_info_t *s = HYPERVISOR_shared_info;
    vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];

    vcpu_info->evtchn_upcall_pending = 0;

    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
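    /*
     * 'l1' is the top-level pending selector: each set bit flags a 32-bit
     * word of the shared evtchn_pending[] array that may hold active ports
     * for this CPU; the nested loops below scan selector, then word.
     */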
    while ( l1 != 0 )
    {
        l1i = __ffs(l1);
        l1 &= ~(1 << l1i);

        while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
        {
            l2i = __ffs(l2);
            l2 &= ~(1 << l2i);

            port = (l1i << 5) + l2i;
            if ( (irq = evtchn_to_irq[port]) != -1 )
                do_IRQ(irq, regs);
            else
                evtchn_device_upcall(port);
        }
    }
}

static int find_unbound_irq(void)
{
    int irq;

    for ( irq = 0; irq < NR_IRQS; irq++ )
        if ( irq_bindcount[irq] == 0 )
            break;

    if ( irq == NR_IRQS )
        panic("No available IRQ to bind to: increase NR_IRQS!\n");

    return irq;
}

int bind_virq_to_irq(int virq)
{
    evtchn_op_t op;
    int evtchn, irq;
    int cpu = smp_processor_id();

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
    {
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(virq_to_irq, cpu)[virq] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_virq_from_irq(int virq)
{
    evtchn_op_t op;
    int cpu = smp_processor_id();
    int irq = per_cpu(virq_to_irq, cpu)[virq];
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IRQ %d\n", virq);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(virq_to_irq, cpu)[virq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}

int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn, irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
    {
        op.cmd = EVTCHNOP_bind_ipi;
        op.u.bind_ipi.ipi_vcpu = cpu;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
        evtchn = op.u.bind_ipi.port;

        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    else
    {
        irq = evtchn_to_irq[evtchn];
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}
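
/*
 * Reroute the IPI event channel bound on 'cpu' so that it notifies
 * 'newcpu' instead, using the new EVTCHNOP_rebind operation.
 */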
void rebind_evtchn_from_ipi(int cpu, int newcpu, int ipi)
{
    evtchn_op_t op;
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];

    spin_lock(&irq_mapping_update_lock);

    op.cmd = EVTCHNOP_rebind;
    op.u.rebind.port = evtchn;
    op.u.rebind.vcpu = newcpu;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        printk(KERN_INFO "Failed to rebind IPI%d to CPU%d\n",ipi,newcpu);

    spin_unlock(&irq_mapping_update_lock);
}
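
/*
 * Likewise, reroute the event channel behind an ordinary dynamic IRQ so
 * that its notifications are delivered to 'newcpu'.
 */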
void rebind_evtchn_from_irq(int cpu, int newcpu, int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    spin_lock(&irq_mapping_update_lock);

    op.cmd = EVTCHNOP_rebind;
    op.u.rebind.port = evtchn;
    op.u.rebind.vcpu = newcpu;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        printk(KERN_INFO "Failed to rebind IRQ%d to CPU%d\n",irq,newcpu);

    spin_unlock(&irq_mapping_update_lock);
}

void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
{
    evtchn_op_t op;
    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        op.cmd = EVTCHNOP_close;
        op.u.close.dom = DOMID_SELF;
        op.u.close.port = evtchn;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);

        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
        per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
    }

    spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irq(int evtchn)
{
    int irq;

    spin_lock(&irq_mapping_update_lock);

    if ( (irq = evtchn_to_irq[evtchn]) == -1 )
    {
        irq = find_unbound_irq();
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;
    }

    irq_bindcount[irq]++;

    spin_unlock(&irq_mapping_update_lock);

    return irq;
}

void unbind_evtchn_from_irq(int evtchn)
{
    int irq = evtchn_to_irq[evtchn];

    spin_lock(&irq_mapping_update_lock);

    if ( --irq_bindcount[irq] == 0 )
    {
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    spin_unlock(&irq_mapping_update_lock);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return 0;
    unmask_evtchn(evtchn);
    return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_dynirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];

    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
        unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
    "Dynamic-irq",
    startup_dynirq,
    shutdown_dynirq,
    enable_dynirq,
    disable_dynirq,
    ack_dynirq,
    end_dynirq,
    NULL
};
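
/*
 * Some physical IRQs require Xen to be notified when the guest unmasks
 * them (tracked per-PIRQ in pirq_needs_unmask_notify[]). pirq_query_unmask()
 * refreshes that flag from Xen; pirq_unmask_notify() performs the
 * notification when it is required.
 */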
static inline void pirq_unmask_notify(int pirq)
{
    physdev_op_t op;
    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
    {
        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
        (void)HYPERVISOR_physdev_op(&op);
    }
}

static inline void pirq_query_unmask(int pirq)
{
    physdev_op_t op;
    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
    op.u.irq_status_query.irq = pirq;
    (void)HYPERVISOR_physdev_op(&op);
    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
        set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn;

    op.cmd = EVTCHNOP_bind_pirq;
    op.u.bind_pirq.pirq = irq;
    /* NB. We are happy to share unless we are probing. */
    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
        return 0;
    }
    evtchn = op.u.bind_pirq.port;

    pirq_query_unmask(irq_to_pirq(irq));

    evtchn_to_irq[evtchn] = irq;
    irq_to_evtchn[irq] = evtchn;

    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));

    return 0;
}

static void shutdown_pirq(unsigned int irq)
{
    evtchn_op_t op;
    int evtchn = irq_to_evtchn[irq];

    if ( !VALID_EVTCHN(evtchn) )
        return;

    mask_evtchn(evtchn);

    op.cmd = EVTCHNOP_close;
    op.u.close.dom = DOMID_SELF;
    op.u.close.port = evtchn;
    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        panic("Failed to unbind physical IRQ %d\n", irq);

    evtchn_to_irq[evtchn] = -1;
    irq_to_evtchn[irq] = -1;
}

static void enable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    unmask_evtchn(evtchn);
    pirq_unmask_notify(irq_to_pirq(irq));
}

static void disable_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    mask_evtchn(evtchn);
    clear_evtchn(evtchn);
}

static void end_pirq(unsigned int irq)
{
    int evtchn = irq_to_evtchn[irq];
    if ( !VALID_EVTCHN(evtchn) )
        return;
    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
    {
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
    }
}

static struct hw_interrupt_type pirq_type = {
    "Phys-irq",
    startup_pirq,
    shutdown_pirq,
    enable_pirq,
    disable_pirq,
    ack_pirq,
    end_pirq,
    NULL
};

void irq_suspend(void)
{
    int pirq, virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    /* Unbind VIRQs from event channels. */
    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;
        evtchn = irq_to_evtchn[irq];

        /* Mark the event channel as unused in our table. */
        evtchn_to_irq[evtchn] = -1;
        irq_to_evtchn[irq] = -1;
    }

    /* Check that no PIRQs are still bound. */
    for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
        if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
            panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
                  pirq, evtchn);
}

void irq_resume(void)
{
    evtchn_op_t op;
    int virq, irq, evtchn;
    int cpu = smp_processor_id(); /* XXX */

    for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
        mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */

    for ( virq = 0; virq < NR_VIRQS; virq++ )
    {
        if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
            continue;

        /* Get a new binding from Xen. */
        op.cmd = EVTCHNOP_bind_virq;
        op.u.bind_virq.virq = virq;
        if ( HYPERVISOR_event_channel_op(&op) != 0 )
            panic("Failed to bind virtual IRQ %d\n", virq);
        evtchn = op.u.bind_virq.port;

        /* Record the new mapping. */
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq] = evtchn;

        /* Ready for use. */
        unmask_evtchn(evtchn);
    }
}

void __init init_IRQ(void)
{
    int i;
    int cpu;

    irq_ctx_init(0);

    spin_lock_init(&irq_mapping_update_lock);

#ifdef CONFIG_SMP
    /* By default all event channels notify CPU#0. */
    memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
#endif

    for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
        /* No VIRQ -> IRQ mappings. */
        for ( i = 0; i < NR_VIRQS; i++ )
            per_cpu(virq_to_irq, cpu)[i] = -1;
    }

    /* No event-channel -> IRQ mappings. */
    for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
    {
        evtchn_to_irq[i] = -1;
        mask_evtchn(i); /* No event channels are 'live' right now. */
    }

    /* No IRQ -> event-channel mappings. */
    for ( i = 0; i < NR_IRQS; i++ )
        irq_to_evtchn[i] = -1;

    for ( i = 0; i < NR_DYNIRQS; i++ )
    {
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        irq_bindcount[dynirq_to_irq(i)] = 0;

        irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
        irq_desc[dynirq_to_irq(i)].action = 0;
        irq_desc[dynirq_to_irq(i)].depth = 1;
        irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
    }

    for ( i = 0; i < NR_PIRQS; i++ )
    {
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        irq_bindcount[pirq_to_irq(i)] = 1;

        irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
        irq_desc[pirq_to_irq(i)].action = 0;
        irq_desc[pirq_to_irq(i)].depth = 1;
        irq_desc[pirq_to_irq(i)].handler = &pirq_type;
    }

    /* This needs to be done early, but after the IRQ subsystem is alive. */
    ctrl_if_init();
}