
xen/arch/x86/io_apic.c @ 9737:3c1cd09801c0

Clean up new EOI ack method some more and fix unbinding
IRQ from guest (pending EOIs must be forcibly flushed).

Signed-off-by: Keir Fraser <keir@xensource.com>
author   kaf24@firebug.cl.cam.ac.uk
date     Sun Apr 16 15:04:21 2006 +0100
parents  b39365343de0
children c76daba31026
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
23 #include <xen/config.h>
24 #include <xen/lib.h>
25 #include <xen/init.h>
26 #include <xen/irq.h>
27 #include <xen/delay.h>
28 #include <xen/sched.h>
29 #include <xen/acpi.h>
30 #include <xen/keyhandler.h>
31 #include <asm/io.h>
32 #include <asm/mc146818rtc.h>
33 #include <asm/smp.h>
34 #include <asm/desc.h>
35 #include <mach_apic.h>
36 #include <io_ports.h>
38 #define set_irq_info(irq, mask) ((void)0)
39 #define set_native_irq_info(irq, mask) ((void)0)
41 /* Different to Linux: our implementation can be simpler. */
42 #define make_8259A_irq(irq) (io_apic_irqs &= ~(1<<(irq)))
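/*
 * In outline: io_apic_irqs is a bitmask of the IRQs routed through the
 * IO-APIC (tested via IO_APIC_IRQ()), so clearing an IRQ's bit here simply
 * hands that line back to the legacy 8259A path; unlike the Linux version,
 * nothing else needs to be torn down.
 */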
44 int (*ioapic_renumber_irq)(int ioapic, int irq);
45 atomic_t irq_mis_count;
47 /* Where, if anywhere, the i8259 is connected in ExtINT mode */
48 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
50 static DEFINE_SPINLOCK(ioapic_lock);
52 int skip_ioapic_setup;
54 /*
55 * # of IRQ routing registers
56 */
57 int nr_ioapic_registers[MAX_IO_APICS];
59 int disable_timer_pin_1 __initdata;
61 /*
62 * Rough estimation of how many shared IRQs there are, can
63 * be changed anytime.
64 */
65 #define MAX_PLUS_SHARED_IRQS NR_IRQS
66 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
68 /*
69 * This is performance-critical, we want to do it O(1)
70 *
71 * the indexing order of this array favors 1:1 mappings
72 * between pins and IRQs.
73 */
75 static struct irq_pin_list {
76 int apic, pin, next;
77 } irq_2_pin[PIN_MAP_SIZE];
78 static int irq_2_pin_free_entry = NR_IRQS;
80 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
82 /*
83 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
84 * shared ISA-space IRQs, so we have to support them. We are super
85 * fast in the common case, and fast for shared ISA-space IRQs.
86 */
87 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
88 {
89 struct irq_pin_list *entry = irq_2_pin + irq;
91 while (entry->next) {
92 BUG_ON((entry->apic == apic) && (entry->pin == pin));
93 entry = irq_2_pin + entry->next;
94 }
96 BUG_ON((entry->apic == apic) && (entry->pin == pin));
98 if (entry->pin != -1) {
99 if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
100 panic("io_apic.c: whoops");
101 entry->next = irq_2_pin_free_entry;
102 entry = irq_2_pin + entry->next;
103 irq_2_pin_free_entry = entry->next;
104 entry->next = 0;
105 }
106 entry->apic = apic;
107 entry->pin = pin;
108 }
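/*
 * Layout of irq_2_pin[], roughly: slots 0..NR_IRQS-1 are per-IRQ list heads
 * (indexed directly by IRQ number), while slots NR_IRQS..PIN_MAP_SIZE-1 form
 * a pool for the extra pins of shared IRQs, handed out via
 * irq_2_pin_free_entry and chained through the 'next' index.  A next of 0
 * terminates a list, since slot 0 can never be a chained entry.
 */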
110 static void remove_pin_at_irq(unsigned int irq, int apic, int pin)
111 {
112 struct irq_pin_list *entry, *prev;
114 for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
115 if ((entry->apic == apic) && (entry->pin == pin))
116 break;
117 if (!entry->next)
118 BUG();
119 }
121 entry->pin = entry->apic = -1;
123 if (entry != &irq_2_pin[irq]) {
124 /* Removed entry is not at head of list. */
125 prev = &irq_2_pin[irq];
126 while (&irq_2_pin[prev->next] != entry)
127 prev = &irq_2_pin[prev->next];
128 prev->next = entry->next;
129 entry->next = irq_2_pin_free_entry;
130 irq_2_pin_free_entry = entry - irq_2_pin;
131 } else if (entry->next != 0) {
132 /* Removed entry is at head of multi-item list. */
133 prev = entry;
134 entry = &irq_2_pin[entry->next];
135 *prev = *entry;
136 entry->pin = entry->apic = -1;
137 entry->next = irq_2_pin_free_entry;
138 irq_2_pin_free_entry = entry - irq_2_pin;
139 }
140 }
142 /*
143 * Reroute an IRQ to a different pin.
144 */
145 static void __init replace_pin_at_irq(unsigned int irq,
146 int oldapic, int oldpin,
147 int newapic, int newpin)
148 {
149 struct irq_pin_list *entry = irq_2_pin + irq;
151 while (1) {
152 if (entry->apic == oldapic && entry->pin == oldpin) {
153 entry->apic = newapic;
154 entry->pin = newpin;
155 }
156 if (!entry->next)
157 break;
158 entry = irq_2_pin + entry->next;
159 }
160 }
162 static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
163 {
164 struct irq_pin_list *entry = irq_2_pin + irq;
165 unsigned int pin, reg;
167 for (;;) {
168 pin = entry->pin;
169 if (pin == -1)
170 break;
171 reg = io_apic_read(entry->apic, 0x10 + pin*2);
172 reg &= ~disable;
173 reg |= enable;
174 io_apic_modify(entry->apic, 0x10 + pin*2, reg);
175 if (!entry->next)
176 break;
177 entry = irq_2_pin + entry->next;
178 }
179 }
181 /* mask = 1 */
182 static void __mask_IO_APIC_irq (unsigned int irq)
183 {
184 __modify_IO_APIC_irq(irq, 0x00010000, 0);
185 }
187 /* mask = 0 */
188 static void __unmask_IO_APIC_irq (unsigned int irq)
189 {
190 __modify_IO_APIC_irq(irq, 0, 0x00010000);
191 }
193 /* trigger = 0 */
194 static void __edge_IO_APIC_irq (unsigned int irq)
195 {
196 __modify_IO_APIC_irq(irq, 0, 0x00008000);
197 }
199 /* trigger = 1 */
200 static void __level_IO_APIC_irq (unsigned int irq)
201 {
202 __modify_IO_APIC_irq(irq, 0x00008000, 0);
203 }
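/*
 * For reference: these helpers modify the low dword of an I/O APIC
 * redirection table entry (register 0x10 + pin*2).  Bit 16 (0x00010000) is
 * the mask bit and bit 15 (0x00008000) selects the trigger mode
 * (1 = level, 0 = edge), hence the constants used above.
 */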
205 static void mask_IO_APIC_irq (unsigned int irq)
206 {
207 unsigned long flags;
209 spin_lock_irqsave(&ioapic_lock, flags);
210 __mask_IO_APIC_irq(irq);
211 spin_unlock_irqrestore(&ioapic_lock, flags);
212 }
214 static void unmask_IO_APIC_irq (unsigned int irq)
215 {
216 unsigned long flags;
218 spin_lock_irqsave(&ioapic_lock, flags);
219 __unmask_IO_APIC_irq(irq);
220 spin_unlock_irqrestore(&ioapic_lock, flags);
221 }
223 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
224 {
225 struct IO_APIC_route_entry entry;
226 unsigned long flags;
228 /* Check delivery_mode to be sure we're not clearing an SMI pin */
229 spin_lock_irqsave(&ioapic_lock, flags);
230 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
231 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
232 spin_unlock_irqrestore(&ioapic_lock, flags);
233 if (entry.delivery_mode == dest_SMI)
234 return;
236 /*
237 * Disable it in the IO-APIC irq-routing table:
238 */
239 memset(&entry, 0, sizeof(entry));
240 entry.mask = 1;
241 spin_lock_irqsave(&ioapic_lock, flags);
242 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
243 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
244 spin_unlock_irqrestore(&ioapic_lock, flags);
245 }
247 static void clear_IO_APIC (void)
248 {
249 int apic, pin;
251 for (apic = 0; apic < nr_ioapics; apic++)
252 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
253 clear_IO_APIC_pin(apic, pin);
254 }
256 #ifdef CONFIG_SMP
257 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
258 {
259 unsigned long flags;
260 int pin;
261 struct irq_pin_list *entry = irq_2_pin + irq;
262 unsigned int apicid_value;
263 cpumask_t tmp;
265 cpus_and(tmp, cpumask, cpu_online_map);
266 if (cpus_empty(tmp))
267 tmp = TARGET_CPUS;
269 cpus_and(cpumask, tmp, CPU_MASK_ALL);
271 apicid_value = cpu_mask_to_apicid(cpumask);
272 /* Prepare to do the io_apic_write */
273 apicid_value = apicid_value << 24;
274 spin_lock_irqsave(&ioapic_lock, flags);
275 for (;;) {
276 pin = entry->pin;
277 if (pin == -1)
278 break;
279 io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
280 if (!entry->next)
281 break;
282 entry = irq_2_pin + entry->next;
283 }
284 set_irq_info(irq, cpumask);
285 spin_unlock_irqrestore(&ioapic_lock, flags);
286 }
287 #endif /* CONFIG_SMP */
289 /*
290 * Find the IRQ entry number of a certain pin.
291 */
292 static int find_irq_entry(int apic, int pin, int type)
293 {
294 int i;
296 for (i = 0; i < mp_irq_entries; i++)
297 if (mp_irqs[i].mpc_irqtype == type &&
298 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
299 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
300 mp_irqs[i].mpc_dstirq == pin)
301 return i;
303 return -1;
304 }
306 /*
307 * Find the pin to which IRQ[irq] (ISA) is connected
308 */
309 static int __init find_isa_irq_pin(int irq, int type)
310 {
311 int i;
313 for (i = 0; i < mp_irq_entries; i++) {
314 int lbus = mp_irqs[i].mpc_srcbus;
316 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
317 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
318 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
319 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
320 ) &&
321 (mp_irqs[i].mpc_irqtype == type) &&
322 (mp_irqs[i].mpc_srcbusirq == irq))
324 return mp_irqs[i].mpc_dstirq;
325 }
326 return -1;
327 }
329 static int __init find_isa_irq_apic(int irq, int type)
330 {
331 int i;
333 for (i = 0; i < mp_irq_entries; i++) {
334 int lbus = mp_irqs[i].mpc_srcbus;
336 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
337 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
338 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
339 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
340 ) &&
341 (mp_irqs[i].mpc_irqtype == type) &&
342 (mp_irqs[i].mpc_srcbusirq == irq))
343 break;
344 }
345 if (i < mp_irq_entries) {
346 int apic;
347 for(apic = 0; apic < nr_ioapics; apic++) {
348 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
349 return apic;
350 }
351 }
353 return -1;
354 }
356 /*
357 * Find a specific PCI IRQ entry.
358 * Not an __init, possibly needed by modules
359 */
360 static int pin_2_irq(int idx, int apic, int pin);
362 /*
363 * This function currently is only a helper for the i386 SMP boot process, where
364 * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
365 * so the mask in all cases should simply be TARGET_CPUS.
366 */
367 #ifdef CONFIG_SMP
368 void __init setup_ioapic_dest(void)
369 {
370 int pin, ioapic, irq, irq_entry;
372 if (skip_ioapic_setup == 1)
373 return;
375 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
376 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
377 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
378 if (irq_entry == -1)
379 continue;
380 irq = pin_2_irq(irq_entry, ioapic, pin);
381 set_ioapic_affinity_irq(irq, TARGET_CPUS);
382 }
384 }
385 }
386 #endif
388 /*
389 * EISA Edge/Level control register, ELCR
390 */
391 static int EISA_ELCR(unsigned int irq)
392 {
393 if (irq < 16) {
394 unsigned int port = 0x4d0 + (irq >> 3);
395 return (inb(port) >> (irq & 7)) & 1;
396 }
397 apic_printk(APIC_VERBOSE, KERN_INFO
398 "Broken MPtable reports ISA irq %d\n", irq);
399 return 0;
400 }
402 /* EISA interrupts are always polarity zero and can be edge or level
403 * trigger depending on the ELCR value. If an interrupt is listed as
404 * EISA conforming in the MP table, that means its trigger type must
405 * be read in from the ELCR */
407 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
408 #define default_EISA_polarity(idx) (0)
410 /* ISA interrupts are always polarity zero edge triggered,
411 * when listed as conforming in the MP table. */
413 #define default_ISA_trigger(idx) (0)
414 #define default_ISA_polarity(idx) (0)
416 /* PCI interrupts are always polarity one level triggered,
417 * when listed as conforming in the MP table. */
419 #define default_PCI_trigger(idx) (1)
420 #define default_PCI_polarity(idx) (1)
422 /* MCA interrupts are always polarity zero level triggered,
423 * when listed as conforming in the MP table. */
425 #define default_MCA_trigger(idx) (1)
426 #define default_MCA_polarity(idx) (0)
428 /* NEC98 interrupts are always polarity zero edge triggered,
429 * when listed as conforming in the MP table. */
431 #define default_NEC98_trigger(idx) (0)
432 #define default_NEC98_polarity(idx) (0)
434 static int __init MPBIOS_polarity(int idx)
435 {
436 int bus = mp_irqs[idx].mpc_srcbus;
437 int polarity;
439 /*
440 * Determine IRQ line polarity (high active or low active):
441 */
442 switch (mp_irqs[idx].mpc_irqflag & 3)
443 {
444 case 0: /* conforms, ie. bus-type dependent polarity */
445 {
446 switch (mp_bus_id_to_type[bus])
447 {
448 case MP_BUS_ISA: /* ISA pin */
449 {
450 polarity = default_ISA_polarity(idx);
451 break;
452 }
453 case MP_BUS_EISA: /* EISA pin */
454 {
455 polarity = default_EISA_polarity(idx);
456 break;
457 }
458 case MP_BUS_PCI: /* PCI pin */
459 {
460 polarity = default_PCI_polarity(idx);
461 break;
462 }
463 case MP_BUS_MCA: /* MCA pin */
464 {
465 polarity = default_MCA_polarity(idx);
466 break;
467 }
468 case MP_BUS_NEC98: /* NEC 98 pin */
469 {
470 polarity = default_NEC98_polarity(idx);
471 break;
472 }
473 default:
474 {
475 printk(KERN_WARNING "broken BIOS!!\n");
476 polarity = 1;
477 break;
478 }
479 }
480 break;
481 }
482 case 1: /* high active */
483 {
484 polarity = 0;
485 break;
486 }
487 case 2: /* reserved */
488 {
489 printk(KERN_WARNING "broken BIOS!!\n");
490 polarity = 1;
491 break;
492 }
493 case 3: /* low active */
494 {
495 polarity = 1;
496 break;
497 }
498 default: /* invalid */
499 {
500 printk(KERN_WARNING "broken BIOS!!\n");
501 polarity = 1;
502 break;
503 }
504 }
505 return polarity;
506 }
508 static int MPBIOS_trigger(int idx)
509 {
510 int bus = mp_irqs[idx].mpc_srcbus;
511 int trigger;
513 /*
514 * Determine IRQ trigger mode (edge or level sensitive):
515 */
516 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
517 {
518 case 0: /* conforms, ie. bus-type dependent */
519 {
520 switch (mp_bus_id_to_type[bus])
521 {
522 case MP_BUS_ISA: /* ISA pin */
523 {
524 trigger = default_ISA_trigger(idx);
525 break;
526 }
527 case MP_BUS_EISA: /* EISA pin */
528 {
529 trigger = default_EISA_trigger(idx);
530 break;
531 }
532 case MP_BUS_PCI: /* PCI pin */
533 {
534 trigger = default_PCI_trigger(idx);
535 break;
536 }
537 case MP_BUS_MCA: /* MCA pin */
538 {
539 trigger = default_MCA_trigger(idx);
540 break;
541 }
542 case MP_BUS_NEC98: /* NEC 98 pin */
543 {
544 trigger = default_NEC98_trigger(idx);
545 break;
546 }
547 default:
548 {
549 printk(KERN_WARNING "broken BIOS!!\n");
550 trigger = 1;
551 break;
552 }
553 }
554 break;
555 }
556 case 1: /* edge */
557 {
558 trigger = 0;
559 break;
560 }
561 case 2: /* reserved */
562 {
563 printk(KERN_WARNING "broken BIOS!!\n");
564 trigger = 1;
565 break;
566 }
567 case 3: /* level */
568 {
569 trigger = 1;
570 break;
571 }
572 default: /* invalid */
573 {
574 printk(KERN_WARNING "broken BIOS!!\n");
575 trigger = 0;
576 break;
577 }
578 }
579 return trigger;
580 }
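/*
 * Summary of the MP-table mpc_irqflag encoding used above: bits 1:0 give the
 * polarity (00 = conforms to bus, 01 = active high, 10 = reserved,
 * 11 = active low) and bits 3:2 give the trigger mode (00 = conforms to bus,
 * 01 = edge, 10 = reserved, 11 = level).  "Conforms" falls back to the
 * per-bus defaults defined earlier in this file.
 */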
582 static inline int irq_polarity(int idx)
583 {
584 return MPBIOS_polarity(idx);
585 }
587 static inline int irq_trigger(int idx)
588 {
589 return MPBIOS_trigger(idx);
590 }
592 static int pin_2_irq(int idx, int apic, int pin)
593 {
594 int irq, i;
595 int bus = mp_irqs[idx].mpc_srcbus;
597 /*
598 * Debugging check, we are in big trouble if this message pops up!
599 */
600 if (mp_irqs[idx].mpc_dstirq != pin)
601 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
603 switch (mp_bus_id_to_type[bus])
604 {
605 case MP_BUS_ISA: /* ISA pin */
606 case MP_BUS_EISA:
607 case MP_BUS_MCA:
608 case MP_BUS_NEC98:
609 {
610 irq = mp_irqs[idx].mpc_srcbusirq;
611 break;
612 }
613 case MP_BUS_PCI: /* PCI pin */
614 {
615 /*
616 * PCI IRQs are mapped in order
617 */
618 i = irq = 0;
619 while (i < apic)
620 irq += nr_ioapic_registers[i++];
621 irq += pin;
623 /*
624 * For MPS mode, so far only needed by ES7000 platform
625 */
626 if (ioapic_renumber_irq)
627 irq = ioapic_renumber_irq(apic, irq);
629 break;
630 }
631 default:
632 {
633 printk(KERN_ERR "unknown bus type %d.\n",bus);
634 irq = 0;
635 break;
636 }
637 }
639 return irq;
640 }
642 static inline int IO_APIC_irq_trigger(int irq)
643 {
644 int apic, idx, pin;
646 for (apic = 0; apic < nr_ioapics; apic++) {
647 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
648 idx = find_irq_entry(apic,pin,mp_INT);
649 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
650 return irq_trigger(idx);
651 }
652 }
653 /*
654 * nonexistent IRQs are edge default
655 */
656 return 0;
657 }
659 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
660 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
662 int assign_irq_vector(int irq)
663 {
664 static int current_vector = FIRST_DYNAMIC_VECTOR, offset = 0;
666 BUG_ON(irq >= NR_IRQ_VECTORS);
667 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
668 return IO_APIC_VECTOR(irq);
669 next:
670 current_vector += 8;
672 /* Skip the hypercall vector. */
673 if (current_vector == HYPERCALL_VECTOR)
674 goto next;
676 /* Skip the Linux/BSD fast-trap vector. */
677 if (current_vector == 0x80)
678 goto next;
680 if (current_vector > LAST_DYNAMIC_VECTOR) {
681 offset++;
682 if (!(offset%8))
683 return -ENOSPC;
684 current_vector = FIRST_DYNAMIC_VECTOR + offset;
685 }
687 vector_irq[current_vector] = irq;
688 if (irq != AUTO_ASSIGN)
689 IO_APIC_VECTOR(irq) = current_vector;
691 return current_vector;
692 }
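/*
 * A note on the allocation pattern above: vectors are handed out in strides
 * of 8, spreading allocations across the dynamic vector space rather than
 * packing them into a single local-APIC priority class (16 vectors per
 * class).  The hypercall vector and the 0x80 fast-trap vector are skipped.
 * Once current_vector passes LAST_DYNAMIC_VECTOR, offset is bumped and the
 * scan restarts from FIRST_DYNAMIC_VECTOR + offset to fill the slots skipped
 * on earlier passes; after eight full passes -ENOSPC is returned.
 */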
694 static struct hw_interrupt_type ioapic_level_type;
695 static struct hw_interrupt_type ioapic_edge_type;
697 #define IOAPIC_AUTO -1
698 #define IOAPIC_EDGE 0
699 #define IOAPIC_LEVEL 1
701 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
702 {
703 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
704 trigger == IOAPIC_LEVEL)
705 irq_desc[vector].handler = &ioapic_level_type;
706 else
707 irq_desc[vector].handler = &ioapic_edge_type;
708 }
710 static void __init setup_IO_APIC_irqs(void)
711 {
712 struct IO_APIC_route_entry entry;
713 int apic, pin, idx, irq, first_notcon = 1, vector;
714 unsigned long flags;
716 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
718 for (apic = 0; apic < nr_ioapics; apic++) {
719 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
721 /*
722 * add it to the IO-APIC irq-routing table:
723 */
724 memset(&entry,0,sizeof(entry));
726 entry.delivery_mode = INT_DELIVERY_MODE;
727 entry.dest_mode = INT_DEST_MODE;
728 entry.mask = 0; /* enable IRQ */
729 entry.dest.logical.logical_dest =
730 cpu_mask_to_apicid(TARGET_CPUS);
732 idx = find_irq_entry(apic,pin,mp_INT);
733 if (idx == -1) {
734 if (first_notcon) {
735 apic_printk(APIC_VERBOSE, KERN_DEBUG
736 " IO-APIC (apicid-pin) %d-%d",
737 mp_ioapics[apic].mpc_apicid,
738 pin);
739 first_notcon = 0;
740 } else
741 apic_printk(APIC_VERBOSE, ", %d-%d",
742 mp_ioapics[apic].mpc_apicid, pin);
743 continue;
744 }
746 entry.trigger = irq_trigger(idx);
747 entry.polarity = irq_polarity(idx);
749 if (irq_trigger(idx)) {
750 entry.trigger = 1;
751 entry.mask = 1;
752 }
754 irq = pin_2_irq(idx, apic, pin);
755 /*
756 * skip adding the timer int on secondary nodes, which causes
757 * a small but painful rift in the time-space continuum
758 */
759 if (multi_timer_check(apic, irq))
760 continue;
761 else
762 add_pin_to_irq(irq, apic, pin);
764 if (!apic && !IO_APIC_IRQ(irq))
765 continue;
767 if (IO_APIC_IRQ(irq)) {
768 vector = assign_irq_vector(irq);
769 entry.vector = vector;
770 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
772 if (!apic && (irq < 16))
773 disable_8259A_irq(irq);
774 }
775 spin_lock_irqsave(&ioapic_lock, flags);
776 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
777 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
778 set_native_irq_info(entry.vector, TARGET_CPUS);
779 spin_unlock_irqrestore(&ioapic_lock, flags);
780 }
781 }
783 if (!first_notcon)
784 apic_printk(APIC_VERBOSE, " not connected.\n");
785 }
787 /*
788 * Set up the 8259A-master output pin:
789 */
790 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
791 {
792 struct IO_APIC_route_entry entry;
793 unsigned long flags;
795 memset(&entry,0,sizeof(entry));
797 disable_8259A_irq(0);
799 /* mask LVT0 */
800 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
802 /*
803 * We use logical delivery to get the timer IRQ
804 * to the first CPU.
805 */
806 entry.dest_mode = INT_DEST_MODE;
807 entry.mask = 0; /* unmask IRQ now */
808 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
809 entry.delivery_mode = INT_DELIVERY_MODE;
810 entry.polarity = 0;
811 entry.trigger = 0;
812 entry.vector = vector;
814 /*
815 * The timer IRQ doesn't have to know that behind the
816 * scene we have a 8259A-master in AEOI mode ...
817 */
818 irq_desc[IO_APIC_VECTOR(0)].handler = &ioapic_edge_type;
820 /*
821 * Add it to the IO-APIC irq-routing table:
822 */
823 spin_lock_irqsave(&ioapic_lock, flags);
824 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
825 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
826 spin_unlock_irqrestore(&ioapic_lock, flags);
828 enable_8259A_irq(0);
829 }
831 static inline void UNEXPECTED_IO_APIC(void)
832 {
833 }
835 void __init __print_IO_APIC(void)
836 {
837 int apic, i;
838 union IO_APIC_reg_00 reg_00;
839 union IO_APIC_reg_01 reg_01;
840 union IO_APIC_reg_02 reg_02;
841 union IO_APIC_reg_03 reg_03;
842 unsigned long flags;
844 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
845 for (i = 0; i < nr_ioapics; i++)
846 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
847 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
849 /*
850 * We are a bit conservative about what we expect. We have to
851 * know about every hardware change ASAP.
852 */
853 printk(KERN_INFO "testing the IO APIC.......................\n");
855 for (apic = 0; apic < nr_ioapics; apic++) {
857 spin_lock_irqsave(&ioapic_lock, flags);
858 reg_00.raw = io_apic_read(apic, 0);
859 reg_01.raw = io_apic_read(apic, 1);
860 if (reg_01.bits.version >= 0x10)
861 reg_02.raw = io_apic_read(apic, 2);
862 if (reg_01.bits.version >= 0x20)
863 reg_03.raw = io_apic_read(apic, 3);
864 spin_unlock_irqrestore(&ioapic_lock, flags);
866 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
867 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
868 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
869 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
870 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
871 if (reg_00.bits.ID >= get_physical_broadcast())
872 UNEXPECTED_IO_APIC();
873 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
874 UNEXPECTED_IO_APIC();
876 printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
877 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
878 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
879 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
880 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
881 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
882 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
883 (reg_01.bits.entries != 0x2E) &&
884 (reg_01.bits.entries != 0x3F)
885 )
886 UNEXPECTED_IO_APIC();
888 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
889 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
890 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
891 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
892 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
893 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
894 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
895 )
896 UNEXPECTED_IO_APIC();
897 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
898 UNEXPECTED_IO_APIC();
900 /*
901 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
902 * but the value of reg_02 is read as the previous read register
903 * value, so ignore it if reg_02 == reg_01.
904 */
905 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
906 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
907 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
908 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
909 UNEXPECTED_IO_APIC();
910 }
912 /*
913 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
914 * or reg_03, but the value of reg_0[23] is read as the previous read
915 * register value, so ignore it if reg_03 == reg_0[12].
916 */
917 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
918 reg_03.raw != reg_01.raw) {
919 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
920 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
921 if (reg_03.bits.__reserved_1)
922 UNEXPECTED_IO_APIC();
923 }
925 printk(KERN_DEBUG ".... IRQ redirection table:\n");
927 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
928 " Stat Dest Deli Vect: \n");
930 for (i = 0; i <= reg_01.bits.entries; i++) {
931 struct IO_APIC_route_entry entry;
933 spin_lock_irqsave(&ioapic_lock, flags);
934 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
935 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
936 spin_unlock_irqrestore(&ioapic_lock, flags);
938 printk(KERN_DEBUG " %02x %03X %02X ",
939 i,
940 entry.dest.logical.logical_dest,
941 entry.dest.physical.physical_dest
942 );
944 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
945 entry.mask,
946 entry.trigger,
947 entry.irr,
948 entry.polarity,
949 entry.delivery_status,
950 entry.dest_mode,
951 entry.delivery_mode,
952 entry.vector
953 );
954 }
955 }
956 printk(KERN_INFO "Using vector-based indexing\n");
957 printk(KERN_DEBUG "IRQ to pin mappings:\n");
958 for (i = 0; i < NR_IRQS; i++) {
959 struct irq_pin_list *entry = irq_2_pin + i;
960 if (entry->pin < 0)
961 continue;
962 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
963 for (;;) {
964 printk("-> %d:%d", entry->apic, entry->pin);
965 if (!entry->next)
966 break;
967 entry = irq_2_pin + entry->next;
968 }
969 printk("\n");
970 }
972 printk(KERN_INFO ".................................... done.\n");
974 return;
975 }
977 void print_IO_APIC(void)
978 {
979 if (apic_verbosity != APIC_QUIET)
980 __print_IO_APIC();
981 }
983 void print_IO_APIC_keyhandler(unsigned char key)
984 {
985 __print_IO_APIC();
986 }
988 static void __init enable_IO_APIC(void)
989 {
989 {
990 union IO_APIC_reg_01 reg_01;
991 int i8259_apic, i8259_pin;
992 int i, apic;
993 unsigned long flags;
995 for (i = 0; i < PIN_MAP_SIZE; i++) {
996 irq_2_pin[i].pin = -1;
997 irq_2_pin[i].next = 0;
998 }
1000 /* Initialise dynamic irq_2_pin free list. */
1001 for (i = NR_IRQS; i < PIN_MAP_SIZE; i++)
1002 irq_2_pin[i].next = i + 1;
1004 /*
1005 * The number of IO-APIC IRQ registers (== #pins):
1006 */
1007 for (apic = 0; apic < nr_ioapics; apic++) {
1008 spin_lock_irqsave(&ioapic_lock, flags);
1009 reg_01.raw = io_apic_read(apic, 1);
1010 spin_unlock_irqrestore(&ioapic_lock, flags);
1011 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1012 }
1013 for(apic = 0; apic < nr_ioapics; apic++) {
1014 int pin;
1015 /* See if any of the pins is in ExtINT mode */
1016 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1017 struct IO_APIC_route_entry entry;
1018 spin_lock_irqsave(&ioapic_lock, flags);
1019 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1020 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1021 spin_unlock_irqrestore(&ioapic_lock, flags);
1024 /* If the interrupt line is enabled and in ExtInt mode
1025 * I have found the pin where the i8259 is connected.
1026 */
1027 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1028 ioapic_i8259.apic = apic;
1029 ioapic_i8259.pin = pin;
1030 goto found_i8259;
1031 }
1032 }
1033 }
1034 found_i8259:
1035 /* Look to see whether the MP table has reported the ExtINT */
1036 /* If we could not find the appropriate pin by looking at the ioapic,
1037 * the i8259 probably is not connected to the ioapic, but give the
1038 * mptable a chance anyway.
1039 */
1040 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1041 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1042 /* Trust the MP table if nothing is setup in the hardware */
1043 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1044 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1045 ioapic_i8259.pin = i8259_pin;
1046 ioapic_i8259.apic = i8259_apic;
1047 }
1048 /* Complain if the MP table and the hardware disagree */
1049 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1050 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1051 {
1052 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1053 }
1055 /*
1056 * Do not trust the IO-APIC being empty at bootup
1057 */
1058 clear_IO_APIC();
1059 }
1061 /*
1062 * Not an __init, needed by the reboot code
1063 */
1064 void disable_IO_APIC(void)
1065 {
1066 /*
1067 * Clear the IO-APIC before rebooting:
1068 */
1069 clear_IO_APIC();
1071 /*
1072 * If the i8259 is routed through an IOAPIC
1073 * Put that IOAPIC in virtual wire mode
1074 * so legacy interrupts can be delivered.
1075 */
1076 if (ioapic_i8259.pin != -1) {
1077 struct IO_APIC_route_entry entry;
1078 unsigned long flags;
1080 memset(&entry, 0, sizeof(entry));
1081 entry.mask = 0; /* Enabled */
1082 entry.trigger = 0; /* Edge */
1083 entry.irr = 0;
1084 entry.polarity = 0; /* High */
1085 entry.delivery_status = 0;
1086 entry.dest_mode = 0; /* Physical */
1087 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1088 entry.vector = 0;
1089 entry.dest.physical.physical_dest =
1090 GET_APIC_ID(apic_read(APIC_ID));
1092 /*
1093 * Add it to the IO-APIC irq-routing table:
1094 */
1095 spin_lock_irqsave(&ioapic_lock, flags);
1096 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1097 *(((int *)&entry)+1));
1098 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1099 *(((int *)&entry)+0));
1100 spin_unlock_irqrestore(&ioapic_lock, flags);
1101 }
1102 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1103 }
1105 /*
1106 * function to set the IO-APIC physical IDs based on the
1107 * values stored in the MPC table.
1109 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1110 */
1112 #ifndef CONFIG_X86_NUMAQ
1113 static void __init setup_ioapic_ids_from_mpc(void)
1114 {
1115 union IO_APIC_reg_00 reg_00;
1116 physid_mask_t phys_id_present_map;
1117 int apic;
1118 int i;
1119 unsigned char old_id;
1120 unsigned long flags;
1122 /*
1123 * Don't check I/O APIC IDs for xAPIC systems. They have
1124 * no meaning without the serial APIC bus.
1125 */
1126 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
1127 return;
1129 /*
1130 * This is broken; anything with a real cpu count has to
1131 * circumvent this idiocy regardless.
1132 */
1133 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1135 /*
1136 * Set the IOAPIC ID to the value stored in the MPC table.
1137 */
1138 for (apic = 0; apic < nr_ioapics; apic++) {
1140 /* Read the register 0 value */
1141 spin_lock_irqsave(&ioapic_lock, flags);
1142 reg_00.raw = io_apic_read(apic, 0);
1143 spin_unlock_irqrestore(&ioapic_lock, flags);
1145 old_id = mp_ioapics[apic].mpc_apicid;
1147 if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
1148 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1149 apic, mp_ioapics[apic].mpc_apicid);
1150 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1151 reg_00.bits.ID);
1152 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
1153 }
1155 /*
1156 * Sanity check, is the ID really free? Every APIC in a
1157 * system must have a unique ID or we get lots of nice
1158 * 'stuck on smp_invalidate_needed IPI wait' messages.
1159 */
1160 if (check_apicid_used(phys_id_present_map,
1161 mp_ioapics[apic].mpc_apicid)) {
1162 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1163 apic, mp_ioapics[apic].mpc_apicid);
1164 for (i = 0; i < get_physical_broadcast(); i++)
1165 if (!physid_isset(i, phys_id_present_map))
1166 break;
1167 if (i >= get_physical_broadcast())
1168 panic("Max APIC ID exceeded!\n");
1169 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1170 i);
1171 physid_set(i, phys_id_present_map);
1172 mp_ioapics[apic].mpc_apicid = i;
1173 } else {
1174 physid_mask_t tmp;
1175 tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
1176 apic_printk(APIC_VERBOSE, "Setting %d in the "
1177 "phys_id_present_map\n",
1178 mp_ioapics[apic].mpc_apicid);
1179 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1180 }
1183 /*
1184 * We need to adjust the IRQ routing table
1185 * if the ID changed.
1186 */
1187 if (old_id != mp_ioapics[apic].mpc_apicid)
1188 for (i = 0; i < mp_irq_entries; i++)
1189 if (mp_irqs[i].mpc_dstapic == old_id)
1190 mp_irqs[i].mpc_dstapic
1191 = mp_ioapics[apic].mpc_apicid;
1193 /*
1194 * Read the right value from the MPC table and
1195 * write it into the ID register.
1196 */
1197 apic_printk(APIC_VERBOSE, KERN_INFO
1198 "...changing IO-APIC physical APIC ID to %d ...",
1199 mp_ioapics[apic].mpc_apicid);
1201 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1202 spin_lock_irqsave(&ioapic_lock, flags);
1203 io_apic_write(apic, 0, reg_00.raw);
1204 spin_unlock_irqrestore(&ioapic_lock, flags);
1206 /*
1207 * Sanity check
1208 */
1209 spin_lock_irqsave(&ioapic_lock, flags);
1210 reg_00.raw = io_apic_read(apic, 0);
1211 spin_unlock_irqrestore(&ioapic_lock, flags);
1212 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1213 printk("could not set ID!\n");
1214 else
1215 apic_printk(APIC_VERBOSE, " ok.\n");
1216 }
1217 }
1218 #else
1219 static void __init setup_ioapic_ids_from_mpc(void) { }
1220 #endif
1222 /*
1223 * There is a nasty bug in some older SMP boards, their mptable lies
1224 * about the timer IRQ. We do the following to work around the situation:
1226 * - timer IRQ defaults to IO-APIC IRQ
1227 * - if this function detects that timer IRQs are defunct, then we fall
1228 * back to ISA timer IRQs
1229 */
1230 static int __init timer_irq_works(void)
1231 {
1232 unsigned long t1 = jiffies;
1234 local_irq_enable();
1235 /* Let ten ticks pass... */
1236 mdelay((10 * 1000) / HZ);
1238 /*
1239 * Expect a few ticks at least, to be sure some possible
1240 * glue logic does not lock up after one or two first
1241 * ticks in a non-ExtINT mode. Also the local APIC
1242 * might have cached one ExtINT interrupt. Finally, at
1243 * least one tick may be lost due to delays.
1244 */
1245 if (jiffies - t1 > 4)
1246 return 1;
1248 return 0;
1249 }
1251 /*
1252 * In the SMP+IOAPIC case it might happen that there are an unspecified
1253 * number of pending IRQ events unhandled. These cases are very rare,
1254 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1255 * better to do it this way as thus we do not have to be aware of
1256 * 'pending' interrupts in the IRQ path, except at this point.
1257 */
1258 /*
1259 * Edge triggered needs to resend any interrupt
1260 * that was delayed but this is now handled in the device
1261 * independent code.
1262 */
1264 /*
1265 * Starting up a edge-triggered IO-APIC interrupt is
1266 * nasty - we need to make sure that we get the edge.
1267 * If it is already asserted for some reason, we need
1268 * return 1 to indicate that is was pending.
1270 * This is not complete - we should be able to fake
1271 * an edge even if it isn't on the 8259A...
1272 */
1273 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1274 {
1275 int was_pending = 0;
1276 unsigned long flags;
1278 spin_lock_irqsave(&ioapic_lock, flags);
1279 if (irq < 16) {
1280 disable_8259A_irq(irq);
1281 if (i8259A_irq_pending(irq))
1282 was_pending = 1;
1283 }
1284 __unmask_IO_APIC_irq(irq);
1285 spin_unlock_irqrestore(&ioapic_lock, flags);
1287 return was_pending;
1288 }
1290 /*
1291 * Once we have recorded IRQ_PENDING already, we can mask the
1292 * interrupt for real. This prevents IRQ storms from unhandled
1293 * devices.
1294 */
1295 static void ack_edge_ioapic_irq(unsigned int irq)
1296 {
1297 if ((irq_desc[IO_APIC_VECTOR(irq)].status & (IRQ_PENDING | IRQ_DISABLED))
1298 == (IRQ_PENDING | IRQ_DISABLED))
1299 mask_IO_APIC_irq(irq);
1300 ack_APIC_irq();
1301 }
1303 /*
1304 * Level triggered interrupts can just be masked,
1305 * and shutting down and starting up the interrupt
1306 * is the same as enabling and disabling them -- except
1307 * with a startup need to return a "was pending" value.
1309 * Level triggered interrupts are special because we
1310 * do not touch any IO-APIC register while handling
1311 * them. We ack the APIC in the end-IRQ handler, not
1312 * in the start-IRQ-handler. Protection against reentrance
1313 * from the same interrupt is still provided, both by the
1314 * generic IRQ layer and by the fact that an unacked local
1315 * APIC does not accept IRQs.
1316 */
1317 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1318 {
1319 unmask_IO_APIC_irq(irq);
1321 return 0; /* don't check for pending */
1322 }
1324 int ioapic_ack_new = 1;
1325 static void setup_ioapic_ack(char *s)
1326 {
1327 if ( !strcmp(s, "old") )
1328 ioapic_ack_new = 0;
1329 else if ( !strcmp(s, "new") )
1330 ioapic_ack_new = 1;
1331 else
1332 printk("Unknown ioapic_ack value specified: '%s'\n", s);
1333 }
1334 custom_param("ioapic_ack", setup_ioapic_ack);
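/*
 * In outline, the two ack models selected by "ioapic_ack=":
 *   old: a level IRQ is masked and EOIed in the ->ack hook, before its
 *        handler runs (mask_and_ack_level_ioapic_irq below), and unmasked
 *        again in ->end.
 *   new (default): ->ack is a no-op for level IRQs and the EOI, together
 *        with the 82093AA erratum workaround, is deferred to ->end, i.e.
 *        until the interrupt has really been handled.  As the changeset
 *        description notes, this is why pending EOIs must be forcibly
 *        flushed when an IRQ is unbound from a guest.
 */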
1336 static void mask_and_ack_level_ioapic_irq (unsigned int irq)
1337 {
1338 unsigned long v;
1339 int i;
1341 if ( ioapic_ack_new )
1342 return;
1344 mask_IO_APIC_irq(irq);
1345 /*
1346 * It appears there is an erratum which affects at least version 0x11
1347 * of I/O APIC (that's the 82093AA and cores integrated into various
1348 * chipsets). Under certain conditions a level-triggered interrupt is
1349 * erroneously delivered as edge-triggered one but the respective IRR
1350 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1351 * message but it will never arrive and further interrupts are blocked
1352 * from the source. The exact reason is so far unknown, but the
1353 * phenomenon was observed when two consecutive interrupt requests
1354 * from a given source get delivered to the same CPU and the source is
1355 * temporarily disabled in between.
1357 * A workaround is to simulate an EOI message manually. We achieve it
1358 * by setting the trigger mode to edge and then to level when the edge
1359 * trigger mode gets detected in the TMR of a local APIC for a
1360 * level-triggered interrupt. We mask the source for the time of the
1361 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1362 * The idea is from Manfred Spraul. --macro
1363 */
1364 i = IO_APIC_VECTOR(irq);
1366 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1368 ack_APIC_irq();
1370 if (!(v & (1 << (i & 0x1f)))) {
1371 atomic_inc(&irq_mis_count);
1372 spin_lock(&ioapic_lock);
1373 __edge_IO_APIC_irq(irq);
1374 __level_IO_APIC_irq(irq);
1375 spin_unlock(&ioapic_lock);
1376 }
1377 }
1379 static void end_level_ioapic_irq (unsigned int irq)
1380 {
1381 unsigned long v;
1382 int i;
1384 if ( !ioapic_ack_new )
1385 {
1386 if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
1387 unmask_IO_APIC_irq(irq);
1388 return;
1389 }
1391 /*
1392 * It appears there is an erratum which affects at least version 0x11
1393 * of I/O APIC (that's the 82093AA and cores integrated into various
1394 * chipsets). Under certain conditions a level-triggered interrupt is
1395 * erroneously delivered as edge-triggered one but the respective IRR
1396 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1397 * message but it will never arrive and further interrupts are blocked
1398 * from the source. The exact reason is so far unknown, but the
1399 * phenomenon was observed when two consecutive interrupt requests
1400 * from a given source get delivered to the same CPU and the source is
1401 * temporarily disabled in between.
1403 * A workaround is to simulate an EOI message manually. We achieve it
1404 * by setting the trigger mode to edge and then to level when the edge
1405 * trigger mode gets detected in the TMR of a local APIC for a
1406 * level-triggered interrupt. We mask the source for the time of the
1407 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1408 * The idea is from Manfred Spraul. --macro
1409 */
1410 i = IO_APIC_VECTOR(irq);
1412 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1414 ack_APIC_irq();
1416 if (!(v & (1 << (i & 0x1f)))) {
1417 atomic_inc(&irq_mis_count);
1418 spin_lock(&ioapic_lock);
1419 __mask_IO_APIC_irq(irq);
1420 __edge_IO_APIC_irq(irq);
1421 __level_IO_APIC_irq(irq);
1422 if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
1423 __unmask_IO_APIC_irq(irq);
1424 spin_unlock(&ioapic_lock);
1425 }
1426 }
1428 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1429 {
1430 int irq = vector_to_irq(vector);
1431 return startup_edge_ioapic_irq(irq);
1432 }
1434 static void ack_edge_ioapic_vector(unsigned int vector)
1435 {
1436 int irq = vector_to_irq(vector);
1437 ack_edge_ioapic_irq(irq);
1438 }
1440 static unsigned int startup_level_ioapic_vector(unsigned int vector)
1441 {
1442 int irq = vector_to_irq(vector);
1443 return startup_level_ioapic_irq (irq);
1444 }
1446 static void mask_and_ack_level_ioapic_vector(unsigned int vector)
1447 {
1448 int irq = vector_to_irq(vector);
1449 mask_and_ack_level_ioapic_irq(irq);
1450 }
1452 static void end_level_ioapic_vector(unsigned int vector)
1453 {
1454 int irq = vector_to_irq(vector);
1455 end_level_ioapic_irq(irq);
1456 }
1458 static void mask_IO_APIC_vector(unsigned int vector)
1459 {
1460 int irq = vector_to_irq(vector);
1461 mask_IO_APIC_irq(irq);
1462 }
1464 static void unmask_IO_APIC_vector(unsigned int vector)
1465 {
1466 int irq = vector_to_irq(vector);
1467 unmask_IO_APIC_irq(irq);
1468 }
1470 static void set_ioapic_affinity_vector(
1471 unsigned int vector, cpumask_t cpu_mask)
1472 {
1473 int irq = vector_to_irq(vector);
1475 set_native_irq_info(vector, cpu_mask);
1476 set_ioapic_affinity_irq(irq, cpu_mask);
1477 }
1479 static void disable_edge_ioapic_vector(unsigned int vector)
1480 {
1481 }
1483 static void end_edge_ioapic_vector(unsigned int vector)
1484 {
1485 }
1487 /*
1488 * Level and edge triggered IO-APIC interrupts need different handling,
1489 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1490 * handled with the level-triggered descriptor, but that one has slightly
1491 * more overhead. Level-triggered interrupts cannot be handled with the
1492 * edge-triggered handler, without risking IRQ storms and other ugly
1493 * races.
1494 */
1495 static struct hw_interrupt_type ioapic_edge_type = {
1496 .typename = "IO-APIC-edge",
1497 .startup = startup_edge_ioapic_vector,
1498 .shutdown = disable_edge_ioapic_vector,
1499 .enable = unmask_IO_APIC_vector,
1500 .disable = disable_edge_ioapic_vector,
1501 .ack = ack_edge_ioapic_vector,
1502 .end = end_edge_ioapic_vector,
1503 .set_affinity = set_ioapic_affinity_vector,
1504 };
1506 static struct hw_interrupt_type ioapic_level_type = {
1507 .typename = "IO-APIC-level",
1508 .startup = startup_level_ioapic_vector,
1509 .shutdown = mask_IO_APIC_vector,
1510 .enable = unmask_IO_APIC_vector,
1511 .disable = mask_IO_APIC_vector,
1512 .ack = mask_and_ack_level_ioapic_vector,
1513 .end = end_level_ioapic_vector,
1514 .set_affinity = set_ioapic_affinity_vector,
1515 };
1517 static inline void init_IO_APIC_traps(void)
1518 {
1519 int irq;
1520 /* Xen: This is way simpler than the Linux implementation. */
1521 for (irq = 0; irq < 16 ; irq++)
1522 if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq))
1523 make_8259A_irq(irq);
1524 }
1526 static void enable_lapic_vector(unsigned int vector)
1527 {
1528 unsigned long v;
1530 v = apic_read(APIC_LVT0);
1531 apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
1532 }
1534 static void disable_lapic_vector(unsigned int vector)
1535 {
1536 unsigned long v;
1538 v = apic_read(APIC_LVT0);
1539 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
1540 }
1542 static void ack_lapic_vector(unsigned int vector)
1543 {
1544 ack_APIC_irq();
1545 }
1547 static void end_lapic_vector(unsigned int vector) { /* nothing */ }
1549 static struct hw_interrupt_type lapic_irq_type = {
1550 .typename = "local-APIC-edge",
1551 .startup = NULL, /* startup_irq() not used for IRQ0 */
1552 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1553 .enable = enable_lapic_vector,
1554 .disable = disable_lapic_vector,
1555 .ack = ack_lapic_vector,
1556 .end = end_lapic_vector
1557 };
1559 /*
1560 * This looks a bit hackish but it's about the only one way of sending
1561 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1562 * not support the ExtINT mode, unfortunately. We need to send these
1563 * cycles as some i82489DX-based boards have glue logic that keeps the
1564 * 8259A interrupt line asserted until INTA. --macro
1565 */
1566 static inline void unlock_ExtINT_logic(void)
1568 int apic, pin, i;
1569 struct IO_APIC_route_entry entry0, entry1;
1570 unsigned char save_control, save_freq_select;
1571 unsigned long flags;
1573 pin = find_isa_irq_pin(8, mp_INT);
1574 apic = find_isa_irq_apic(8, mp_INT);
1575 if (pin == -1)
1576 return;
1578 spin_lock_irqsave(&ioapic_lock, flags);
1579 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1580 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1581 spin_unlock_irqrestore(&ioapic_lock, flags);
1582 clear_IO_APIC_pin(apic, pin);
1584 memset(&entry1, 0, sizeof(entry1));
1586 entry1.dest_mode = 0; /* physical delivery */
1587 entry1.mask = 0; /* unmask IRQ now */
1588 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1589 entry1.delivery_mode = dest_ExtINT;
1590 entry1.polarity = entry0.polarity;
1591 entry1.trigger = 0;
1592 entry1.vector = 0;
1594 spin_lock_irqsave(&ioapic_lock, flags);
1595 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1596 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1597 spin_unlock_irqrestore(&ioapic_lock, flags);
1599 save_control = CMOS_READ(RTC_CONTROL);
1600 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1601 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1602 RTC_FREQ_SELECT);
1603 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1605 i = 100;
1606 while (i-- > 0) {
1607 mdelay(10);
1608 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1609 i -= 10;
1612 CMOS_WRITE(save_control, RTC_CONTROL);
1613 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1614 clear_IO_APIC_pin(apic, pin);
1616 spin_lock_irqsave(&ioapic_lock, flags);
1617 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1618 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1619 spin_unlock_irqrestore(&ioapic_lock, flags);
1622 /*
1623 * This code may look a bit paranoid, but it's supposed to cooperate with
1624 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1625 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1626 * fanatically on his truly buggy board.
1627 */
1628 static inline void check_timer(void)
1630 int apic1, pin1, apic2, pin2;
1631 int vector;
1633 /*
1634 * get/set the timer IRQ vector:
1635 */
1636 disable_8259A_irq(0);
1637 vector = assign_irq_vector(0);
1639 irq_desc[IO_APIC_VECTOR(0)].action = irq_desc[LEGACY_VECTOR(0)].action;
1640 irq_desc[IO_APIC_VECTOR(0)].depth = 0;
1641 irq_desc[IO_APIC_VECTOR(0)].status &= ~IRQ_DISABLED;
1643 /*
1644 * Subtle, code in do_timer_interrupt() expects an AEOI
1645 * mode for the 8259A whenever interrupts are routed
1646 * through I/O APICs. Also IRQ0 has to be enabled in
1647 * the 8259A which implies the virtual wire has to be
1648 * disabled in the local APIC.
1649 */
1650 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1651 init_8259A(1);
1652 /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
1653 /*timer_ack = 1;*/
1654 /*enable_8259A_irq(0);*/
1656 pin1 = find_isa_irq_pin(0, mp_INT);
1657 apic1 = find_isa_irq_apic(0, mp_INT);
1658 pin2 = ioapic_i8259.pin;
1659 apic2 = ioapic_i8259.apic;
1661 printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1662 vector, apic1, pin1, apic2, pin2);
1664 if (pin1 != -1) {
1665 /*
1666 * Ok, does IRQ0 through the IOAPIC work?
1667 */
1668 unmask_IO_APIC_irq(0);
1669 if (timer_irq_works()) {
1670 if (disable_timer_pin_1 > 0)
1671 clear_IO_APIC_pin(apic1, pin1);
1672 return;
1674 clear_IO_APIC_pin(apic1, pin1);
1675 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
1676 "IO-APIC\n");
1679 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
1680 if (pin2 != -1) {
1681 printk("\n..... (found pin %d) ...", pin2);
1682 /*
1683 * legacy devices should be connected to IO APIC #0
1684 */
1685 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1686 if (timer_irq_works()) {
1687 printk("works.\n");
1688 if (pin1 != -1)
1689 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
1690 else
1691 add_pin_to_irq(0, apic2, pin2);
1692 return;
1694 /*
1695 * Cleanup, just in case ...
1696 */
1697 clear_IO_APIC_pin(apic2, pin2);
1699 printk(" failed.\n");
1701 if (nmi_watchdog == NMI_IO_APIC) {
1702 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1703 nmi_watchdog = 0;
1706 printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1708 disable_8259A_irq(0);
1709 irq_desc[vector].handler = &lapic_irq_type;
1710 apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1711 enable_8259A_irq(0);
1713 if (timer_irq_works()) {
1714 printk(" works.\n");
1715 return;
1717 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1718 printk(" failed.\n");
1720 printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1722 /*timer_ack = 0;*/
1723 init_8259A(0);
1724 make_8259A_irq(0);
1725 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
1727 unlock_ExtINT_logic();
1729 if (timer_irq_works()) {
1730 printk(" works.\n");
1731 return;
1733 printk(" failed :(.\n");
1734 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
1735 "report. Then try booting with the 'noapic' option");
1738 /*
1740 * IRQ's that are handled by the PIC in the MPS IOAPIC case.
1741 * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
1742 * Linux doesn't really care, as it's not actually used
1743 * for any interrupt handling anyway.
1744 */
1745 #define PIC_IRQS (1 << PIC_CASCADE_IR)
1747 void __init setup_IO_APIC(void)
1749 enable_IO_APIC();
1751 if (acpi_ioapic)
1752 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1753 else
1754 io_apic_irqs = ~PIC_IRQS;
1756 printk("ENABLING IO-APIC IRQs\n");
1757 printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old");
1759 /*
1760 * Set up IO-APIC IRQ routing.
1761 */
1762 if (!acpi_ioapic)
1763 setup_ioapic_ids_from_mpc();
1764 sync_Arb_IDs();
1765 setup_IO_APIC_irqs();
1766 init_IO_APIC_traps();
1767 check_timer();
1768 print_IO_APIC();
1770 register_keyhandler('z', print_IO_APIC_keyhandler, "print ioapic info");
1773 /* --------------------------------------------------------------------------
1774 ACPI-based IOAPIC Configuration
1775 -------------------------------------------------------------------------- */
1777 #ifdef CONFIG_ACPI_BOOT
1779 int __init io_apic_get_unique_id (int ioapic, int apic_id)
1781 union IO_APIC_reg_00 reg_00;
1782 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
1783 physid_mask_t tmp;
1784 unsigned long flags;
1785 int i = 0;
1787 /*
1788 * The P4 platform supports up to 256 APIC IDs on two separate APIC
1789 * buses (one for LAPICs, one for IOAPICs), where predecessors only
1790 * supports up to 16 on one shared APIC bus.
1792 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
1793 * advantage of new APIC bus architecture.
1794 */
1796 if (physids_empty(apic_id_map))
1797 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
1799 spin_lock_irqsave(&ioapic_lock, flags);
1800 reg_00.raw = io_apic_read(ioapic, 0);
1801 spin_unlock_irqrestore(&ioapic_lock, flags);
1803 if (apic_id >= get_physical_broadcast()) {
1804 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
1805 "%d\n", ioapic, apic_id, reg_00.bits.ID);
1806 apic_id = reg_00.bits.ID;
1809 /*
1810 * Every APIC in a system must have a unique ID or we get lots of nice
1811 * 'stuck on smp_invalidate_needed IPI wait' messages.
1812 */
1813 if (check_apicid_used(apic_id_map, apic_id)) {
1815 for (i = 0; i < get_physical_broadcast(); i++) {
1816 if (!check_apicid_used(apic_id_map, i))
1817 break;
1820 if (i == get_physical_broadcast())
1821 panic("Max apic_id exceeded!\n");
1823 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
1824 "trying %d\n", ioapic, apic_id, i);
1826 apic_id = i;
1829 tmp = apicid_to_cpu_present(apic_id);
1830 physids_or(apic_id_map, apic_id_map, tmp);
1832 if (reg_00.bits.ID != apic_id) {
1833 reg_00.bits.ID = apic_id;
1835 spin_lock_irqsave(&ioapic_lock, flags);
1836 io_apic_write(ioapic, 0, reg_00.raw);
1837 reg_00.raw = io_apic_read(ioapic, 0);
1838 spin_unlock_irqrestore(&ioapic_lock, flags);
1840 /* Sanity check */
1841 if (reg_00.bits.ID != apic_id) {
1842 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
1843 return -1;
1847 apic_printk(APIC_VERBOSE, KERN_INFO
1848 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
1850 return apic_id;
1854 int __init io_apic_get_version (int ioapic)
1856 union IO_APIC_reg_01 reg_01;
1857 unsigned long flags;
1859 spin_lock_irqsave(&ioapic_lock, flags);
1860 reg_01.raw = io_apic_read(ioapic, 1);
1861 spin_unlock_irqrestore(&ioapic_lock, flags);
1863 return reg_01.bits.version;
1867 int __init io_apic_get_redir_entries (int ioapic)
1869 union IO_APIC_reg_01 reg_01;
1870 unsigned long flags;
1872 spin_lock_irqsave(&ioapic_lock, flags);
1873 reg_01.raw = io_apic_read(ioapic, 1);
1874 spin_unlock_irqrestore(&ioapic_lock, flags);
1876 return reg_01.bits.entries;
1880 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
1882 struct IO_APIC_route_entry entry;
1883 unsigned long flags;
1885 if (!IO_APIC_IRQ(irq)) {
1886 printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
1887 ioapic);
1888 return -EINVAL;
1891 /*
1892 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
1893 * Note that we mask (disable) IRQs now -- these get enabled when the
1894 * corresponding device driver registers for this IRQ.
1895 */
1897 memset(&entry,0,sizeof(entry));
1899 entry.delivery_mode = INT_DELIVERY_MODE;
1900 entry.dest_mode = INT_DEST_MODE;
1901 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
1902 entry.trigger = edge_level;
1903 entry.polarity = active_high_low;
1904 entry.mask = 1;
1906 /*
1907 * IRQs < 16 are already in the irq_2_pin[] map
1908 */
1909 if (irq >= 16)
1910 add_pin_to_irq(irq, ioapic, pin);
1912 entry.vector = assign_irq_vector(irq);
1914 apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
1915 "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
1916 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
1917 edge_level, active_high_low);
1919 ioapic_register_intr(irq, entry.vector, edge_level);
1921 if (!ioapic && (irq < 16))
1922 disable_8259A_irq(irq);
1924 spin_lock_irqsave(&ioapic_lock, flags);
1925 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
1926 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
1927 set_native_irq_info(entry.vector, TARGET_CPUS);
1928 spin_unlock_irqrestore(&ioapic_lock, flags);
1930 return 0;
1933 #endif /*CONFIG_ACPI_BOOT*/
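/*
 * The routines below are Xen-specific glue: ioapic_guest_read and
 * ioapic_guest_write let the privileged guest (dom0) access IO-APIC
 * registers in a controlled way.  Roughly, only the low half of a
 * redirection entry may be written, and the destination, delivery mode and
 * destination mode are overridden with Xen's own choices before the entry
 * is programmed.
 */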
1935 static int ioapic_physbase_to_id(unsigned long physbase)
1937 int apic;
1938 for ( apic = 0; apic < nr_ioapics; apic++ )
1939 if ( mp_ioapics[apic].mpc_apicaddr == physbase )
1940 return apic;
1941 return -EINVAL;
1944 int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
1946 int apic;
1947 unsigned long flags;
1949 if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
1950 return apic;
1952 spin_lock_irqsave(&ioapic_lock, flags);
1953 *pval = io_apic_read(apic, reg);
1954 spin_unlock_irqrestore(&ioapic_lock, flags);
1956 return 0;
1959 #define WARN_BOGUS_WRITE(f, a...) \
1960 DPRINTK("\n%s: apic=%d, pin=%d, old_irq=%d, new_irq=%d\n" \
1961 "%s: old_entry=%08x, new_entry=%08x\n" \
1962 "%s: " f, __FUNCTION__, apic, pin, old_irq, new_irq, \
1963 __FUNCTION__, *(u32 *)&old_rte, *(u32 *)&new_rte, \
1964 __FUNCTION__ , ##a )
1966 int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
1968 int apic, pin, old_irq = -1, new_irq = -1;
1969 struct IO_APIC_route_entry old_rte = { 0 }, new_rte = { 0 };
1970 unsigned long flags;
1972 if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
1973 return apic;
1975 /* Only write to the first half of a route entry. */
1976 if ( (reg < 0x10) || (reg & 1) )
1977 return 0;
1979 pin = (reg - 0x10) >> 1;
1981 /* Write first half from guest; second half is target info. */
1982 *(u32 *)&new_rte = val;
1983 new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
1985 /*
1986 * What about weird destination types?
1987 * SMI: Ignore? Ought to be set up by the BIOS.
1988 * NMI: Ignore? Watchdog functionality is Xen's concern.
1989 * INIT: Definitely ignore: probably a guest OS bug.
1990 * ExtINT: Ignore? Linux only asserts this at start of day.
1991 * For now, print a message and return an error. We can fix up on demand.
1992 */
1993 if ( new_rte.delivery_mode > dest_LowestPrio )
1995 printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
1996 printk(" APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
1997 return -EINVAL;
2000 /*
2001 * The guest does not know physical APIC arrangement (flat vs. cluster).
2002 * Apply genapic conventions for this platform.
2003 */
2004 new_rte.delivery_mode = INT_DELIVERY_MODE;
2005 new_rte.dest_mode = INT_DEST_MODE;
2007 spin_lock_irqsave(&ioapic_lock, flags);
2009 /* Read first (interesting) half of current routing entry. */
2010 *(u32 *)&old_rte = io_apic_read(apic, 0x10 + 2 * pin);
2012 /* No change to the first half of the routing entry? Bail quietly. */
2013 if ( *(u32 *)&old_rte == *(u32 *)&new_rte )
2015 spin_unlock_irqrestore(&ioapic_lock, flags);
2016 return 0;
2019 if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
2020 old_irq = vector_irq[old_rte.vector];
2021 if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
2022 new_irq = vector_irq[new_rte.vector];
2024 if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
2026 if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
2028 WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
2029 spin_unlock_irqrestore(&ioapic_lock, flags);
2030 return 0;
2033 remove_pin_at_irq(old_irq, apic, pin);
2036 if ( (new_irq != -1) && IO_APIC_IRQ(new_irq) )
2038 if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
2040 WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
2041 (old_irq != new_irq) ? "add" : "modify");
2042 spin_unlock_irqrestore(&ioapic_lock, flags);
2043 return 0;
2046 /* Set the correct irq-handling type. */
2047 irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ?
2048 &ioapic_level_type: &ioapic_edge_type;
2050 if ( old_irq != new_irq )
2051 add_pin_to_irq(new_irq, apic, pin);
2053 /* Mask iff level triggered. */
2054 new_rte.mask = new_rte.trigger;
2056 else if ( !new_rte.mask )
2058 /* This pin leads nowhere but the guest has not masked it. */
2059 WARN_BOGUS_WRITE("Installing bogus unmasked IO-APIC entry!\n");
2060 new_rte.mask = 1;
2064 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
2065 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));
2067 spin_unlock_irqrestore(&ioapic_lock, flags);
2069 return 0;
2072 void dump_ioapic_irq_info(void)
2074 struct irq_pin_list *entry;
2075 struct IO_APIC_route_entry rte;
2076 unsigned int irq, pin, printed = 0;
2077 unsigned long flags;
2079 for ( irq = 0; irq < NR_IRQS; irq++ )
2081 entry = &irq_2_pin[irq];
2082 if ( entry->pin == -1 )
2083 continue;
2085 if ( !printed++ )
2086 printk("IO-APIC interrupt information:\n");
2088 printk(" IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq));
2090 for ( ; ; )
2092 pin = entry->pin;
2094 printk(" Apic 0x%02x, Pin %2d: ", entry->apic, pin);
2096 spin_lock_irqsave(&ioapic_lock, flags);
2097 *(((int *)&rte) + 0) = io_apic_read(entry->apic, 0x10 + 2 * pin);
2098 *(((int *)&rte) + 1) = io_apic_read(entry->apic, 0x11 + 2 * pin);
2099 spin_unlock_irqrestore(&ioapic_lock, flags);
2101 printk("vector=%u, delivery_mode=%u, dest_mode=%s, "
2102 "delivery_status=%d, polarity=%d, irr=%d, "
2103 "trigger=%s, mask=%d\n",
2104 rte.vector, rte.delivery_mode,
2105 rte.dest_mode ? "logical" : "physical",
2106 rte.delivery_status, rte.polarity, rte.irr,
2107 rte.trigger ? "level" : "edge", rte.mask);
2109 if ( entry->next == 0 )
2110 break;
2111 entry = &irq_2_pin[entry->next];