ia64/xen-unstable

view xen/arch/x86/io_apic.c @ 15445:c72a93cbcedb

Remove incorrect __init prefixes

The following functions can be __init in Linux, but must not be
in Xen: __print_IO_APIC is called from a keyhandler, while
setup_ioapic_dest is invoked from a platform hypercall.
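
Both call paths stay live after boot, once .init.text has been released. A minimal sketch of the keyhandler case, abridged from the listing below (the registration appears at source line 1793 of this file); the hypercall case for setup_ioapic_dest is analogous:

    /* Registered from setup_IO_APIC() during boot: */
    register_keyhandler('z', print_IO_APIC_keyhandler, "print ioapic info");

    /* Runs whenever the 'z' debug key is pressed, i.e. at arbitrary times
     * after boot, so neither this handler nor __print_IO_APIC() may be
     * placed in init memory: */
    void print_IO_APIC_keyhandler(unsigned char key)
    {
        __print_IO_APIC();
    }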

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author Keir Fraser <keir@xensource.com>
date Wed Jun 27 19:59:21 2007 +0100 (2007-06-27)
parents 0f9d683a83ed
children c3929e540632
line source
1 /*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
23 #include <xen/config.h>
24 #include <xen/lib.h>
25 #include <xen/init.h>
26 #include <xen/irq.h>
27 #include <xen/delay.h>
28 #include <xen/sched.h>
29 #include <xen/acpi.h>
30 #include <xen/keyhandler.h>
31 #include <asm/io.h>
32 #include <asm/mc146818rtc.h>
33 #include <asm/smp.h>
34 #include <asm/desc.h>
35 #include <mach_apic.h>
36 #include <io_ports.h>
38 #define set_irq_info(irq, mask) ((void)0)
39 #define set_native_irq_info(irq, mask) ((void)0)
41 /* Different to Linux: our implementation can be simpler. */
42 #define make_8259A_irq(irq) (io_apic_irqs &= ~(1<<(irq)))
44 int (*ioapic_renumber_irq)(int ioapic, int irq);
45 atomic_t irq_mis_count;
47 /* Where, if anywhere, the i8259 is connected in external int mode */
48 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
50 static DEFINE_SPINLOCK(ioapic_lock);
51 static DEFINE_SPINLOCK(vector_lock);
53 int skip_ioapic_setup;
55 #ifndef sis_apic_bug
56 /*
57 * Is the SiS APIC rmw bug present?
58 * -1 = don't know, 0 = no, 1 = yes
59 */
60 int sis_apic_bug = -1;
61 #endif
63 /*
64 * # of IRQ routing registers
65 */
66 int nr_ioapic_registers[MAX_IO_APICS];
68 int disable_timer_pin_1 __initdata;
70 /*
71 * Rough estimation of how many shared IRQs there are, can
72 * be changed anytime.
73 */
74 #define MAX_PLUS_SHARED_IRQS NR_IRQS
75 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
77 /*
78 * This is performance-critical, we want to do it O(1)
79 *
80 * the indexing order of this array favors 1:1 mappings
81 * between pins and IRQs.
82 */
84 static struct irq_pin_list {
85 int apic, pin, next;
86 } irq_2_pin[PIN_MAP_SIZE];
87 static int irq_2_pin_free_entry = NR_IRQS;
89 int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
91 /*
92 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
93 * shared ISA-space IRQs, so we have to support them. We are super
94 * fast in the common case, and fast for shared ISA-space IRQs.
95 */
96 static void add_pin_to_irq(unsigned int irq, int apic, int pin)
97 {
98 struct irq_pin_list *entry = irq_2_pin + irq;
100 while (entry->next) {
101 BUG_ON((entry->apic == apic) && (entry->pin == pin));
102 entry = irq_2_pin + entry->next;
103 }
105 BUG_ON((entry->apic == apic) && (entry->pin == pin));
107 if (entry->pin != -1) {
108 if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
109 panic("io_apic.c: whoops");
110 entry->next = irq_2_pin_free_entry;
111 entry = irq_2_pin + entry->next;
112 irq_2_pin_free_entry = entry->next;
113 entry->next = 0;
114 }
115 entry->apic = apic;
116 entry->pin = pin;
117 }
119 static void remove_pin_at_irq(unsigned int irq, int apic, int pin)
120 {
121 struct irq_pin_list *entry, *prev;
123 for (entry = &irq_2_pin[irq]; ; entry = &irq_2_pin[entry->next]) {
124 if ((entry->apic == apic) && (entry->pin == pin))
125 break;
126 if (!entry->next)
127 BUG();
128 }
130 entry->pin = entry->apic = -1;
132 if (entry != &irq_2_pin[irq]) {
133 /* Removed entry is not at head of list. */
134 prev = &irq_2_pin[irq];
135 while (&irq_2_pin[prev->next] != entry)
136 prev = &irq_2_pin[prev->next];
137 prev->next = entry->next;
138 entry->next = irq_2_pin_free_entry;
139 irq_2_pin_free_entry = entry - irq_2_pin;
140 } else if (entry->next != 0) {
141 /* Removed entry is at head of multi-item list. */
142 prev = entry;
143 entry = &irq_2_pin[entry->next];
144 *prev = *entry;
145 entry->pin = entry->apic = -1;
146 entry->next = irq_2_pin_free_entry;
147 irq_2_pin_free_entry = entry - irq_2_pin;
148 }
149 }
151 /*
152 * Reroute an IRQ to a different pin.
153 */
154 static void __init replace_pin_at_irq(unsigned int irq,
155 int oldapic, int oldpin,
156 int newapic, int newpin)
157 {
158 struct irq_pin_list *entry = irq_2_pin + irq;
160 while (1) {
161 if (entry->apic == oldapic && entry->pin == oldpin) {
162 entry->apic = newapic;
163 entry->pin = newpin;
164 }
165 if (!entry->next)
166 break;
167 entry = irq_2_pin + entry->next;
168 }
169 }
171 static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
172 {
173 struct irq_pin_list *entry = irq_2_pin + irq;
174 unsigned int pin, reg;
176 for (;;) {
177 pin = entry->pin;
178 if (pin == -1)
179 break;
180 reg = io_apic_read(entry->apic, 0x10 + pin*2);
181 reg &= ~disable;
182 reg |= enable;
183 io_apic_modify(entry->apic, 0x10 + pin*2, reg);
184 if (!entry->next)
185 break;
186 entry = irq_2_pin + entry->next;
187 }
188 }
190 /* mask = 1 */
191 static void __mask_IO_APIC_irq (unsigned int irq)
192 {
193 __modify_IO_APIC_irq(irq, 0x00010000, 0);
194 }
196 /* mask = 0 */
197 static void __unmask_IO_APIC_irq (unsigned int irq)
198 {
199 __modify_IO_APIC_irq(irq, 0, 0x00010000);
200 }
202 /* trigger = 0 */
203 static void __edge_IO_APIC_irq (unsigned int irq)
204 {
205 __modify_IO_APIC_irq(irq, 0, 0x00008000);
206 }
208 /* trigger = 1 */
209 static void __level_IO_APIC_irq (unsigned int irq)
210 {
211 __modify_IO_APIC_irq(irq, 0x00008000, 0);
212 }
214 static void mask_IO_APIC_irq (unsigned int irq)
215 {
216 unsigned long flags;
218 spin_lock_irqsave(&ioapic_lock, flags);
219 __mask_IO_APIC_irq(irq);
220 spin_unlock_irqrestore(&ioapic_lock, flags);
221 }
223 static void unmask_IO_APIC_irq (unsigned int irq)
224 {
225 unsigned long flags;
227 spin_lock_irqsave(&ioapic_lock, flags);
228 __unmask_IO_APIC_irq(irq);
229 spin_unlock_irqrestore(&ioapic_lock, flags);
230 }
232 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
233 {
234 struct IO_APIC_route_entry entry;
235 unsigned long flags;
237 /* Check delivery_mode to be sure we're not clearing an SMI pin */
238 spin_lock_irqsave(&ioapic_lock, flags);
239 *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
240 *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
241 spin_unlock_irqrestore(&ioapic_lock, flags);
242 if (entry.delivery_mode == dest_SMI)
243 return;
245 /*
246 * Disable it in the IO-APIC irq-routing table:
247 */
248 memset(&entry, 0, sizeof(entry));
249 entry.mask = 1;
250 spin_lock_irqsave(&ioapic_lock, flags);
251 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
252 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
253 spin_unlock_irqrestore(&ioapic_lock, flags);
254 }
256 static void clear_IO_APIC (void)
257 {
258 int apic, pin;
260 for (apic = 0; apic < nr_ioapics; apic++)
261 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
262 clear_IO_APIC_pin(apic, pin);
263 }
265 #ifdef CONFIG_SMP
266 static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
267 {
268 unsigned long flags;
269 int pin;
270 struct irq_pin_list *entry = irq_2_pin + irq;
271 unsigned int apicid_value;
273 cpus_and(cpumask, cpumask, cpu_online_map);
274 if (cpus_empty(cpumask))
275 cpumask = TARGET_CPUS;
277 apicid_value = cpu_mask_to_apicid(cpumask);
278 /* Prepare to do the io_apic_write */
279 apicid_value = apicid_value << 24;
280 spin_lock_irqsave(&ioapic_lock, flags);
281 for (;;) {
282 pin = entry->pin;
283 if (pin == -1)
284 break;
285 io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
286 if (!entry->next)
287 break;
288 entry = irq_2_pin + entry->next;
289 }
290 set_irq_info(irq, cpumask);
291 spin_unlock_irqrestore(&ioapic_lock, flags);
292 }
293 #endif /* CONFIG_SMP */
295 /*
296 * Find the IRQ entry number of a certain pin.
297 */
298 static int find_irq_entry(int apic, int pin, int type)
299 {
300 int i;
302 for (i = 0; i < mp_irq_entries; i++)
303 if (mp_irqs[i].mpc_irqtype == type &&
304 (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
305 mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
306 mp_irqs[i].mpc_dstirq == pin)
307 return i;
309 return -1;
310 }
312 /*
313 * Find the pin to which IRQ[irq] (ISA) is connected
314 */
315 static int __init find_isa_irq_pin(int irq, int type)
316 {
317 int i;
319 for (i = 0; i < mp_irq_entries; i++) {
320 int lbus = mp_irqs[i].mpc_srcbus;
322 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
323 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
324 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
325 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
326 ) &&
327 (mp_irqs[i].mpc_irqtype == type) &&
328 (mp_irqs[i].mpc_srcbusirq == irq))
330 return mp_irqs[i].mpc_dstirq;
331 }
332 return -1;
333 }
335 static int __init find_isa_irq_apic(int irq, int type)
336 {
337 int i;
339 for (i = 0; i < mp_irq_entries; i++) {
340 int lbus = mp_irqs[i].mpc_srcbus;
342 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
343 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
344 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
345 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
346 ) &&
347 (mp_irqs[i].mpc_irqtype == type) &&
348 (mp_irqs[i].mpc_srcbusirq == irq))
349 break;
350 }
351 if (i < mp_irq_entries) {
352 int apic;
353 for(apic = 0; apic < nr_ioapics; apic++) {
354 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
355 return apic;
356 }
357 }
359 return -1;
360 }
362 /*
363 * Find a specific PCI IRQ entry.
364 * Not an __init, possibly needed by modules
365 */
366 static int pin_2_irq(int idx, int apic, int pin);
368 /*
369 * This function currently is only a helper for the i386 smp boot process where
370 * we need to reprogram the ioredtbls to cater for the cpus which have come online
371 * so mask in all cases should simply be TARGET_CPUS
372 */
373 #ifdef CONFIG_SMP
374 void /*__init*/ setup_ioapic_dest(void)
375 {
376 int pin, ioapic, irq, irq_entry;
378 if (skip_ioapic_setup == 1)
379 return;
381 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
382 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
383 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
384 if (irq_entry == -1)
385 continue;
386 irq = pin_2_irq(irq_entry, ioapic, pin);
387 set_ioapic_affinity_irq(irq, TARGET_CPUS);
388 }
390 }
391 }
392 #endif
394 /*
395 * EISA Edge/Level control register, ELCR
396 */
397 static int EISA_ELCR(unsigned int irq)
398 {
399 if (irq < 16) {
400 unsigned int port = 0x4d0 + (irq >> 3);
401 return (inb(port) >> (irq & 7)) & 1;
402 }
403 apic_printk(APIC_VERBOSE, KERN_INFO
404 "Broken MPtable reports ISA irq %d\n", irq);
405 return 0;
406 }
408 /* EISA interrupts are always polarity zero and can be edge or level
409 * trigger depending on the ELCR value. If an interrupt is listed as
410 * EISA conforming in the MP table, that means its trigger type must
411 * be read in from the ELCR */
413 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
414 #define default_EISA_polarity(idx) (0)
416 /* ISA interrupts are always polarity zero edge triggered,
417 * when listed as conforming in the MP table. */
419 #define default_ISA_trigger(idx) (0)
420 #define default_ISA_polarity(idx) (0)
422 /* PCI interrupts are always polarity one level triggered,
423 * when listed as conforming in the MP table. */
425 #define default_PCI_trigger(idx) (1)
426 #define default_PCI_polarity(idx) (1)
428 /* MCA interrupts are always polarity zero level triggered,
429 * when listed as conforming in the MP table. */
431 #define default_MCA_trigger(idx) (1)
432 #define default_MCA_polarity(idx) (0)
434 /* NEC98 interrupts are always polarity zero edge triggered,
435 * when listed as conforming in the MP table. */
437 #define default_NEC98_trigger(idx) (0)
438 #define default_NEC98_polarity(idx) (0)
440 static int __init MPBIOS_polarity(int idx)
441 {
442 int bus = mp_irqs[idx].mpc_srcbus;
443 int polarity;
445 /*
446 * Determine IRQ line polarity (high active or low active):
447 */
448 switch (mp_irqs[idx].mpc_irqflag & 3)
449 {
450 case 0: /* conforms, ie. bus-type dependent polarity */
451 {
452 switch (mp_bus_id_to_type[bus])
453 {
454 case MP_BUS_ISA: /* ISA pin */
455 {
456 polarity = default_ISA_polarity(idx);
457 break;
458 }
459 case MP_BUS_EISA: /* EISA pin */
460 {
461 polarity = default_EISA_polarity(idx);
462 break;
463 }
464 case MP_BUS_PCI: /* PCI pin */
465 {
466 polarity = default_PCI_polarity(idx);
467 break;
468 }
469 case MP_BUS_MCA: /* MCA pin */
470 {
471 polarity = default_MCA_polarity(idx);
472 break;
473 }
474 case MP_BUS_NEC98: /* NEC 98 pin */
475 {
476 polarity = default_NEC98_polarity(idx);
477 break;
478 }
479 default:
480 {
481 printk(KERN_WARNING "broken BIOS!!\n");
482 polarity = 1;
483 break;
484 }
485 }
486 break;
487 }
488 case 1: /* high active */
489 {
490 polarity = 0;
491 break;
492 }
493 case 2: /* reserved */
494 {
495 printk(KERN_WARNING "broken BIOS!!\n");
496 polarity = 1;
497 break;
498 }
499 case 3: /* low active */
500 {
501 polarity = 1;
502 break;
503 }
504 default: /* invalid */
505 {
506 printk(KERN_WARNING "broken BIOS!!\n");
507 polarity = 1;
508 break;
509 }
510 }
511 return polarity;
512 }
514 static int MPBIOS_trigger(int idx)
515 {
516 int bus = mp_irqs[idx].mpc_srcbus;
517 int trigger;
519 /*
520 * Determine IRQ trigger mode (edge or level sensitive):
521 */
522 switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
523 {
524 case 0: /* conforms, ie. bus-type dependent */
525 {
526 switch (mp_bus_id_to_type[bus])
527 {
528 case MP_BUS_ISA: /* ISA pin */
529 {
530 trigger = default_ISA_trigger(idx);
531 break;
532 }
533 case MP_BUS_EISA: /* EISA pin */
534 {
535 trigger = default_EISA_trigger(idx);
536 break;
537 }
538 case MP_BUS_PCI: /* PCI pin */
539 {
540 trigger = default_PCI_trigger(idx);
541 break;
542 }
543 case MP_BUS_MCA: /* MCA pin */
544 {
545 trigger = default_MCA_trigger(idx);
546 break;
547 }
548 case MP_BUS_NEC98: /* NEC 98 pin */
549 {
550 trigger = default_NEC98_trigger(idx);
551 break;
552 }
553 default:
554 {
555 printk(KERN_WARNING "broken BIOS!!\n");
556 trigger = 1;
557 break;
558 }
559 }
560 break;
561 }
562 case 1: /* edge */
563 {
564 trigger = 0;
565 break;
566 }
567 case 2: /* reserved */
568 {
569 printk(KERN_WARNING "broken BIOS!!\n");
570 trigger = 1;
571 break;
572 }
573 case 3: /* level */
574 {
575 trigger = 1;
576 break;
577 }
578 default: /* invalid */
579 {
580 printk(KERN_WARNING "broken BIOS!!\n");
581 trigger = 0;
582 break;
583 }
584 }
585 return trigger;
586 }
588 static inline int irq_polarity(int idx)
589 {
590 return MPBIOS_polarity(idx);
591 }
593 static inline int irq_trigger(int idx)
594 {
595 return MPBIOS_trigger(idx);
596 }
598 static int pin_2_irq(int idx, int apic, int pin)
599 {
600 int irq, i;
601 int bus = mp_irqs[idx].mpc_srcbus;
603 /*
604 * Debugging check, we are in big trouble if this message pops up!
605 */
606 if (mp_irqs[idx].mpc_dstirq != pin)
607 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
609 switch (mp_bus_id_to_type[bus])
610 {
611 case MP_BUS_ISA: /* ISA pin */
612 case MP_BUS_EISA:
613 case MP_BUS_MCA:
614 case MP_BUS_NEC98:
615 {
616 irq = mp_irqs[idx].mpc_srcbusirq;
617 break;
618 }
619 case MP_BUS_PCI: /* PCI pin */
620 {
621 /*
622 * PCI IRQs are mapped in order
623 */
624 i = irq = 0;
625 while (i < apic)
626 irq += nr_ioapic_registers[i++];
627 irq += pin;
629 /*
630 * For MPS mode, so far only needed by ES7000 platform
631 */
632 if (ioapic_renumber_irq)
633 irq = ioapic_renumber_irq(apic, irq);
635 break;
636 }
637 default:
638 {
639 printk(KERN_ERR "unknown bus type %d.\n",bus);
640 irq = 0;
641 break;
642 }
643 }
645 return irq;
646 }
648 static inline int IO_APIC_irq_trigger(int irq)
649 {
650 int apic, idx, pin;
652 for (apic = 0; apic < nr_ioapics; apic++) {
653 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
654 idx = find_irq_entry(apic,pin,mp_INT);
655 if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
656 return irq_trigger(idx);
657 }
658 }
659 /*
660 * nonexistent IRQs are edge default
661 */
662 return 0;
663 }
665 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
666 u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
668 int assign_irq_vector(int irq)
669 {
670 static unsigned current_vector = FIRST_DYNAMIC_VECTOR, offset = 0;
671 unsigned vector;
673 BUG_ON(irq >= NR_IRQ_VECTORS);
674 spin_lock(&vector_lock);
676 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
677 spin_unlock(&vector_lock);
678 return IO_APIC_VECTOR(irq);
679 }
681 next:
682 current_vector += 8;
684 /* Skip the hypercall vector. */
685 if (current_vector == HYPERCALL_VECTOR)
686 goto next;
688 /* Skip the Linux/BSD fast-trap vector. */
689 if (current_vector == 0x80)
690 goto next;
692 if (current_vector > LAST_DYNAMIC_VECTOR) {
693 offset++;
694 if (!(offset%8)) {
695 spin_unlock(&vector_lock);
696 return -ENOSPC;
697 }
698 current_vector = FIRST_DYNAMIC_VECTOR + offset;
699 }
701 vector = current_vector;
702 vector_irq[vector] = irq;
703 if (irq != AUTO_ASSIGN)
704 IO_APIC_VECTOR(irq) = vector;
706 spin_unlock(&vector_lock);
708 return vector;
709 }
711 static struct hw_interrupt_type ioapic_level_type;
712 static struct hw_interrupt_type ioapic_edge_type;
714 #define IOAPIC_AUTO -1
715 #define IOAPIC_EDGE 0
716 #define IOAPIC_LEVEL 1
718 static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
719 {
720 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
721 trigger == IOAPIC_LEVEL)
722 irq_desc[vector].handler = &ioapic_level_type;
723 else
724 irq_desc[vector].handler = &ioapic_edge_type;
725 }
727 static void __init setup_IO_APIC_irqs(void)
728 {
729 struct IO_APIC_route_entry entry;
730 int apic, pin, idx, irq, first_notcon = 1, vector;
731 unsigned long flags;
733 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
735 for (apic = 0; apic < nr_ioapics; apic++) {
736 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
738 /*
739 * add it to the IO-APIC irq-routing table:
740 */
741 memset(&entry,0,sizeof(entry));
743 entry.delivery_mode = INT_DELIVERY_MODE;
744 entry.dest_mode = INT_DEST_MODE;
745 entry.mask = 0; /* enable IRQ */
746 entry.dest.logical.logical_dest =
747 cpu_mask_to_apicid(TARGET_CPUS);
749 idx = find_irq_entry(apic,pin,mp_INT);
750 if (idx == -1) {
751 if (first_notcon) {
752 apic_printk(APIC_VERBOSE, KERN_DEBUG
753 " IO-APIC (apicid-pin) %d-%d",
754 mp_ioapics[apic].mpc_apicid,
755 pin);
756 first_notcon = 0;
757 } else
758 apic_printk(APIC_VERBOSE, ", %d-%d",
759 mp_ioapics[apic].mpc_apicid, pin);
760 continue;
761 }
763 entry.trigger = irq_trigger(idx);
764 entry.polarity = irq_polarity(idx);
766 if (irq_trigger(idx)) {
767 entry.trigger = 1;
768 entry.mask = 1;
769 }
771 irq = pin_2_irq(idx, apic, pin);
772 /*
773 * skip adding the timer int on secondary nodes, which causes
774 * a small but painful rift in the time-space continuum
775 */
776 if (multi_timer_check(apic, irq))
777 continue;
778 else
779 add_pin_to_irq(irq, apic, pin);
781 if (!apic && !IO_APIC_IRQ(irq))
782 continue;
784 if (IO_APIC_IRQ(irq)) {
785 vector = assign_irq_vector(irq);
786 entry.vector = vector;
787 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
789 if (!apic && (irq < 16))
790 disable_8259A_irq(irq);
791 }
792 spin_lock_irqsave(&ioapic_lock, flags);
793 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
794 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
795 set_native_irq_info(entry.vector, TARGET_CPUS);
796 spin_unlock_irqrestore(&ioapic_lock, flags);
797 }
798 }
800 if (!first_notcon)
801 apic_printk(APIC_VERBOSE, " not connected.\n");
802 }
804 /*
805 * Set up the 8259A-master output pin:
806 */
807 static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
808 {
809 struct IO_APIC_route_entry entry;
810 unsigned long flags;
812 memset(&entry,0,sizeof(entry));
814 disable_8259A_irq(0);
816 /* mask LVT0 */
817 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
819 /*
820 * We use logical delivery to get the timer IRQ
821 * to the first CPU.
822 */
823 entry.dest_mode = INT_DEST_MODE;
824 entry.mask = 0; /* unmask IRQ now */
825 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
826 entry.delivery_mode = INT_DELIVERY_MODE;
827 entry.polarity = 0;
828 entry.trigger = 0;
829 entry.vector = vector;
831 /*
832 * The timer IRQ doesn't have to know that behind the
833 * scene we have a 8259A-master in AEOI mode ...
834 */
835 irq_desc[IO_APIC_VECTOR(0)].handler = &ioapic_edge_type;
837 /*
838 * Add it to the IO-APIC irq-routing table:
839 */
840 spin_lock_irqsave(&ioapic_lock, flags);
841 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
842 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
843 spin_unlock_irqrestore(&ioapic_lock, flags);
845 enable_8259A_irq(0);
846 }
848 static inline void UNEXPECTED_IO_APIC(void)
849 {
850 }
852 void /*__init*/ __print_IO_APIC(void)
853 {
854 int apic, i;
854 int apic, i;
855 union IO_APIC_reg_00 reg_00;
856 union IO_APIC_reg_01 reg_01;
857 union IO_APIC_reg_02 reg_02;
858 union IO_APIC_reg_03 reg_03;
859 unsigned long flags;
861 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
862 for (i = 0; i < nr_ioapics; i++)
863 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
864 mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
866 /*
867 * We are a bit conservative about what we expect. We have to
868 * know about every hardware change ASAP.
869 */
870 printk(KERN_INFO "testing the IO APIC.......................\n");
872 for (apic = 0; apic < nr_ioapics; apic++) {
874 spin_lock_irqsave(&ioapic_lock, flags);
875 reg_00.raw = io_apic_read(apic, 0);
876 reg_01.raw = io_apic_read(apic, 1);
877 if (reg_01.bits.version >= 0x10)
878 reg_02.raw = io_apic_read(apic, 2);
879 if (reg_01.bits.version >= 0x20)
880 reg_03.raw = io_apic_read(apic, 3);
881 spin_unlock_irqrestore(&ioapic_lock, flags);
883 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
884 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
885 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
886 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
887 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
888 if (reg_00.bits.ID >= get_physical_broadcast())
889 UNEXPECTED_IO_APIC();
890 if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
891 UNEXPECTED_IO_APIC();
893 printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
894 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
895 if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
896 (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
897 (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
898 (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
899 (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
900 (reg_01.bits.entries != 0x2E) &&
901 (reg_01.bits.entries != 0x3F)
902 )
903 UNEXPECTED_IO_APIC();
905 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
906 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
907 if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
908 (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
909 (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
910 (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
911 (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
912 )
913 UNEXPECTED_IO_APIC();
914 if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
915 UNEXPECTED_IO_APIC();
917 /*
918 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
919 * but the value of reg_02 is read as the previous read register
920 * value, so ignore it if reg_02 == reg_01.
921 */
922 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
923 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
924 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
925 if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
926 UNEXPECTED_IO_APIC();
927 }
929 /*
930 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
931 * or reg_03, but the value of reg_0[23] is read as the previous read
932 * register value, so ignore it if reg_03 == reg_0[12].
933 */
934 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
935 reg_03.raw != reg_01.raw) {
936 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
937 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
938 if (reg_03.bits.__reserved_1)
939 UNEXPECTED_IO_APIC();
940 }
942 printk(KERN_DEBUG ".... IRQ redirection table:\n");
944 printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
945 " Stat Dest Deli Vect: \n");
947 for (i = 0; i <= reg_01.bits.entries; i++) {
948 struct IO_APIC_route_entry entry;
950 spin_lock_irqsave(&ioapic_lock, flags);
951 *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
952 *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
953 spin_unlock_irqrestore(&ioapic_lock, flags);
955 printk(KERN_DEBUG " %02x %03X %02X ",
956 i,
957 entry.dest.logical.logical_dest,
958 entry.dest.physical.physical_dest
959 );
961 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
962 entry.mask,
963 entry.trigger,
964 entry.irr,
965 entry.polarity,
966 entry.delivery_status,
967 entry.dest_mode,
968 entry.delivery_mode,
969 entry.vector
970 );
971 }
972 }
973 printk(KERN_INFO "Using vector-based indexing\n");
974 printk(KERN_DEBUG "IRQ to pin mappings:\n");
975 for (i = 0; i < NR_IRQS; i++) {
976 struct irq_pin_list *entry = irq_2_pin + i;
977 if (entry->pin < 0)
978 continue;
979 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
980 for (;;) {
981 printk("-> %d:%d", entry->apic, entry->pin);
982 if (!entry->next)
983 break;
984 entry = irq_2_pin + entry->next;
985 }
986 printk("\n");
987 }
989 printk(KERN_INFO ".................................... done.\n");
991 return;
992 }
994 void print_IO_APIC(void)
995 {
996 if (apic_verbosity != APIC_QUIET)
997 __print_IO_APIC();
998 }
1000 void print_IO_APIC_keyhandler(unsigned char key)
1001 {
1002 __print_IO_APIC();
1003 }
1005 static void __init enable_IO_APIC(void)
1007 union IO_APIC_reg_01 reg_01;
1008 int i8259_apic, i8259_pin;
1009 int i, apic;
1010 unsigned long flags;
1012 for (i = 0; i < PIN_MAP_SIZE; i++) {
1013 irq_2_pin[i].pin = -1;
1014 irq_2_pin[i].next = 0;
1017 /* Initialise dynamic irq_2_pin free list. */
1018 for (i = NR_IRQS; i < PIN_MAP_SIZE; i++)
1019 irq_2_pin[i].next = i + 1;
1021 /*
1022 * The number of IO-APIC IRQ registers (== #pins):
1023 */
1024 for (apic = 0; apic < nr_ioapics; apic++) {
1025 spin_lock_irqsave(&ioapic_lock, flags);
1026 reg_01.raw = io_apic_read(apic, 1);
1027 spin_unlock_irqrestore(&ioapic_lock, flags);
1028 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1030 for(apic = 0; apic < nr_ioapics; apic++) {
1031 int pin;
1032 /* See if any of the pins is in ExtINT mode */
1033 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1034 struct IO_APIC_route_entry entry;
1035 spin_lock_irqsave(&ioapic_lock, flags);
1036 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1037 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1038 spin_unlock_irqrestore(&ioapic_lock, flags);
1041 /* If the interrupt line is enabled and in ExtInt mode
1042 * I have found the pin where the i8259 is connected.
1043 */
1044 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1045 ioapic_i8259.apic = apic;
1046 ioapic_i8259.pin = pin;
1047 goto found_i8259;
1051 found_i8259:
1052 /* Look to see whether the MP table has reported the ExtINT */
1053 /* If we could not find the appropriate pin by looking at the ioapic,
1054 * the i8259 probably is not connected to the ioapic, but give the
1055 * mptable a chance anyway.
1056 */
1057 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1058 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1059 /* Trust the MP table if nothing is setup in the hardware */
1060 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1061 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1062 ioapic_i8259.pin = i8259_pin;
1063 ioapic_i8259.apic = i8259_apic;
1065 /* Complain if the MP table and the hardware disagree */
1066 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1067 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1069 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1072 /*
1073 * Do not trust the IO-APIC being empty at bootup
1074 */
1075 clear_IO_APIC();
1078 /*
1079 * Not an __init, needed by the reboot code
1080 */
1081 void disable_IO_APIC(void)
1083 /*
1084 * Clear the IO-APIC before rebooting:
1085 */
1086 clear_IO_APIC();
1088 /*
1089 * If the i8259 is routed through an IOAPIC
1090 * Put that IOAPIC in virtual wire mode
1091 * so legacy interrupts can be delivered.
1092 */
1093 if (ioapic_i8259.pin != -1) {
1094 struct IO_APIC_route_entry entry;
1095 unsigned long flags;
1097 memset(&entry, 0, sizeof(entry));
1098 entry.mask = 0; /* Enabled */
1099 entry.trigger = 0; /* Edge */
1100 entry.irr = 0;
1101 entry.polarity = 0; /* High */
1102 entry.delivery_status = 0;
1103 entry.dest_mode = 0; /* Physical */
1104 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1105 entry.vector = 0;
1106 entry.dest.physical.physical_dest =
1107 GET_APIC_ID(apic_read(APIC_ID));
1109 /*
1110 * Add it to the IO-APIC irq-routing table:
1111 */
1112 spin_lock_irqsave(&ioapic_lock, flags);
1113 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1114 *(((int *)&entry)+1));
1115 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1116 *(((int *)&entry)+0));
1117 spin_unlock_irqrestore(&ioapic_lock, flags);
1119 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1122 /*
1123 * function to set the IO-APIC physical IDs based on the
1124 * values stored in the MPC table.
1126 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
1127 */
1129 #ifndef CONFIG_X86_NUMAQ
1130 static void __init setup_ioapic_ids_from_mpc(void)
1132 union IO_APIC_reg_00 reg_00;
1133 physid_mask_t phys_id_present_map;
1134 int apic;
1135 int i;
1136 unsigned char old_id;
1137 unsigned long flags;
1139 /*
1140 * Don't check I/O APIC IDs for xAPIC systems. They have
1141 * no meaning without the serial APIC bus.
1142 */
1143 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1144 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1145 return;
1147 /*
1148 * This is broken; anything with a real cpu count has to
1149 * circumvent this idiocy regardless.
1150 */
1151 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
1153 /*
1154 * Set the IOAPIC ID to the value stored in the MPC table.
1155 */
1156 for (apic = 0; apic < nr_ioapics; apic++) {
1158 /* Read the register 0 value */
1159 spin_lock_irqsave(&ioapic_lock, flags);
1160 reg_00.raw = io_apic_read(apic, 0);
1161 spin_unlock_irqrestore(&ioapic_lock, flags);
1163 old_id = mp_ioapics[apic].mpc_apicid;
1165 if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
1166 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
1167 apic, mp_ioapics[apic].mpc_apicid);
1168 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1169 reg_00.bits.ID);
1170 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
1173 /*
1174 * Sanity check, is the ID really free? Every APIC in a
1175 * system must have a unique ID or we get lots of nice
1176 * 'stuck on smp_invalidate_needed IPI wait' messages.
1177 */
1178 if (check_apicid_used(phys_id_present_map,
1179 mp_ioapics[apic].mpc_apicid)) {
1180 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
1181 apic, mp_ioapics[apic].mpc_apicid);
1182 for (i = 0; i < get_physical_broadcast(); i++)
1183 if (!physid_isset(i, phys_id_present_map))
1184 break;
1185 if (i >= get_physical_broadcast())
1186 panic("Max APIC ID exceeded!\n");
1187 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
1188 i);
1189 physid_set(i, phys_id_present_map);
1190 mp_ioapics[apic].mpc_apicid = i;
1191 } else {
1192 physid_mask_t tmp;
1193 tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
1194 apic_printk(APIC_VERBOSE, "Setting %d in the "
1195 "phys_id_present_map\n",
1196 mp_ioapics[apic].mpc_apicid);
1197 physids_or(phys_id_present_map, phys_id_present_map, tmp);
1201 /*
1202 * We need to adjust the IRQ routing table
1203 * if the ID changed.
1204 */
1205 if (old_id != mp_ioapics[apic].mpc_apicid)
1206 for (i = 0; i < mp_irq_entries; i++)
1207 if (mp_irqs[i].mpc_dstapic == old_id)
1208 mp_irqs[i].mpc_dstapic
1209 = mp_ioapics[apic].mpc_apicid;
1211 /*
1212 * Read the right value from the MPC table and
1213 * write it into the ID register.
1214 */
1215 apic_printk(APIC_VERBOSE, KERN_INFO
1216 "...changing IO-APIC physical APIC ID to %d ...",
1217 mp_ioapics[apic].mpc_apicid);
1219 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1220 spin_lock_irqsave(&ioapic_lock, flags);
1221 io_apic_write(apic, 0, reg_00.raw);
1222 spin_unlock_irqrestore(&ioapic_lock, flags);
1224 /*
1225 * Sanity check
1226 */
1227 spin_lock_irqsave(&ioapic_lock, flags);
1228 reg_00.raw = io_apic_read(apic, 0);
1229 spin_unlock_irqrestore(&ioapic_lock, flags);
1230 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
1231 printk("could not set ID!\n");
1232 else
1233 apic_printk(APIC_VERBOSE, " ok.\n");
1236 #else
1237 static void __init setup_ioapic_ids_from_mpc(void) { }
1238 #endif
1240 /*
1241 * There is a nasty bug in some older SMP boards, their mptable lies
1242 * about the timer IRQ. We do the following to work around the situation:
1244 * - timer IRQ defaults to IO-APIC IRQ
1245 * - if this function detects that timer IRQs are defunct, then we fall
1246 * back to ISA timer IRQs
1247 */
1248 static int __init timer_irq_works(void)
1250 unsigned long t1 = jiffies;
1252 local_irq_enable();
1253 /* Let ten ticks pass... */
1254 mdelay((10 * 1000) / HZ);
1256 /*
1257 * Expect a few ticks at least, to be sure some possible
1258 * glue logic does not lock up after one or two first
1259 * ticks in a non-ExtINT mode. Also the local APIC
1260 * might have cached one ExtINT interrupt. Finally, at
1261 * least one tick may be lost due to delays.
1262 */
1263 if (jiffies - t1 > 4)
1264 return 1;
1266 return 0;
1269 /*
1270 * In the SMP+IOAPIC case it might happen that there are an unspecified
1271 * number of pending IRQ events unhandled. These cases are very rare,
1272 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
1273 * better to do it this way as thus we do not have to be aware of
1274 * 'pending' interrupts in the IRQ path, except at this point.
1275 */
1276 /*
1277 * Edge triggered needs to resend any interrupt
1278 * that was delayed but this is now handled in the device
1279 * independent code.
1280 */
1282 /*
1283 * Starting up an edge-triggered IO-APIC interrupt is
1284 * nasty - we need to make sure that we get the edge.
1285 * If it is already asserted for some reason, we need to
1286 * return 1 to indicate that it was pending.
1288 * This is not complete - we should be able to fake
1289 * an edge even if it isn't on the 8259A...
1290 */
1291 static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1293 int was_pending = 0;
1294 unsigned long flags;
1296 spin_lock_irqsave(&ioapic_lock, flags);
1297 if (irq < 16) {
1298 disable_8259A_irq(irq);
1299 if (i8259A_irq_pending(irq))
1300 was_pending = 1;
1302 __unmask_IO_APIC_irq(irq);
1303 spin_unlock_irqrestore(&ioapic_lock, flags);
1305 return was_pending;
1308 /*
1309 * Once we have recorded IRQ_PENDING already, we can mask the
1310 * interrupt for real. This prevents IRQ storms from unhandled
1311 * devices.
1312 */
1313 static void ack_edge_ioapic_irq(unsigned int irq)
1315 if ((irq_desc[IO_APIC_VECTOR(irq)].status & (IRQ_PENDING | IRQ_DISABLED))
1316 == (IRQ_PENDING | IRQ_DISABLED))
1317 mask_IO_APIC_irq(irq);
1318 ack_APIC_irq();
1321 /*
1322 * Level triggered interrupts can just be masked,
1323 * and shutting down and starting up the interrupt
1324 * is the same as enabling and disabling them -- except
1325 * with a startup need to return a "was pending" value.
1327 * Level triggered interrupts are special because we
1328 * do not touch any IO-APIC register while handling
1329 * them. We ack the APIC in the end-IRQ handler, not
1330 * in the start-IRQ-handler. Protection against reentrance
1331 * from the same interrupt is still provided, both by the
1332 * generic IRQ layer and by the fact that an unacked local
1333 * APIC does not accept IRQs.
1334 */
1335 static unsigned int startup_level_ioapic_irq (unsigned int irq)
1337 unmask_IO_APIC_irq(irq);
1339 return 0; /* don't check for pending */
1342 int ioapic_ack_new = 1;
1343 static void setup_ioapic_ack(char *s)
1345 if ( !strcmp(s, "old") )
1346 ioapic_ack_new = 0;
1347 else if ( !strcmp(s, "new") )
1348 ioapic_ack_new = 1;
1349 else
1350 printk("Unknown ioapic_ack value specified: '%s'\n", s);
1352 custom_param("ioapic_ack", setup_ioapic_ack);
1354 static void mask_and_ack_level_ioapic_irq (unsigned int irq)
1356 unsigned long v;
1357 int i;
1359 if ( ioapic_ack_new )
1360 return;
1362 mask_IO_APIC_irq(irq);
1363 /*
1364 * It appears there is an erratum which affects at least version 0x11
1365 * of I/O APIC (that's the 82093AA and cores integrated into various
1366 * chipsets). Under certain conditions a level-triggered interrupt is
1367 * erroneously delivered as edge-triggered one but the respective IRR
1368 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1369 * message but it will never arrive and further interrupts are blocked
1370 * from the source. The exact reason is so far unknown, but the
1371 * phenomenon was observed when two consecutive interrupt requests
1372 * from a given source get delivered to the same CPU and the source is
1373 * temporarily disabled in between.
1375 * A workaround is to simulate an EOI message manually. We achieve it
1376 * by setting the trigger mode to edge and then to level when the edge
1377 * trigger mode gets detected in the TMR of a local APIC for a
1378 * level-triggered interrupt. We mask the source for the time of the
1379 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1380 * The idea is from Manfred Spraul. --macro
1381 */
1382 i = IO_APIC_VECTOR(irq);
1384 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1386 ack_APIC_irq();
1388 if (!(v & (1 << (i & 0x1f)))) {
1389 atomic_inc(&irq_mis_count);
1390 spin_lock(&ioapic_lock);
1391 __edge_IO_APIC_irq(irq);
1392 __level_IO_APIC_irq(irq);
1393 spin_unlock(&ioapic_lock);
1397 static void end_level_ioapic_irq (unsigned int irq)
1399 unsigned long v;
1400 int i;
1402 if ( !ioapic_ack_new )
1404 if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
1405 unmask_IO_APIC_irq(irq);
1406 return;
1409 /*
1410 * It appears there is an erratum which affects at least version 0x11
1411 * of I/O APIC (that's the 82093AA and cores integrated into various
1412 * chipsets). Under certain conditions a level-triggered interrupt is
1413 * erroneously delivered as edge-triggered one but the respective IRR
1414 * bit gets set nevertheless. As a result the I/O unit expects an EOI
1415 * message but it will never arrive and further interrupts are blocked
1416 * from the source. The exact reason is so far unknown, but the
1417 * phenomenon was observed when two consecutive interrupt requests
1418 * from a given source get delivered to the same CPU and the source is
1419 * temporarily disabled in between.
1421 * A workaround is to simulate an EOI message manually. We achieve it
1422 * by setting the trigger mode to edge and then to level when the edge
1423 * trigger mode gets detected in the TMR of a local APIC for a
1424 * level-triggered interrupt. We mask the source for the time of the
1425 * operation to prevent an edge-triggered interrupt escaping meanwhile.
1426 * The idea is from Manfred Spraul. --macro
1427 */
1428 i = IO_APIC_VECTOR(irq);
1430 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
1432 ack_APIC_irq();
1434 if (!(v & (1 << (i & 0x1f)))) {
1435 atomic_inc(&irq_mis_count);
1436 spin_lock(&ioapic_lock);
1437 __mask_IO_APIC_irq(irq);
1438 __edge_IO_APIC_irq(irq);
1439 __level_IO_APIC_irq(irq);
1440 if ( !(irq_desc[IO_APIC_VECTOR(irq)].status & IRQ_DISABLED) )
1441 __unmask_IO_APIC_irq(irq);
1442 spin_unlock(&ioapic_lock);
1446 static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1448 int irq = vector_to_irq(vector);
1449 return startup_edge_ioapic_irq(irq);
1452 static void ack_edge_ioapic_vector(unsigned int vector)
1454 int irq = vector_to_irq(vector);
1455 ack_edge_ioapic_irq(irq);
1458 static unsigned int startup_level_ioapic_vector(unsigned int vector)
1460 int irq = vector_to_irq(vector);
1461 return startup_level_ioapic_irq (irq);
1464 static void mask_and_ack_level_ioapic_vector(unsigned int vector)
1466 int irq = vector_to_irq(vector);
1467 mask_and_ack_level_ioapic_irq(irq);
1470 static void end_level_ioapic_vector(unsigned int vector)
1472 int irq = vector_to_irq(vector);
1473 end_level_ioapic_irq(irq);
1476 static void mask_IO_APIC_vector(unsigned int vector)
1478 int irq = vector_to_irq(vector);
1479 mask_IO_APIC_irq(irq);
1482 static void unmask_IO_APIC_vector(unsigned int vector)
1484 int irq = vector_to_irq(vector);
1485 unmask_IO_APIC_irq(irq);
1488 static void set_ioapic_affinity_vector(
1489 unsigned int vector, cpumask_t cpu_mask)
1491 int irq = vector_to_irq(vector);
1493 set_native_irq_info(vector, cpu_mask);
1494 set_ioapic_affinity_irq(irq, cpu_mask);
1497 static void disable_edge_ioapic_vector(unsigned int vector)
1501 static void end_edge_ioapic_vector(unsigned int vector)
1505 /*
1506 * Level and edge triggered IO-APIC interrupts need different handling,
1507 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
1508 * handled with the level-triggered descriptor, but that one has slightly
1509 * more overhead. Level-triggered interrupts cannot be handled with the
1510 * edge-triggered handler, without risking IRQ storms and other ugly
1511 * races.
1512 */
1513 static struct hw_interrupt_type ioapic_edge_type = {
1514 .typename = "IO-APIC-edge",
1515 .startup = startup_edge_ioapic_vector,
1516 .shutdown = disable_edge_ioapic_vector,
1517 .enable = unmask_IO_APIC_vector,
1518 .disable = disable_edge_ioapic_vector,
1519 .ack = ack_edge_ioapic_vector,
1520 .end = end_edge_ioapic_vector,
1521 .set_affinity = set_ioapic_affinity_vector,
1522 };
1524 static struct hw_interrupt_type ioapic_level_type = {
1525 .typename = "IO-APIC-level",
1526 .startup = startup_level_ioapic_vector,
1527 .shutdown = mask_IO_APIC_vector,
1528 .enable = unmask_IO_APIC_vector,
1529 .disable = mask_IO_APIC_vector,
1530 .ack = mask_and_ack_level_ioapic_vector,
1531 .end = end_level_ioapic_vector,
1532 .set_affinity = set_ioapic_affinity_vector,
1533 };
1535 static inline void init_IO_APIC_traps(void)
1537 int irq;
1538 /* Xen: This is way simpler than the Linux implementation. */
1539 for (irq = 0; irq < 16 ; irq++)
1540 if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq))
1541 make_8259A_irq(irq);
1544 static void enable_lapic_vector(unsigned int vector)
1546 unsigned long v;
1548 v = apic_read(APIC_LVT0);
1549 apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
1552 static void disable_lapic_vector(unsigned int vector)
1554 unsigned long v;
1556 v = apic_read(APIC_LVT0);
1557 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
1560 static void ack_lapic_vector(unsigned int vector)
1562 ack_APIC_irq();
1565 static void end_lapic_vector(unsigned int vector) { /* nothing */ }
1567 static struct hw_interrupt_type lapic_irq_type = {
1568 .typename = "local-APIC-edge",
1569 .startup = NULL, /* startup_irq() not used for IRQ0 */
1570 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
1571 .enable = enable_lapic_vector,
1572 .disable = disable_lapic_vector,
1573 .ack = ack_lapic_vector,
1574 .end = end_lapic_vector
1575 };
1577 /*
1578 * This looks a bit hackish but it's about the only one way of sending
1579 * a few INTA cycles to 8259As and any associated glue logic. ICR does
1580 * not support the ExtINT mode, unfortunately. We need to send these
1581 * cycles as some i82489DX-based boards have glue logic that keeps the
1582 * 8259A interrupt line asserted until INTA. --macro
1583 */
1584 static inline void unlock_ExtINT_logic(void)
1586 int apic, pin, i;
1587 struct IO_APIC_route_entry entry0, entry1;
1588 unsigned char save_control, save_freq_select;
1589 unsigned long flags;
1591 pin = find_isa_irq_pin(8, mp_INT);
1592 apic = find_isa_irq_apic(8, mp_INT);
1593 if (pin == -1)
1594 return;
1596 spin_lock_irqsave(&ioapic_lock, flags);
1597 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1598 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1599 spin_unlock_irqrestore(&ioapic_lock, flags);
1600 clear_IO_APIC_pin(apic, pin);
1602 memset(&entry1, 0, sizeof(entry1));
1604 entry1.dest_mode = 0; /* physical delivery */
1605 entry1.mask = 0; /* unmask IRQ now */
1606 entry1.dest.physical.physical_dest = hard_smp_processor_id();
1607 entry1.delivery_mode = dest_ExtINT;
1608 entry1.polarity = entry0.polarity;
1609 entry1.trigger = 0;
1610 entry1.vector = 0;
1612 spin_lock_irqsave(&ioapic_lock, flags);
1613 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
1614 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
1615 spin_unlock_irqrestore(&ioapic_lock, flags);
1617 save_control = CMOS_READ(RTC_CONTROL);
1618 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
1619 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
1620 RTC_FREQ_SELECT);
1621 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
1623 i = 100;
1624 while (i-- > 0) {
1625 mdelay(10);
1626 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
1627 i -= 10;
1630 CMOS_WRITE(save_control, RTC_CONTROL);
1631 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1632 clear_IO_APIC_pin(apic, pin);
1634 spin_lock_irqsave(&ioapic_lock, flags);
1635 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
1636 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
1637 spin_unlock_irqrestore(&ioapic_lock, flags);
1640 int timer_uses_ioapic_pin_0;
1642 /*
1643 * This code may look a bit paranoid, but it's supposed to cooperate with
1644 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
1645 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
1646 * fanatically on his truly buggy board.
1647 */
1648 static inline void check_timer(void)
1650 int apic1, pin1, apic2, pin2;
1651 int vector;
1653 /*
1654 * get/set the timer IRQ vector:
1655 */
1656 disable_8259A_irq(0);
1657 vector = assign_irq_vector(0);
1659 irq_desc[IO_APIC_VECTOR(0)].action = irq_desc[LEGACY_VECTOR(0)].action;
1660 irq_desc[IO_APIC_VECTOR(0)].depth = 0;
1661 irq_desc[IO_APIC_VECTOR(0)].status &= ~IRQ_DISABLED;
1663 /*
1664 * Subtle, code in do_timer_interrupt() expects an AEOI
1665 * mode for the 8259A whenever interrupts are routed
1666 * through I/O APICs. Also IRQ0 has to be enabled in
1667 * the 8259A which implies the virtual wire has to be
1668 * disabled in the local APIC.
1669 */
1670 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1671 init_8259A(1);
1672 /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
1673 /*timer_ack = 1;*/
1674 /*enable_8259A_irq(0);*/
1676 pin1 = find_isa_irq_pin(0, mp_INT);
1677 apic1 = find_isa_irq_apic(0, mp_INT);
1678 pin2 = ioapic_i8259.pin;
1679 apic2 = ioapic_i8259.apic;
1681 if (pin1 == 0)
1682 timer_uses_ioapic_pin_0 = 1;
1684 printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1685 vector, apic1, pin1, apic2, pin2);
1687 if (pin1 != -1) {
1688 /*
1689 * Ok, does IRQ0 through the IOAPIC work?
1690 */
1691 unmask_IO_APIC_irq(0);
1692 if (timer_irq_works()) {
1693 if (disable_timer_pin_1 > 0)
1694 clear_IO_APIC_pin(apic1, pin1);
1695 return;
1697 clear_IO_APIC_pin(apic1, pin1);
1698 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
1699 "IO-APIC\n");
1702 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
1703 if (pin2 != -1) {
1704 printk("\n..... (found pin %d) ...", pin2);
1705 /*
1706 * legacy devices should be connected to IO APIC #0
1707 */
1708 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
1709 if (timer_irq_works()) {
1710 printk("works.\n");
1711 if (pin1 != -1)
1712 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
1713 else
1714 add_pin_to_irq(0, apic2, pin2);
1715 return;
1717 /*
1718 * Cleanup, just in case ...
1719 */
1720 clear_IO_APIC_pin(apic2, pin2);
1722 printk(" failed.\n");
1724 if (nmi_watchdog == NMI_IO_APIC) {
1725 printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
1726 nmi_watchdog = 0;
1729 printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
1731 disable_8259A_irq(0);
1732 irq_desc[vector].handler = &lapic_irq_type;
1733 apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
1734 enable_8259A_irq(0);
1736 if (timer_irq_works()) {
1737 printk(" works.\n");
1738 return;
1740 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
1741 printk(" failed.\n");
1743 printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
1745 /*timer_ack = 0;*/
1746 init_8259A(0);
1747 make_8259A_irq(0);
1748 apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
1750 unlock_ExtINT_logic();
1752 if (timer_irq_works()) {
1753 printk(" works.\n");
1754 return;
1756 printk(" failed :(.\n");
1757 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
1758 "report. Then try booting with the 'noapic' option");
1761 /*
1763 * IRQ's that are handled by the PIC in the MPS IOAPIC case.
1764 * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
1765 * Linux doesn't really care, as it's not actually used
1766 * for any interrupt handling anyway.
1767 */
1768 #define PIC_IRQS (1 << PIC_CASCADE_IR)
1770 void __init setup_IO_APIC(void)
1772 enable_IO_APIC();
1774 if (acpi_ioapic)
1775 io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
1776 else
1777 io_apic_irqs = ~PIC_IRQS;
1779 printk("ENABLING IO-APIC IRQs\n");
1780 printk(" -> Using %s ACK method\n", ioapic_ack_new ? "new" : "old");
1782 /*
1783 * Set up IO-APIC IRQ routing.
1784 */
1785 if (!acpi_ioapic)
1786 setup_ioapic_ids_from_mpc();
1787 sync_Arb_IDs();
1788 setup_IO_APIC_irqs();
1789 init_IO_APIC_traps();
1790 check_timer();
1791 print_IO_APIC();
1793 register_keyhandler('z', print_IO_APIC_keyhandler, "print ioapic info");
1796 struct IO_APIC_route_entry *ioapic_pm_state=NULL;
1798 void ioapic_pm_state_alloc(void)
1800 int i, nr_entry = 0;
1802 if (ioapic_pm_state != NULL)
1803 return;
1805 for (i = 0; i < nr_ioapics; i++)
1806 nr_entry += nr_ioapic_registers[i];
1808 ioapic_pm_state = _xmalloc(sizeof(struct IO_APIC_route_entry)*nr_entry,
1809 sizeof(struct IO_APIC_route_entry));
1812 int ioapic_suspend(void)
1814 struct IO_APIC_route_entry *entry;
1815 unsigned long flags;
1816 int apic,i;
1818 ioapic_pm_state_alloc();
1820 if (ioapic_pm_state == NULL) {
1821 printk("Cannot suspend ioapic due to lack of memory\n");
1822 return 1;
1825 entry = ioapic_pm_state;
1827 spin_lock_irqsave(&ioapic_lock, flags);
1828 for (apic = 0; apic < nr_ioapics; apic++) {
1829 for (i = 0; i < nr_ioapic_registers[apic]; i ++, entry ++ ) {
1830 *(((int *)entry) + 1) = io_apic_read(apic, 0x11 + 2 * i);
1831 *(((int *)entry) + 0) = io_apic_read(apic, 0x10 + 2 * i);
1834 spin_unlock_irqrestore(&ioapic_lock, flags);
1836 return 0;
1839 int ioapic_resume(void)
1841 struct IO_APIC_route_entry *entry;
1842 unsigned long flags;
1843 union IO_APIC_reg_00 reg_00;
1844 int i,apic;
1846 if (ioapic_pm_state == NULL){
1847 printk("Cannot resume ioapic due to lack of memory\n");
1848 return 1;
1851 entry = ioapic_pm_state;
1853 spin_lock_irqsave(&ioapic_lock, flags);
1854 for (apic = 0; apic < nr_ioapics; apic++){
1855 reg_00.raw = io_apic_read(apic, 0);
1856 if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid) {
1857 reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
1858 io_apic_write(apic, 0, reg_00.raw);
1860 for (i = 0; i < nr_ioapic_registers[apic]; i++, entry++) {
1861 io_apic_write(apic, 0x11+2*i, *(((int *)entry)+1));
1862 io_apic_write(apic, 0x10+2*i, *(((int *)entry)+0));
1865 spin_unlock_irqrestore(&ioapic_lock, flags);
1867 return 0;
1870 /* --------------------------------------------------------------------------
1871 ACPI-based IOAPIC Configuration
1872 -------------------------------------------------------------------------- */
1874 #ifdef CONFIG_ACPI_BOOT
1876 int __init io_apic_get_unique_id (int ioapic, int apic_id)
1878 union IO_APIC_reg_00 reg_00;
1879 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
1880 physid_mask_t tmp;
1881 unsigned long flags;
1882 int i = 0;
1884 /*
1885 * The P4 platform supports up to 256 APIC IDs on two separate APIC
1886 * buses (one for LAPICs, one for IOAPICs), where predecessors only
1887 * supports up to 16 on one shared APIC bus.
1889 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
1890 * advantage of new APIC bus architecture.
1891 */
1893 if (physids_empty(apic_id_map))
1894 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
1896 spin_lock_irqsave(&ioapic_lock, flags);
1897 reg_00.raw = io_apic_read(ioapic, 0);
1898 spin_unlock_irqrestore(&ioapic_lock, flags);
1900 if (apic_id >= get_physical_broadcast()) {
1901 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
1902 "%d\n", ioapic, apic_id, reg_00.bits.ID);
1903 apic_id = reg_00.bits.ID;
1906 /*
1907 * Every APIC in a system must have a unique ID or we get lots of nice
1908 * 'stuck on smp_invalidate_needed IPI wait' messages.
1909 */
1910 if (check_apicid_used(apic_id_map, apic_id)) {
1912 for (i = 0; i < get_physical_broadcast(); i++) {
1913 if (!check_apicid_used(apic_id_map, i))
1914 break;
1917 if (i == get_physical_broadcast())
1918 panic("Max apic_id exceeded!\n");
1920 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
1921 "trying %d\n", ioapic, apic_id, i);
1923 apic_id = i;
1926 tmp = apicid_to_cpu_present(apic_id);
1927 physids_or(apic_id_map, apic_id_map, tmp);
1929 if (reg_00.bits.ID != apic_id) {
1930 reg_00.bits.ID = apic_id;
1932 spin_lock_irqsave(&ioapic_lock, flags);
1933 io_apic_write(ioapic, 0, reg_00.raw);
1934 reg_00.raw = io_apic_read(ioapic, 0);
1935 spin_unlock_irqrestore(&ioapic_lock, flags);
1937 /* Sanity check */
1938 if (reg_00.bits.ID != apic_id) {
1939 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
1940 return -1;
1944 apic_printk(APIC_VERBOSE, KERN_INFO
1945 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
1947 return apic_id;
1951 int __init io_apic_get_version (int ioapic)
1953 union IO_APIC_reg_01 reg_01;
1954 unsigned long flags;
1956 spin_lock_irqsave(&ioapic_lock, flags);
1957 reg_01.raw = io_apic_read(ioapic, 1);
1958 spin_unlock_irqrestore(&ioapic_lock, flags);
1960 return reg_01.bits.version;
1964 int __init io_apic_get_redir_entries (int ioapic)
1966 union IO_APIC_reg_01 reg_01;
1967 unsigned long flags;
1969 spin_lock_irqsave(&ioapic_lock, flags);
1970 reg_01.raw = io_apic_read(ioapic, 1);
1971 spin_unlock_irqrestore(&ioapic_lock, flags);
1973 return reg_01.bits.entries;
1977 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
1979 struct IO_APIC_route_entry entry;
1980 unsigned long flags;
1982 if (!IO_APIC_IRQ(irq)) {
1983 printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
1984 ioapic);
1985 return -EINVAL;
1988 /*
1989 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
1990 * Note that we mask (disable) IRQs now -- these get enabled when the
1991 * corresponding device driver registers for this IRQ.
1992 */
1994 memset(&entry,0,sizeof(entry));
1996 entry.delivery_mode = INT_DELIVERY_MODE;
1997 entry.dest_mode = INT_DEST_MODE;
1998 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
1999 entry.trigger = edge_level;
2000 entry.polarity = active_high_low;
2001 entry.mask = 1;
2003 /*
2004 * IRQs < 16 are already in the irq_2_pin[] map
2005 */
2006 if (irq >= 16)
2007 add_pin_to_irq(irq, ioapic, pin);
2009 entry.vector = assign_irq_vector(irq);
2011 apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
2012 "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
2013 mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
2014 edge_level, active_high_low);
2016 ioapic_register_intr(irq, entry.vector, edge_level);
2018 if (!ioapic && (irq < 16))
2019 disable_8259A_irq(irq);
2021 spin_lock_irqsave(&ioapic_lock, flags);
2022 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2023 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2024 set_native_irq_info(entry.vector, TARGET_CPUS);
2025 spin_unlock_irqrestore(&ioapic_lock, flags);
2027 return 0;
2030 #endif /*CONFIG_ACPI_BOOT*/
2032 static int ioapic_physbase_to_id(unsigned long physbase)
2034 int apic;
2035 for ( apic = 0; apic < nr_ioapics; apic++ )
2036 if ( mp_ioapics[apic].mpc_apicaddr == physbase )
2037 return apic;
2038 return -EINVAL;
2041 int ioapic_guest_read(unsigned long physbase, unsigned int reg, u32 *pval)
2043 int apic;
2044 unsigned long flags;
2046 if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
2047 return apic;
2049 spin_lock_irqsave(&ioapic_lock, flags);
2050 *pval = io_apic_read(apic, reg);
2051 spin_unlock_irqrestore(&ioapic_lock, flags);
2053 return 0;
2056 #define WARN_BOGUS_WRITE(f, a...) \
2057 dprintk(XENLOG_INFO, "\n%s: " \
2058 "apic=%d, pin=%d, old_irq=%d, new_irq=%d\n" \
2059 "%s: old_entry=%08x, new_entry=%08x\n" \
2060 "%s: " f, __FUNCTION__, apic, pin, old_irq, new_irq, \
2061 __FUNCTION__, *(u32 *)&old_rte, *(u32 *)&new_rte, \
2062 __FUNCTION__ , ##a )
2064 int ioapic_guest_write(unsigned long physbase, unsigned int reg, u32 val)
2066 int apic, pin, old_irq = -1, new_irq = -1;
2067 struct IO_APIC_route_entry old_rte = { 0 }, new_rte = { 0 };
2068 unsigned long flags;
2070 if ( (apic = ioapic_physbase_to_id(physbase)) < 0 )
2071 return apic;
2073 /* Only write to the first half of a route entry. */
2074 if ( (reg < 0x10) || (reg & 1) )
2075 return 0;
2077 pin = (reg - 0x10) >> 1;
2079 /* Write first half from guest; second half is target info. */
2080 *(u32 *)&new_rte = val;
2081 new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
2083 /*
2084 * What about weird destination types?
2085 * SMI: Ignore? Ought to be set up by the BIOS.
2086 * NMI: Ignore? Watchdog functionality is Xen's concern.
2087 * INIT: Definitely ignore: probably a guest OS bug.
2088 * ExtINT: Ignore? Linux only asserts this at start of day.
2089 * For now, print a message and return an error. We can fix up on demand.
2090 */
2091 if ( new_rte.delivery_mode > dest_LowestPrio )
2093 printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
2094 printk(" APIC=%d/%d, lo-reg=%x\n", apic, pin, val);
2095 return -EINVAL;
2098 /*
2099 * The guest does not know physical APIC arrangement (flat vs. cluster).
2100 * Apply genapic conventions for this platform.
2101 */
2102 new_rte.delivery_mode = INT_DELIVERY_MODE;
2103 new_rte.dest_mode = INT_DEST_MODE;
2105 spin_lock_irqsave(&ioapic_lock, flags);
2107 /* Read first (interesting) half of current routing entry. */
2108 *(u32 *)&old_rte = io_apic_read(apic, 0x10 + 2 * pin);
2110 /* No change to the first half of the routing entry? Bail quietly. */
2111 if ( *(u32 *)&old_rte == *(u32 *)&new_rte )
2113 spin_unlock_irqrestore(&ioapic_lock, flags);
2114 return 0;
2117 /* Special delivery modes (SMI,NMI,INIT,ExtInt) should have no vector. */
2118 if ( (old_rte.delivery_mode > dest_LowestPrio) && (old_rte.vector != 0) )
2120 WARN_BOGUS_WRITE("Special delivery mode %d with non-zero vector "
2121 "%02x\n", old_rte.delivery_mode, old_rte.vector);
2122 /* Nobble the vector here as it does not relate to a valid irq. */
2123 old_rte.vector = 0;
2126 if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
2127 old_irq = vector_irq[old_rte.vector];
2128 if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
2129 new_irq = vector_irq[new_rte.vector];
2131 if ( (old_irq != new_irq) && (old_irq != -1) && IO_APIC_IRQ(old_irq) )
2133 if ( irq_desc[IO_APIC_VECTOR(old_irq)].action )
2135 WARN_BOGUS_WRITE("Attempt to remove IO-APIC pin of in-use IRQ!\n");
2136 spin_unlock_irqrestore(&ioapic_lock, flags);
2137 return 0;
2140 remove_pin_at_irq(old_irq, apic, pin);
2143 if ( (new_irq != -1) && IO_APIC_IRQ(new_irq) )
2145 if ( irq_desc[IO_APIC_VECTOR(new_irq)].action )
2147 WARN_BOGUS_WRITE("Attempt to %s IO-APIC pin for in-use IRQ!\n",
2148 (old_irq != new_irq) ? "add" : "modify");
2149 spin_unlock_irqrestore(&ioapic_lock, flags);
2150 return 0;
2153 /* Set the correct irq-handling type. */
2154 irq_desc[IO_APIC_VECTOR(new_irq)].handler = new_rte.trigger ?
2155 &ioapic_level_type: &ioapic_edge_type;
2157 if ( old_irq != new_irq )
2158 add_pin_to_irq(new_irq, apic, pin);
2160 /* Mask iff level triggered. */
2161 new_rte.mask = new_rte.trigger;
2163 else if ( !new_rte.mask )
2165 /* This pin leads nowhere but the guest has not masked it. */
2166 WARN_BOGUS_WRITE("Installing bogus unmasked IO-APIC entry!\n");
2167 new_rte.mask = 1;
2171 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
2172 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));
2174 spin_unlock_irqrestore(&ioapic_lock, flags);
2176 return 0;
2179 void dump_ioapic_irq_info(void)
2181 struct irq_pin_list *entry;
2182 struct IO_APIC_route_entry rte;
2183 unsigned int irq, pin, printed = 0;
2184 unsigned long flags;
2186 for ( irq = 0; irq < NR_IRQS; irq++ )
2188 entry = &irq_2_pin[irq];
2189 if ( entry->pin == -1 )
2190 continue;
2192 if ( !printed++ )
2193 printk("IO-APIC interrupt information:\n");
2195 printk(" IRQ%3d Vec%3d:\n", irq, irq_to_vector(irq));
2197 for ( ; ; )
2199 pin = entry->pin;
2201 printk(" Apic 0x%02x, Pin %2d: ", entry->apic, pin);
2203 spin_lock_irqsave(&ioapic_lock, flags);
2204 *(((int *)&rte) + 0) = io_apic_read(entry->apic, 0x10 + 2 * pin);
2205 *(((int *)&rte) + 1) = io_apic_read(entry->apic, 0x11 + 2 * pin);
2206 spin_unlock_irqrestore(&ioapic_lock, flags);
2208 printk("vector=%u, delivery_mode=%u, dest_mode=%s, "
2209 "delivery_status=%d, polarity=%d, irr=%d, "
2210 "trigger=%s, mask=%d\n",
2211 rte.vector, rte.delivery_mode,
2212 rte.dest_mode ? "logical" : "physical",
2213 rte.delivery_status, rte.polarity, rte.irr,
2214 rte.trigger ? "level" : "edge", rte.mask);
2216 if ( entry->next == 0 )
2217 break;
2218 entry = &irq_2_pin[entry->next];