ia64/xen-unstable

view xen/arch/ia64/xen/irq.c @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the Xen time handler. Tristan had #if 0'd some code because it
seemed redundant; that logic was in fact problematic and the cause of an
intermittent timer oops in dom0, so delete it now.

Also remove vcpu_wake, since waking the current vcpu accomplishes nothing
and simply wastes CPU cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents 1abf3783975d
children db2bd8169e9b 3b0d07af46cb
line source
1 /*
2 * linux/arch/ia64/kernel/irq.c
3 *
4 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the code used by various IRQ handling routines:
7 * asking for different IRQ's should be done through these routines
8 * instead of just grabbing them. Thus setups with different IRQ numbers
9 * shouldn't result in any weird surprises, and installing new handlers
10 * should be easier.
11 *
12 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
13 *
14 * 4/14/2004: Added code to handle cpu migration and do safe irq
15 * migration without losing interrupts for iosapic
16 * architecture.
17 */
19 /*
20 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
21 *
22 * IRQs are in fact implemented a bit like signal handlers for the kernel.
23 * Naturally it's not a 1:1 relation, but there are similarities.
24 */
26 #include <linux/config.h>
27 #include <linux/errno.h>
28 #include <linux/module.h>
29 #ifndef XEN
30 #include <linux/signal.h>
31 #endif
32 #include <linux/sched.h>
33 #include <linux/ioport.h>
34 #include <linux/interrupt.h>
35 #include <linux/timex.h>
36 #include <linux/slab.h>
37 #ifndef XEN
38 #include <linux/random.h>
39 #include <linux/cpu.h>
40 #endif
41 #include <linux/ctype.h>
42 #ifndef XEN
43 #include <linux/smp_lock.h>
44 #endif
45 #include <linux/init.h>
46 #ifndef XEN
47 #include <linux/kernel_stat.h>
48 #endif
49 #include <linux/irq.h>
50 #ifndef XEN
51 #include <linux/proc_fs.h>
52 #endif
53 #include <linux/seq_file.h>
54 #ifndef XEN
55 #include <linux/kallsyms.h>
56 #include <linux/notifier.h>
57 #endif
59 #include <asm/atomic.h>
60 #ifndef XEN
61 #include <asm/cpu.h>
62 #endif
63 #include <asm/io.h>
64 #include <asm/smp.h>
65 #include <asm/system.h>
66 #include <asm/bitops.h>
67 #include <asm/uaccess.h>
68 #include <asm/pgalloc.h>
69 #ifndef XEN
70 #include <asm/tlbflush.h>
71 #endif
72 #include <asm/delay.h>
73 #include <asm/irq.h>
75 #ifdef XEN
76 #include <xen/event.h>
77 #define _irq_desc irq_desc
78 #define irq_descp(irq) &irq_desc[irq]
79 #define apicid_to_phys_cpu_present(x) 1
80 #endif
83 /*
84 * Linux has a controller-independent x86 interrupt architecture.
85 * every controller has a 'controller-template', that is used
86 * by the main code to do the right thing. Each driver-visible
87 * interrupt source is transparently wired to the appropriate
88 * controller. Thus drivers need not be aware of the
89 * interrupt-controller.
90 *
91 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
92 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
93 * (IO-APICs assumed to be messaging to Pentium local-APICs)
94 *
95 * the code is designed to be easily extended with new/different
96 * interrupt controllers, without having to do assembly magic.
97 */
99 /*
100 * Controller mappings for all interrupt sources:
101 */
102 irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
103 [0 ... NR_IRQS-1] = {
104 .status = IRQ_DISABLED,
105 .handler = &no_irq_type,
106 .lock = SPIN_LOCK_UNLOCKED
107 }
108 };
110 /*
111 * This is updated when the user sets irq affinity via /proc
112 */
113 cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
115 #ifdef CONFIG_IA64_GENERIC
116 irq_desc_t * __ia64_irq_desc (unsigned int irq)
117 {
118 return _irq_desc + irq;
119 }
121 ia64_vector __ia64_irq_to_vector (unsigned int irq)
122 {
123 return (ia64_vector) irq;
124 }
126 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
127 {
128 return (unsigned int) vec;
129 }
130 #endif
132 #ifndef XEN
133 static void register_irq_proc (unsigned int irq);
134 #endif
136 /*
137 * Special irq handlers.
138 */
140 #ifdef XEN
141 void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
142 #else
143 irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
144 { return IRQ_NONE; }
145 #endif
147 /*
148 * Generic no controller code
149 */
151 static void enable_none(unsigned int irq) { }
152 static unsigned int startup_none(unsigned int irq) { return 0; }
153 static void disable_none(unsigned int irq) { }
154 static void ack_none(unsigned int irq)
155 {
156 /*
157 * 'what should we do if we get a hw irq event on an illegal vector'.
158 * each architecture has to answer this themselves, it doesn't deserve
159 * a generic callback i think.
160 */
161 #ifdef CONFIG_X86
162 printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
163 #ifdef CONFIG_X86_LOCAL_APIC
164 /*
165 * Currently unexpected vectors happen only on SMP and APIC.
166 * We _must_ ack these because every local APIC has only N
167 * irq slots per priority level, and a 'hanging, unacked' IRQ
168 * holds up an irq slot - in excessive cases (when multiple
169 * unexpected vectors occur) that might lock up the APIC
170 * completely.
171 */
172 ack_APIC_irq();
173 #endif
174 #endif
175 #ifdef CONFIG_IA64
176 printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
177 #endif
178 }
180 /* startup is the same as "enable", shutdown is same as "disable" */
181 #define shutdown_none disable_none
182 #define end_none enable_none
184 struct hw_interrupt_type no_irq_type = {
185 "none",
186 startup_none,
187 shutdown_none,
188 enable_none,
189 disable_none,
190 ack_none,
191 end_none
192 };
194 atomic_t irq_err_count;
195 #ifdef CONFIG_X86_IO_APIC
196 #ifdef APIC_MISMATCH_DEBUG
197 atomic_t irq_mis_count;
198 #endif
199 #endif
201 /*
202 * Generic, controller-independent functions:
203 */
205 #ifndef XEN
206 int show_interrupts(struct seq_file *p, void *v)
207 {
208 int j, i = *(loff_t *) v;
209 struct irqaction * action;
210 irq_desc_t *idesc;
211 unsigned long flags;
213 if (i == 0) {
214 seq_puts(p, " ");
215 for (j=0; j<NR_CPUS; j++)
216 if (cpu_online(j))
217 seq_printf(p, "CPU%d ",j);
218 seq_putc(p, '\n');
219 }
221 if (i < NR_IRQS) {
222 idesc = irq_descp(i);
223 spin_lock_irqsave(&idesc->lock, flags);
224 action = idesc->action;
225 if (!action)
226 goto skip;
227 seq_printf(p, "%3d: ",i);
228 #ifndef CONFIG_SMP
229 seq_printf(p, "%10u ", kstat_irqs(i));
230 #else
231 for (j = 0; j < NR_CPUS; j++)
232 if (cpu_online(j))
233 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
234 #endif
235 seq_printf(p, " %14s", idesc->handler->typename);
236 seq_printf(p, " %s", action->name);
238 for (action=action->next; action; action = action->next)
239 seq_printf(p, ", %s", action->name);
241 seq_putc(p, '\n');
242 skip:
243 spin_unlock_irqrestore(&idesc->lock, flags);
244 } else if (i == NR_IRQS) {
245 seq_puts(p, "NMI: ");
246 for (j = 0; j < NR_CPUS; j++)
247 if (cpu_online(j))
248 seq_printf(p, "%10u ", nmi_count(j));
249 seq_putc(p, '\n');
250 #ifdef CONFIG_X86_LOCAL_APIC
251 seq_puts(p, "LOC: ");
252 for (j = 0; j < NR_CPUS; j++)
253 if (cpu_online(j))
254 seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
255 seq_putc(p, '\n');
256 #endif
257 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
258 #ifdef CONFIG_X86_IO_APIC
259 #ifdef APIC_MISMATCH_DEBUG
260 seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
261 #endif
262 #endif
263 }
264 return 0;
265 }
266 #endif
268 #ifdef CONFIG_SMP
269 inline void synchronize_irq(unsigned int irq)
270 {
271 #ifndef XEN
272 struct irq_desc *desc = irq_desc + irq;
274 while (desc->status & IRQ_INPROGRESS)
275 cpu_relax();
276 #endif
277 }
278 EXPORT_SYMBOL(synchronize_irq);
279 #endif
281 /*
282 * This should really return information about whether
283 * we should do bottom half handling etc. Right now we
284 * end up _always_ checking the bottom half, which is a
285 * waste of time and is not what some drivers would
286 * prefer.
287 */
288 int handle_IRQ_event(unsigned int irq,
289 struct pt_regs *regs, struct irqaction *action)
290 {
291 #ifndef XEN
292 int status = 1; /* Force the "do bottom halves" bit */
293 #endif
294 int retval = 0;
296 #ifndef XEN
297 if (!(action->flags & SA_INTERRUPT))
298 #endif
299 local_irq_enable();
301 #ifdef XEN
302 action->handler(irq, action->dev_id, regs);
303 #else
304 do {
305 status |= action->flags;
306 retval |= action->handler(irq, action->dev_id, regs);
307 action = action->next;
308 } while (action);
309 if (status & SA_SAMPLE_RANDOM)
310 add_interrupt_randomness(irq);
311 #endif
312 local_irq_disable();
313 return retval;
314 }
316 #ifndef XEN
317 static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
318 {
319 struct irqaction *action;
321 if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
322 printk(KERN_ERR "irq event %d: bogus return value %x\n",
323 irq, action_ret);
324 } else {
325 printk(KERN_ERR "irq %d: nobody cared!\n", irq);
326 }
327 dump_stack();
328 printk(KERN_ERR "handlers:\n");
329 action = desc->action;
330 do {
331 printk(KERN_ERR "[<%p>]", action->handler);
332 print_symbol(" (%s)",
333 (unsigned long)action->handler);
334 printk("\n");
335 action = action->next;
336 } while (action);
337 }
339 static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
340 {
341 static int count = 100;
343 if (count) {
344 count--;
345 __report_bad_irq(irq, desc, action_ret);
346 }
347 }
348 #endif
350 static int noirqdebug;
352 static int __init noirqdebug_setup(char *str)
353 {
354 noirqdebug = 1;
355 printk("IRQ lockup detection disabled\n");
356 return 1;
357 }
359 __setup("noirqdebug", noirqdebug_setup);
361 /*
362 * If 99,900 of the previous 100,000 interrupts have not been handled then
363 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
364 * turn the IRQ off.
365 *
366 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
367 * device sharing an IRQ with the failing one)
368 *
369 * Called under desc->lock
370 */
371 #ifndef XEN
372 static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
373 {
374 if (action_ret != IRQ_HANDLED) {
375 desc->irqs_unhandled++;
376 if (action_ret != IRQ_NONE)
377 report_bad_irq(irq, desc, action_ret);
378 }
380 desc->irq_count++;
381 if (desc->irq_count < 100000)
382 return;
384 desc->irq_count = 0;
385 if (desc->irqs_unhandled > 99900) {
386 /*
387 * The interrupt is stuck
388 */
389 __report_bad_irq(irq, desc, action_ret);
390 /*
391 * Now kill the IRQ
392 */
393 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
394 desc->status |= IRQ_DISABLED;
395 desc->handler->disable(irq);
396 }
397 desc->irqs_unhandled = 0;
398 }
399 #endif
401 /*
402 * Generic enable/disable code: this just calls
403 * down into the PIC-specific version for the actual
404 * hardware disable after having gotten the irq
405 * controller lock.
406 */
408 /**
409 * disable_irq_nosync - disable an irq without waiting
410 * @irq: Interrupt to disable
411 *
412 * Disable the selected interrupt line. Disables and Enables are
413 * nested.
414 * Unlike disable_irq(), this function does not ensure existing
415 * instances of the IRQ handler have completed before returning.
416 *
417 * This function may be called from IRQ context.
418 */
420 inline void disable_irq_nosync(unsigned int irq)
421 {
422 irq_desc_t *desc = irq_descp(irq);
423 unsigned long flags;
425 spin_lock_irqsave(&desc->lock, flags);
426 if (!desc->depth++) {
427 desc->status |= IRQ_DISABLED;
428 desc->handler->disable(irq);
429 }
430 spin_unlock_irqrestore(&desc->lock, flags);
431 }
432 EXPORT_SYMBOL(disable_irq_nosync);
434 /**
435 * disable_irq - disable an irq and wait for completion
436 * @irq: Interrupt to disable
437 *
438 * Disable the selected interrupt line. Enables and Disables are
439 * nested.
440 * This function waits for any pending IRQ handlers for this interrupt
441 * to complete before returning. If you use this function while
442 * holding a resource the IRQ handler may need you will deadlock.
443 *
444 * This function may be called - with care - from IRQ context.
445 */
447 void disable_irq(unsigned int irq)
448 {
449 irq_desc_t *desc = irq_descp(irq);
451 disable_irq_nosync(irq);
452 if (desc->action)
453 synchronize_irq(irq);
454 }
455 EXPORT_SYMBOL(disable_irq);
457 /**
458 * enable_irq - enable handling of an irq
459 * @irq: Interrupt to enable
460 *
461 * Undoes the effect of one call to disable_irq(). If this
462 * matches the last disable, processing of interrupts on this
463 * IRQ line is re-enabled.
464 *
465 * This function may be called from IRQ context.
466 */
468 void enable_irq(unsigned int irq)
469 {
470 irq_desc_t *desc = irq_descp(irq);
471 unsigned long flags;
473 spin_lock_irqsave(&desc->lock, flags);
474 switch (desc->depth) {
475 case 1: {
476 unsigned int status = desc->status & ~IRQ_DISABLED;
477 desc->status = status;
478 #ifndef XEN
479 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
480 desc->status = status | IRQ_REPLAY;
481 hw_resend_irq(desc->handler,irq);
482 }
483 #endif
484 desc->handler->enable(irq);
485 /* fall-through */
486 }
487 default:
488 desc->depth--;
489 break;
490 case 0:
491 printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
492 irq, (void *) __builtin_return_address(0));
493 }
494 spin_unlock_irqrestore(&desc->lock, flags);
495 }
496 EXPORT_SYMBOL(enable_irq);
498 /*
499 * do_IRQ handles all normal device IRQ's (the special
500 * SMP cross-CPU interrupts have their own specific
501 * handlers).
502 */
503 fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
504 {
505 irq_desc_t *desc = irq_desc + irq;
506 struct irqaction * action;
507 unsigned int status;
509 #ifndef XEN
510 kstat_this_cpu.irqs[irq]++;
511 #endif
512 if (desc->status & IRQ_PER_CPU) {
513 irqreturn_t action_ret;
515 /*
516 * No locking required for CPU-local interrupts:
517 */
518 desc->handler->ack(irq);
519 action_ret = handle_IRQ_event(irq, regs, desc->action);
520 #ifndef XEN
521 if (!noirqdebug)
522 note_interrupt(irq, desc, action_ret);
523 #endif
524 desc->handler->end(irq);
525 return 1;
526 }
528 spin_lock(&desc->lock);
529 desc->handler->ack(irq);
530 /*
531 * REPLAY is when Linux resends an IRQ that was dropped earlier
532 * WAITING is used by probe to mark irqs that are being tested
533 */
534 #ifdef XEN
535 status = desc->status & ~IRQ_REPLAY;
536 #else
537 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
538 #endif
539 status |= IRQ_PENDING; /* we _want_ to handle it */
541 /*
542 * If the IRQ is disabled for whatever reason, we cannot
543 * use the action we have.
544 */
545 action = NULL;
546 if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
547 action = desc->action;
548 status &= ~IRQ_PENDING; /* we commit to handling */
549 status |= IRQ_INPROGRESS; /* we are handling it */
550 }
551 desc->status = status;
553 /*
554 * If there is no IRQ handler or it was disabled, exit early.
555 * Since we set PENDING, if another processor is handling
556 * a different instance of this same irq, the other processor
557 * will take care of it.
558 */
559 if (unlikely(!action))
560 goto out;
562 /*
563 * Edge triggered interrupts need to remember
564 * pending events.
565 * This applies to any hw interrupts that allow a second
566 * instance of the same irq to arrive while we are in do_IRQ
567 * or in the handler. But the code here only handles the _second_
568 * instance of the irq, not the third or fourth. So it is mostly
569 * useful for irq hardware that does not mask cleanly in an
570 * SMP environment.
571 */
572 for (;;) {
573 irqreturn_t action_ret;
575 spin_unlock(&desc->lock);
577 action_ret = handle_IRQ_event(irq, regs, action);
579 spin_lock(&desc->lock);
580 #ifndef XEN
581 if (!noirqdebug)
582 note_interrupt(irq, desc, action_ret);
583 #endif
584 if (likely(!(desc->status & IRQ_PENDING)))
585 break;
586 desc->status &= ~IRQ_PENDING;
587 }
588 desc->status &= ~IRQ_INPROGRESS;
590 out:
591 /*
592 * The ->end() handler has to deal with interrupts which got
593 * disabled while the handler was running.
594 */
595 desc->handler->end(irq);
596 spin_unlock(&desc->lock);
598 return 1;
599 }
601 /**
602 * request_irq - allocate an interrupt line
603 * @irq: Interrupt line to allocate
604 * @handler: Function to be called when the IRQ occurs
605 * @irqflags: Interrupt type flags
606 * @devname: An ascii name for the claiming device
607 * @dev_id: A cookie passed back to the handler function
608 *
609 * This call allocates interrupt resources and enables the
610 * interrupt line and IRQ handling. From the point this
611 * call is made your handler function may be invoked. Since
612 * your handler function must clear any interrupt the board
613 * raises, you must take care both to initialise your hardware
614 * and to set up the interrupt handler in the right order.
615 *
616 * Dev_id must be globally unique. Normally the address of the
617 * device data structure is used as the cookie. Since the handler
618 * receives this value it makes sense to use it.
619 *
620 * If your interrupt is shared you must pass a non NULL dev_id
621 * as this is required when freeing the interrupt.
622 *
623 * Flags:
624 *
625 * SA_SHIRQ Interrupt is shared
626 *
627 * SA_INTERRUPT Disable local interrupts while processing
628 *
629 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
630 *
631 */
633 int request_irq(unsigned int irq,
634 irqreturn_t (*handler)(int, void *, struct pt_regs *),
635 unsigned long irqflags,
636 const char * devname,
637 void *dev_id)
638 {
639 int retval;
640 struct irqaction * action;
642 #if 1
643 /*
644 * Sanity-check: shared interrupts should REALLY pass in
645 * a real dev-ID, otherwise we'll have trouble later trying
646 * to figure out which interrupt is which (messes up the
647 * interrupt freeing logic etc).
648 */
649 if (irqflags & SA_SHIRQ) {
650 if (!dev_id)
651 printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
652 }
653 #endif
655 if (irq >= NR_IRQS)
656 return -EINVAL;
657 if (!handler)
658 return -EINVAL;
660 action = xmalloc(struct irqaction);
661 if (!action)
662 return -ENOMEM;
664 #ifdef XEN
665 action->handler = (void *) handler;
666 #else
667 action->handler = handler;
668 action->flags = irqflags;
669 action->mask = 0;
670 #endif
671 action->name = devname;
672 #ifndef XEN
673 action->next = NULL;
674 #endif
675 action->dev_id = dev_id;
677 retval = setup_irq(irq, action);
678 if (retval)
679 xfree(action);
680 return retval;
681 }
683 EXPORT_SYMBOL(request_irq);
685 /**
686 * free_irq - free an interrupt
687 * @irq: Interrupt line to free
688 * @dev_id: Device identity to free
689 *
690 * Remove an interrupt handler. The handler is removed and if the
691 * interrupt line is no longer in use by any driver it is disabled.
692 * On a shared IRQ the caller must ensure the interrupt is disabled
693 * on the card it drives before calling this function. The function
694 * does not return until any executing interrupts for this IRQ
695 * have completed.
696 *
697 * This function must not be called from interrupt context.
698 */
700 #ifdef XEN
701 void free_irq(unsigned int irq)
702 #else
703 void free_irq(unsigned int irq, void *dev_id)
704 #endif
705 {
706 irq_desc_t *desc;
707 #ifndef XEN
708 struct irqaction **p;
709 #endif
710 unsigned long flags;
712 if (irq >= NR_IRQS)
713 return;
715 desc = irq_descp(irq);
716 spin_lock_irqsave(&desc->lock,flags);
717 #ifdef XEN
718 if (desc->action) {
719 struct irqaction * action = desc->action;
720 desc->action = NULL;
721 #else
722 p = &desc->action;
723 for (;;) {
724 struct irqaction * action = *p;
725 if (action) {
726 struct irqaction **pp = p;
727 p = &action->next;
728 if (action->dev_id != dev_id)
729 continue;
731 /* Found it - now remove it from the list of entries */
732 *pp = action->next;
733 if (!desc->action) {
734 #endif
735 desc->status |= IRQ_DISABLED;
736 desc->handler->shutdown(irq);
737 #ifndef XEN
738 }
739 #endif
740 spin_unlock_irqrestore(&desc->lock,flags);
742 /* Wait to make sure it's not being used on another CPU */
743 synchronize_irq(irq);
744 xfree(action);
745 return;
746 }
747 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
748 spin_unlock_irqrestore(&desc->lock,flags);
749 #ifndef XEN
750 return;
751 }
752 #endif
753 }
755 EXPORT_SYMBOL(free_irq);
757 /*
758 * IRQ autodetection code..
759 *
760 * This depends on the fact that any interrupt that
761 * comes in on to an unassigned handler will get stuck
762 * with "IRQ_WAITING" cleared and the interrupt
763 * disabled.
764 */
766 #ifndef XEN
767 static int DECLARE_MUTEX(probe_sem);
769 /**
770 * probe_irq_on - begin an interrupt autodetect
771 *
772 * Commence probing for an interrupt. The interrupts are scanned
773 * and a mask of potential interrupt lines is returned.
774 *
775 */
777 unsigned long probe_irq_on(void)
778 {
779 unsigned int i;
780 irq_desc_t *desc;
781 unsigned long val;
782 unsigned long delay;
784 down(&probe_sem);
785 /*
786 * something may have generated an irq long ago and we want to
787 * flush such a longstanding irq before considering it as spurious.
788 */
789 for (i = NR_IRQS-1; i > 0; i--) {
790 desc = irq_descp(i);
792 spin_lock_irq(&desc->lock);
793 if (!desc->action)
794 desc->handler->startup(i);
795 spin_unlock_irq(&desc->lock);
796 }
798 /* Wait for longstanding interrupts to trigger. */
799 for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
800 /* about 20ms delay */ barrier();
802 /*
803 * enable any unassigned irqs
804 * (we must startup again here because if a longstanding irq
805 * happened in the previous stage, it may have masked itself)
806 */
807 for (i = NR_IRQS-1; i > 0; i--) {
808 desc = irq_descp(i);
810 spin_lock_irq(&desc->lock);
811 if (!desc->action) {
812 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
813 if (desc->handler->startup(i))
814 desc->status |= IRQ_PENDING;
815 }
816 spin_unlock_irq(&desc->lock);
817 }
819 /*
820 * Wait for spurious interrupts to trigger
821 */
822 for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
823 /* about 100ms delay */ barrier();
825 /*
826 * Now filter out any obviously spurious interrupts
827 */
828 val = 0;
829 for (i = 0; i < NR_IRQS; i++) {
830 irq_desc_t *desc = irq_descp(i);
831 unsigned int status;
833 spin_lock_irq(&desc->lock);
834 status = desc->status;
836 if (status & IRQ_AUTODETECT) {
837 /* It triggered already - consider it spurious. */
838 if (!(status & IRQ_WAITING)) {
839 desc->status = status & ~IRQ_AUTODETECT;
840 desc->handler->shutdown(i);
841 } else
842 if (i < 32)
843 val |= 1 << i;
844 }
845 spin_unlock_irq(&desc->lock);
846 }
848 return val;
849 }
851 EXPORT_SYMBOL(probe_irq_on);
853 /**
854 * probe_irq_mask - scan a bitmap of interrupt lines
855 * @val: mask of interrupts to consider
856 *
857 * Scan the ISA bus interrupt lines and return a bitmap of
858 * active interrupts. The interrupt probe logic state is then
859 * returned to its previous value.
860 *
861 * Note: we need to scan all the irq's even though we will
862 * only return ISA irq numbers - just so that we reset them
863 * all to a known state.
864 */
865 unsigned int probe_irq_mask(unsigned long val)
866 {
867 int i;
868 unsigned int mask;
870 mask = 0;
871 for (i = 0; i < 16; i++) {
872 irq_desc_t *desc = irq_descp(i);
873 unsigned int status;
875 spin_lock_irq(&desc->lock);
876 status = desc->status;
878 if (status & IRQ_AUTODETECT) {
879 if (!(status & IRQ_WAITING))
880 mask |= 1 << i;
882 desc->status = status & ~IRQ_AUTODETECT;
883 desc->handler->shutdown(i);
884 }
885 spin_unlock_irq(&desc->lock);
886 }
887 up(&probe_sem);
889 return mask & val;
890 }
891 EXPORT_SYMBOL(probe_irq_mask);
893 /**
894 * probe_irq_off - end an interrupt autodetect
895 * @val: mask of potential interrupts (unused)
896 *
897 * Scans the unused interrupt lines and returns the line which
898 * appears to have triggered the interrupt. If no interrupt was
899 * found then zero is returned. If more than one interrupt is
900 * found then minus the first candidate is returned to indicate
901 * their is doubt.
902 *
903 * The interrupt probe logic state is returned to its previous
904 * value.
905 *
906 * BUGS: When used in a module (which arguably shouldn't happen)
907 * nothing prevents two IRQ probe callers from overlapping. The
908 * results of this are non-optimal.
909 */
911 int probe_irq_off(unsigned long val)
912 {
913 int i, irq_found, nr_irqs;
915 nr_irqs = 0;
916 irq_found = 0;
917 for (i = 0; i < NR_IRQS; i++) {
918 irq_desc_t *desc = irq_descp(i);
919 unsigned int status;
921 spin_lock_irq(&desc->lock);
922 status = desc->status;
924 if (status & IRQ_AUTODETECT) {
925 if (!(status & IRQ_WAITING)) {
926 if (!nr_irqs)
927 irq_found = i;
928 nr_irqs++;
929 }
930 desc->status = status & ~IRQ_AUTODETECT;
931 desc->handler->shutdown(i);
932 }
933 spin_unlock_irq(&desc->lock);
934 }
935 up(&probe_sem);
937 if (nr_irqs > 1)
938 irq_found = -irq_found;
939 return irq_found;
940 }
942 EXPORT_SYMBOL(probe_irq_off);
943 #endif
945 int setup_irq(unsigned int irq, struct irqaction * new)
946 {
947 #ifndef XEN
948 int shared = 0;
949 #endif
950 unsigned long flags;
951 struct irqaction *old, **p;
952 irq_desc_t *desc = irq_descp(irq);
954 #ifndef XEN
955 if (desc->handler == &no_irq_type)
956 return -ENOSYS;
957 /*
958 * Some drivers like serial.c use request_irq() heavily,
959 * so we have to be careful not to interfere with a
960 * running system.
961 */
962 if (new->flags & SA_SAMPLE_RANDOM) {
963 /*
964 * This function might sleep, we want to call it first,
965 * outside of the atomic block.
966 * Yes, this might clear the entropy pool if the wrong
967 * driver is attempted to be loaded, without actually
968 * installing a new handler, but is this really a problem,
969 * only the sysadmin is able to do this.
970 */
971 rand_initialize_irq(irq);
972 }
974 if (new->flags & SA_PERCPU_IRQ) {
975 desc->status |= IRQ_PER_CPU;
976 desc->handler = &irq_type_ia64_lsapic;
977 }
978 #endif
980 /*
981 * The following block of code has to be executed atomically
982 */
983 spin_lock_irqsave(&desc->lock,flags);
984 p = &desc->action;
985 if ((old = *p) != NULL) {
986 #ifdef XEN
987 if (1) {
988 /* Can't share interrupts unless both agree to */
989 #else
990 if (!(old->flags & new->flags & SA_SHIRQ)) {
991 #endif
992 spin_unlock_irqrestore(&desc->lock,flags);
993 return -EBUSY;
994 }
996 #ifndef XEN
997 /* add new interrupt at end of irq queue */
998 do {
999 p = &old->next;
1000 old = *p;
1001 } while (old);
1002 shared = 1;
1003 #endif
1006 *p = new;
1008 #ifndef XEN
1009 if (!shared) {
1010 #else
1012 #endif
1013 desc->depth = 0;
1014 #ifdef XEN
1015 desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
1016 #else
1017 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
1018 #endif
1019 desc->handler->startup(irq);
1021 spin_unlock_irqrestore(&desc->lock,flags);
1023 #ifndef XEN
1024 register_irq_proc(irq);
1025 #endif
1026 return 0;
1029 #ifndef XEN
1031 static struct proc_dir_entry * root_irq_dir;
1032 static struct proc_dir_entry * irq_dir [NR_IRQS];
1034 #ifdef CONFIG_SMP
1036 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
1038 static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
1040 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
1042 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
1044 cpumask_t mask = CPU_MASK_NONE;
1046 cpu_set(cpu_logical_id(hwid), mask);
1048 if (irq < NR_IRQS) {
1049 irq_affinity[irq] = mask;
1050 irq_redir[irq] = (char) (redir & 0xff);
1054 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1055 int count, int *eof, void *data)
1057 int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");
1059 len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
1060 if (count - len < 2)
1061 return -EINVAL;
1062 len += sprintf(page + len, "\n");
1063 return len;
1066 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1067 unsigned long count, void *data)
1069 unsigned int irq = (unsigned long) data;
1070 int full_count = count, err;
1071 cpumask_t new_value, tmp;
1072 # define R_PREFIX_LEN 16
1073 char rbuf[R_PREFIX_LEN];
1074 int rlen;
1075 int prelen;
1076 irq_desc_t *desc = irq_descp(irq);
1077 unsigned long flags;
1079 if (!desc->handler->set_affinity)
1080 return -EIO;
1082 /*
1083 * If string being written starts with a prefix of 'r' or 'R'
1084 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
1085 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
1086 * all be trimmed as part of prelen, the untrimmed spaces will
1087 * cause the hex parsing to fail, and this write() syscall will
1088 * fail with EINVAL.
1089 */
1091 if (!count)
1092 return -EINVAL;
1093 rlen = min(sizeof(rbuf)-1, count);
1094 if (copy_from_user(rbuf, buffer, rlen))
1095 return -EFAULT;
1096 rbuf[rlen] = 0;
1097 prelen = 0;
1098 if (tolower(*rbuf) == 'r') {
1099 prelen = strspn(rbuf, "Rr ");
1100 irq |= IA64_IRQ_REDIRECTED;
1103 err = cpumask_parse(buffer+prelen, count-prelen, new_value);
1104 if (err)
1105 return err;
1107 /*
1108 * Do not allow disabling IRQs completely - it's a too easy
1109 * way to make the system unusable accidentally :-) At least
1110 * one online CPU still has to be targeted.
1111 */
1112 cpus_and(tmp, new_value, cpu_online_map);
1113 if (cpus_empty(tmp))
1114 return -EINVAL;
1116 spin_lock_irqsave(&desc->lock, flags);
1117 pending_irq_cpumask[irq] = new_value;
1118 spin_unlock_irqrestore(&desc->lock, flags);
1120 return full_count;
1123 void move_irq(int irq)
1125 /* note - we hold desc->lock */
1126 cpumask_t tmp;
1127 irq_desc_t *desc = irq_descp(irq);
1129 if (!cpus_empty(pending_irq_cpumask[irq])) {
1130 cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
1131 if (unlikely(!cpus_empty(tmp))) {
1132 desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
1134 cpus_clear(pending_irq_cpumask[irq]);
1139 #endif /* CONFIG_SMP */
1140 #endif
1142 #ifdef CONFIG_HOTPLUG_CPU
1143 unsigned int vectors_in_migration[NR_IRQS];
1145 /*
1146 * Since cpu_online_map is already updated, we just need to check for
1147 * affinity that has zeros
1148 */
1149 static void migrate_irqs(void)
1151 cpumask_t mask;
1152 irq_desc_t *desc;
1153 int irq, new_cpu;
1155 for (irq=0; irq < NR_IRQS; irq++) {
1156 desc = irq_descp(irq);
1158 /*
1159 * No handling for now.
1160 * TBD: Implement a disable function so we can now
1161 * tell CPU not to respond to these local intr sources.
1162 * such as ITV,CPEI,MCA etc.
1163 */
1164 if (desc->status == IRQ_PER_CPU)
1165 continue;
1167 cpus_and(mask, irq_affinity[irq], cpu_online_map);
1168 if (any_online_cpu(mask) == NR_CPUS) {
1169 /*
1170 * Save it for phase 2 processing
1171 */
1172 vectors_in_migration[irq] = irq;
1174 new_cpu = any_online_cpu(cpu_online_map);
1175 mask = cpumask_of_cpu(new_cpu);
1177 /*
1178 * Al three are essential, currently WARN_ON.. maybe panic?
1179 */
1180 if (desc->handler && desc->handler->disable &&
1181 desc->handler->enable && desc->handler->set_affinity) {
1182 desc->handler->disable(irq);
1183 desc->handler->set_affinity(irq, mask);
1184 desc->handler->enable(irq);
1185 } else {
1186 WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
1187 !(desc->handler->enable) ||
1188 !(desc->handler->set_affinity)));
1194 void fixup_irqs(void)
/*
 * Run on a CPU that is going offline: migrate its IRQs elsewhere,
 * drain anything already latched locally, replay anything that slipped
 * through, then silence the processor's interrupt delivery.
 */
1196 unsigned int irq;
1197 extern void ia64_process_pending_intr(void);
/* Mask the per-CPU timer vector (ITV bit 16 is the mask bit). */
1199 ia64_set_itv(1<<16);
1200 /*
1201 * Phase 1: Locate irq's bound to this cpu and
1202 * relocate them for cpu removal.
1203 */
1204 migrate_irqs();
1206 /*
1207 * Phase 2: Perform interrupt processing for all entries reported in
1208 * local APIC.
1209 */
1210 ia64_process_pending_intr();
1212 /*
1213 * Phase 3: Now handle any interrupts not captured in local APIC.
1214 * This is to account for cases that device interrupted during the time the
1215 * rte was being disabled and re-programmed.
1216 */
1217 for (irq=0; irq < NR_IRQS; irq++) {
1218 if (vectors_in_migration[irq]) {
1219 vectors_in_migration[irq]=0;
/* Synthesize the lost interrupt on behalf of the device. */
1220 do_IRQ(irq, NULL);
1224 /*
1225 * Now let processor die. We do irq disable and max_xtp() to
1226 * ensure there is no more interrupts routed to this processor.
1227 * But the local timer interrupt can have 1 pending which we
1228 * take care in timer_interrupt().
1229 */
1230 max_xtp();
1231 local_irq_disable();
1233 #endif
1235 #ifndef XEN
1236 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
1237 int count, int *eof, void *data)
/*
 * /proc read handler for prof_cpu_mask: format the cpumask pointed to
 * by 'data' into 'page'.  Returns the number of bytes written, or
 * -EINVAL if there is not enough room left for the trailing "\n\0".
 */
1239 int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
1240 if (count - len < 2)
1241 return -EINVAL;
1242 len += sprintf(page + len, "\n");
1243 return len;
1246 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
1247 unsigned long count, void *data)
/*
 * /proc write handler for prof_cpu_mask: parse a user-supplied cpumask
 * string and store it into the mask pointed to by 'data'.  Returns the
 * full byte count consumed on success, or the cpumask_parse error.
 */
1249 cpumask_t *mask = (cpumask_t *)data;
1250 unsigned long full_count = count, err;
1251 cpumask_t new_value;
/* Parse into a local first so a bad input leaves *mask untouched. */
1253 err = cpumask_parse(buffer, count, new_value);
1254 if (err)
1255 return err;
1257 *mask = new_value;
1258 return full_count;
1261 #define MAX_NAMELEN 10
1263 static void register_irq_proc (unsigned int irq)
/*
 * Create the /proc/irq/<irq> directory (and, on SMP, its smp_affinity
 * entry) for an IRQ that has a real handler.  Silently does nothing if
 * the /proc/irq root is absent, the IRQ is unused, or the directory
 * already exists.
 */
1265 char name [MAX_NAMELEN];
1267 if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
1268 return;
1270 memset(name, 0, MAX_NAMELEN);
1271 sprintf(name, "%d", irq);
1273 /* create /proc/irq/1234 */
1274 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1276 #ifdef CONFIG_SMP
1278 struct proc_dir_entry *entry;
1280 /* create /proc/irq/1234/smp_affinity */
1281 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1283 if (entry) {
1284 entry->nlink = 1;
/* Stash the irq number in ->data for the read/write handlers. */
1285 entry->data = (void *)(long)irq;
1286 entry->read_proc = irq_affinity_read_proc;
1287 entry->write_proc = irq_affinity_write_proc;
1290 smp_affinity_entry[irq] = entry;
1292 #endif
1295 cpumask_t prof_cpu_mask = CPU_MASK_ALL;
1297 void init_irq_proc (void)
/*
 * Populate the /proc/irq hierarchy at boot: the root directory, the
 * global prof_cpu_mask entry, and one subdirectory per IRQ that has a
 * handler installed.
 */
1299 struct proc_dir_entry *entry;
1300 int i;
1302 /* create /proc/irq */
1303 root_irq_dir = proc_mkdir("irq", 0);
1305 /* create /proc/irq/prof_cpu_mask */
1306 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1308 if (!entry)
1309 return;
1311 entry->nlink = 1;
1312 entry->data = (void *)&prof_cpu_mask;
1313 entry->read_proc = prof_cpu_mask_read_proc;
1314 entry->write_proc = prof_cpu_mask_write_proc;
1316 /*
1317 * Create entries for all existing IRQs.
1318 */
1319 for (i = 0; i < NR_IRQS; i++) {
/* Skip IRQs with no controller; register_irq_proc rechecks too. */
1320 if (irq_descp(i)->handler == &no_irq_type)
1321 continue;
1322 register_irq_proc(i);
1325 #endif
1328 #ifdef XEN
1329 /*
1330 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
1331 */
1333 #define IRQ_MAX_GUESTS 7
/*
 * Per-IRQ bookkeeping for a physical IRQ bound to one or more guest
 * domains; stored in irq_desc[irq].action when IRQ_GUEST is set.
 */
1334 typedef struct {
1335 u8 nr_guests;
1336 u8 in_flight;
1337 u8 shareable;
1338 struct domain *guest[IRQ_MAX_GUESTS];
1339 } irq_guest_action_t;
1341 /*
1342 static void __do_IRQ_guest(int irq)
1344 irq_desc_t *desc = &irq_desc[irq];
1345 irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
1346 struct domain *d;
1347 int i;
1349 for ( i = 0; i < action->nr_guests; i++ )
1351 d = action->guest[i];
1352 if ( !test_and_set_bit(irq, &d->pirq_mask) )
1353 action->in_flight++;
1354 send_guest_pirq(d, irq);
1357 */
1358 int pirq_guest_unmask(struct domain *d)
/*
 * Scan domain d's pirq_mask for guest-bound physical IRQs that are
 * pending, and for each one whose bound event channel the guest has
 * unmasked, clear the pending bit and EOI the line once no guest still
 * has it in flight.  Always returns 0.
 */
1360 irq_desc_t *desc;
1361 int i, j, pirq;
1362 u32 m;
1363 shared_info_t *s = d->shared_info;
1365 for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
1367 m = d->pirq_mask[i];
/* Iterate over set bits of this mask word, lowest-numbered first. */
1368 while ( (j = ffs(m)) != 0 )
1370 m &= ~(1 << --j);
/* 32 bits per u32 mask word, hence the shift by 5. */
1371 pirq = (i << 5) + j;
1372 desc = &irq_desc[pirq];
1373 spin_lock_irq(&desc->lock);
/*
 * End the IRQ only if the guest has unmasked its event channel
 * and this was the last in-flight instance across all guests.
 */
1374 if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
1375 test_and_clear_bit(pirq, &d->pirq_mask) &&
1376 (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
1377 desc->handler->end(pirq);
1378 spin_unlock_irq(&desc->lock);
1382 return 0;
1385 int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
/*
 * Bind physical IRQ 'irq' to v's domain so it is delivered as a guest
 * pirq event.  will_share != 0 permits other domains to bind the same
 * line later.  Returns 0 on success; -EBUSY if the IRQ is owned by Xen,
 * is unshareable, or the guest list is full; -ENOMEM on allocation
 * failure.
 */
1387 irq_desc_t *desc = &irq_desc[irq];
1388 irq_guest_action_t *action;
1389 unsigned long flags;
1390 int rc = 0;
1392 spin_lock_irqsave(&desc->lock, flags);
1394 action = (irq_guest_action_t *)desc->action;
/* First binder: take the IRQ over from Xen and set up guest state. */
1396 if ( !(desc->status & IRQ_GUEST) )
1398 if ( desc->action != NULL )
1400 DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
1401 irq, desc->action->name);
1402 rc = -EBUSY;
1403 goto out;
/* Installing the (possibly NULL) pointer before checking is safe:
 * on failure we bail out with desc->action == NULL. */
1406 action = xmalloc(irq_guest_action_t);
1407 if ( (desc->action = (struct irqaction *)action) == NULL )
1409 DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
1410 rc = -ENOMEM;
1411 goto out;
1414 action->nr_guests = 0;
1415 action->in_flight = 0;
1416 action->shareable = will_share;
1418 desc->depth = 0;
1419 desc->status |= IRQ_GUEST;
1420 desc->status &= ~IRQ_DISABLED;
1421 desc->handler->startup(irq);
1423 /* Attempt to bind the interrupt target to the correct CPU. */
1424 #if 0 /* FIXME CONFIG_SMP ??? */
1425 if ( desc->handler->set_affinity != NULL )
1426 desc->handler->set_affinity(
1427 irq, apicid_to_phys_cpu_present(d->processor));
1428 #endif
/* Already guest-bound: refuse unless both sides agreed to share.
 * NOTE(review): this else-if pairs with the !(IRQ_GUEST) test above —
 * verify against the original braces, which this view does not show. */
1430 else if ( !will_share || !action->shareable )
1432 DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
1433 irq);
1434 rc = -EBUSY;
1435 goto out;
1438 if ( action->nr_guests == IRQ_MAX_GUESTS )
1440 DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
1441 rc = -EBUSY;
1442 goto out;
1445 action->guest[action->nr_guests++] = v->domain;
1447 out:
1448 spin_unlock_irqrestore(&desc->lock, flags);
1449 return rc;
1452 int pirq_guest_unbind(struct domain *d, int irq)
1454 irq_desc_t *desc = &irq_desc[irq];
1455 irq_guest_action_t *action;
1456 unsigned long flags;
1457 int i;
1459 spin_lock_irqsave(&desc->lock, flags);
1461 action = (irq_guest_action_t *)desc->action;
1463 if ( test_and_clear_bit(irq, &d->pirq_mask) &&
1464 (--action->in_flight == 0) )
1465 desc->handler->end(irq);
1467 if ( action->nr_guests == 1 )
1469 desc->action = NULL;
1470 xfree(action);
1471 desc->depth = 1;
1472 desc->status |= IRQ_DISABLED;
1473 desc->status &= ~IRQ_GUEST;
1474 desc->handler->shutdown(irq);
1476 else
1478 i = 0;
1479 while ( action->guest[i] != d )
1480 i++;
1481 memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
1482 action->nr_guests--;
1485 spin_unlock_irqrestore(&desc->lock, flags);
1486 return 0;
1489 #endif
1491 #ifdef XEN
1492 #ifdef IA64
1493 // this is a temporary hack until real console input is implemented
1494 extern void domain_pend_keyboard_interrupt(int irq);
1495 irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
/*
 * Interrupt handler for the temporary console-input hack: forward the
 * keyboard IRQ to the domain as a pending interrupt.  'nada' and
 * 'regs' are unused.
 */
1497 domain_pend_keyboard_interrupt(irq);
1498 return 0;
1501 void serial_input_init(void)
/*
 * Temporary simulator hack: hook the hard-coded keyboard IRQ up to
 * guest_forward_keyboard_input so console input reaches the domain.
 */
1503 int retval;
1504 int irq = 0x30; // FIXME
1506 retval = request_irq(irq,guest_forward_keyboard_input,SA_INTERRUPT,"siminput",NULL);
1507 if (retval) {
1508 printk("serial_input_init: broken request_irq call\n");
/* NOTE(review): spins forever on failure — acceptable only because
 * this path exists for the simulator; confirm before reuse. */
1509 while(1);
1512 #endif
1513 #endif