ia64/xen-unstable
linux-2.4.27-xen-sparse/arch/xen/kernel/irq.c @ 2621:9402048e2325

bitkeeper revision 1.1159.1.218 (416a8128OiHXHyk_Sy8FsA0YUQcEnA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-26dom0
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-26dom0

author    cl349@freefall.cl.cam.ac.uk
date      Mon Oct 11 12:48:40 2004 +0000
parents   869c20f2977b
children  38c450a5a8a3 13728122c78d
/*
 *      linux/arch/i386/kernel/irq.c
 *
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
        { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
#if CONFIG_X86
        printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         */
        ack_APIC_irq();
#endif
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
        "none",
        startup_none,
        shutdown_none,
        enable_none,
        disable_none,
        ack_none,
        end_none
};
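
/*
 * Illustrative sketch (added for exposition, not in the original file):
 * a real controller driver supplies its own hw_interrupt_type, and the
 * generic layer calls through it.  The my_pic_* names are hypothetical;
 * the positional initializer mirrors no_irq_type above.
 */
#if 0
static unsigned int my_pic_startup(unsigned int irq)  { /* unmask the line */ return 0; }
static void my_pic_shutdown(unsigned int irq)         { /* mask the line */ }
static void my_pic_enable(unsigned int irq)           { /* unmask the line */ }
static void my_pic_disable(unsigned int irq)          { /* mask the line */ }
static void my_pic_ack(unsigned int irq)              { /* quiesce the source */ }
static void my_pic_end(unsigned int irq)              { /* unmask unless IRQ_DISABLED */ }

static struct hw_interrupt_type my_pic_type = {
        "MY-PIC",
        my_pic_startup,
        my_pic_shutdown,
        my_pic_enable,
        my_pic_disable,
        my_pic_ack,
        my_pic_end
};

/* arch setup code then wires it up: irq_desc[irq].handler = &my_pic_type; */
#endif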
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i, j;
        struct irqaction * action;

        seq_printf(p, "           ");
        for (j=0; j<smp_num_cpus; j++)
                seq_printf(p, "CPU%d       ",j);
        seq_putc(p,'\n');

        for (i = 0 ; i < NR_IRQS ; i++) {
                action = irq_desc[i].action;
                if (!action)
                        continue;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < smp_num_cpus; j++)
                        seq_printf(p, "%10u ",
                                kstat.irqs[cpu_logical_map(j)][i]);
#endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p,'\n');
        }
        seq_printf(p, "NMI: ");
        for (j = 0; j < smp_num_cpus; j++)
                seq_printf(p, "%10u ",
                        nmi_count(cpu_logical_map(j)));
        seq_printf(p, "\n");
#if CONFIG_X86_LOCAL_APIC
        seq_printf(p, "LOC: ");
        for (j = 0; j < smp_num_cpus; j++)
                seq_printf(p, "%10u ",
                        apic_timer_irqs[cpu_logical_map(j)]);
        seq_printf(p, "\n");
#endif
        seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
        seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif

        return 0;
}

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */

extern void show_stack(unsigned long* esp);

static void show(char * str)
{
        int i;
        int cpu = smp_processor_id();

        printk("\n%s, CPU %d:\n", str, cpu);
        printk("irq:  %d [",irqs_running());
        for(i=0;i < smp_num_cpus;i++)
                printk(" %d",local_irq_count(i));
        printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
        for(i=0;i < smp_num_cpus;i++)
                printk(" %d",local_bh_count(i));

        printk(" ]\nStack dumps:");
        for(i = 0; i < smp_num_cpus; i++) {
                unsigned long esp;
                if (i == cpu)
                        continue;
                printk("\nCPU %d:",i);
                esp = init_tss[i].esp0;
                if (!esp) {
                        /* tss->esp0 is set to NULL in cpu_init(),
                         * it's initialized when the cpu returns to user
                         * space. -- manfreds
                         */
                        printk(" <unknown> ");
                        continue;
                }
                esp &= ~(THREAD_SIZE-1);
                esp += sizeof(struct task_struct);
                show_stack((void*)esp);
        }
        printk("\nCPU %d:",cpu);
        show_stack(NULL);
        printk("\n");
}

#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques;
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif

static inline void wait_on_irq(int cpu)
{
        int count = MAXCOUNT;

        for (;;) {

                /*
                 * Wait until all interrupts are gone. Wait
                 * for bottom half handlers unless we're
                 * already executing in one..
                 */
                if (!irqs_running())
                        if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
                                break;

                /* Duh, we have to loop. Release the lock to avoid deadlocks */
                clear_bit(0,&global_irq_lock);

                for (;;) {
                        if (!--count) {
                                show("wait_on_irq");
                                count = ~0;
                        }
                        __sti();
                        SYNC_OTHER_CORES(cpu);
                        __cli();
                        if (irqs_running())
                                continue;
                        if (global_irq_lock)
                                continue;
                        if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
                                continue;
                        if (!test_and_set_bit(0,&global_irq_lock))
                                break;
                }
        }
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
        if (irqs_running()) {
                /* Stupid approach */
                cli();
                sti();
        }
}
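
/*
 * Illustrative sketch (added for exposition; the device names are
 * hypothetical): a typical caller quiesces the hardware first, then uses
 * synchronize_irq() to wait out any handler instance still running on
 * another CPU before tearing down shared state.
 */
#if 0
        writel(0, dev->regs + MYDEV_INTR_ENABLE); /* stop the device interrupting */
        synchronize_irq();                        /* wait for in-flight handlers */
        /* from here on, no handler can still be touching dev's state */
#endif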
static inline void get_irqlock(int cpu)
{
        if (test_and_set_bit(0,&global_irq_lock)) {
                /* do we already hold the lock? */
                if ((unsigned char) cpu == global_irq_holder)
                        return;
                /* Uhhuh.. Somebody else got it. Wait.. */
                do {
                        do {
                                rep_nop();
                        } while (test_bit(0,&global_irq_lock));
                } while (test_and_set_bit(0,&global_irq_lock));
        }
        /*
         * We also need to make sure that nobody else is running
         * in an interrupt context.
         */
        wait_on_irq(cpu);

        /*
         * Ok, finally..
         */
        global_irq_holder = cpu;
}

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
        unsigned int flags;

        __save_flags(flags);
        if (!flags) {
                int cpu = smp_processor_id();
                __cli();
                if (!local_irq_count(cpu))
                        get_irqlock(cpu);
        }
}

void __global_sti(void)
{
        int cpu = smp_processor_id();

        if (!local_irq_count(cpu))
                release_irqlock(cpu);
        __sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
        int retval;
        int local_enabled;
        unsigned long flags;
        int cpu = smp_processor_id();

        __save_flags(flags);
        local_enabled = !flags;
        /* default to local */
        retval = 2 + local_enabled;

        /* check for global flags if we're not in an interrupt */
        if (!local_irq_count(cpu)) {
                if (local_enabled)
                        retval = 1;
                if (global_irq_holder == cpu)
                        retval = 0;
        }
        return retval;
}

void __global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_cli();
                break;
        case 1:
                __global_sti();
                break;
        case 2:
                __cli();
                break;
        case 3:
                __sti();
                break;
        default:
                printk("global_restore_flags: %08lx (%08lx)\n",
                        flags, (&flags)[-1]);
        }
}
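
/*
 * Illustrative sketch (added for exposition): drivers never see the 0..3
 * encoding above directly.  They just bracket a critical region with
 * save_flags()/cli()/restore_flags(); on SMP those macros resolve to
 * __global_save_flags()/__global_cli()/__global_restore_flags().
 */
#if 0
        unsigned long flags;

        save_flags(flags);      /* records one of the 0..3 states above */
        cli();                  /* may take the global irq lock */
        /* ... touch data shared with interrupt handlers ... */
        restore_flags(flags);   /* replays exactly the saved state */
#endif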
#endif

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
        int status;
        int cpu = smp_processor_id();

        irq_enter(cpu, irq);

        status = 1;     /* Force the "do bottom halves" bit */

        if (!(action->flags & SA_INTERRUPT))
                __sti();

        do {
                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        __cli();

        irq_exit(cpu, irq);

        return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Disables and enables are
 *      nested.
 *      Unlike disable_irq(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 *
 *      This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Enables and disables are
 *      nested.
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning. If you use this function while
 *      holding a resource the IRQ handler may need you will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        disable_irq_nosync(irq);

        if (!local_irq_count(smp_processor_id())) {
                do {
                        barrier();
                        cpu_relax();
                } while (irq_desc[irq].status & IRQ_INPROGRESS);
        }
}

/**
 *      enable_irq - enable handling of an irq
 *      @irq: Interrupt to enable
 *
 *      Undoes the effect of one call to disable_irq().  If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 *
 *      This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk("enable_irq(%u) unbalanced from %p\n", irq,
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}
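
/*
 * Illustrative sketch (added for exposition): because disables nest via
 * desc->depth, independent disable/enable pairs may overlap; only the
 * enable matching the first disable actually unmasks the line.
 */
#if 0
        disable_irq(irq);       /* depth 0 -> 1: line masked */
        disable_irq(irq);       /* depth 1 -> 2: already masked */
        enable_irq(irq);        /* depth 2 -> 1: still masked */
        enable_irq(irq);        /* depth 1 -> 0: unmasked, pending irq replayed */
#endif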
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * 0 return value means that this irq is already being
         * handled by some other CPU. (or is disabled)
         */
        int cpu = smp_processor_id();
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction * action;
        unsigned int status;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long esp;

        /* Debugging check for stack overflow: is there less than 1KB free? */
        __asm__ __volatile__("andl %%esp,%0" : "=r" (esp) : "0" (8191));
        if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
                extern void show_stack(unsigned long *);

                printk("do_IRQ: stack overflow: %ld\n",
                        esp - sizeof(struct task_struct));
                __asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
                show_stack((void *)esp);
        }
#endif

        kstat.irqs[cpu][irq]++;
        spin_lock(&desc->lock);
        desc->handler->ack(irq);
        /*
           REPLAY is when Linux resends an IRQ that was dropped earlier
           WAITING is used by probe to mark irqs that are being tested
        */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (!action)
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                spin_unlock(&desc->lock);
                handle_IRQ_event(irq, regs, action);
                spin_lock(&desc->lock);

                if (!(desc->status & IRQ_PENDING))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        if (softirq_pending(cpu))
                do_softirq();
        return 1;
}

/**
 *      request_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs
 *      @irqflags: Interrupt type flags
 *      @devname: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling. From the point this
 *      call is made your handler function may be invoked. Since
 *      your handler function must clear any interrupt the board
 *      raises, you must take care both to initialise your hardware
 *      and to set up the interrupt handler in the right order.
 *
 *      Dev_id must be globally unique. Normally the address of the
 *      device data structure is used as the cookie. Since the handler
 *      receives this value it makes sense to use it.
 *
 *      If your interrupt is shared you must pass a non NULL dev_id
 *      as this is required when freeing the interrupt.
 *
 *      Flags:
 *
 *      SA_SHIRQ                Interrupt is shared
 *
 *      SA_INTERRUPT            Disable local interrupts while processing
 *
 *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if (irqflags & SA_SHIRQ) {
                if (!dev_id)
                        printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
        }
#endif

        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}
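
/*
 * Illustrative sketch (added for exposition, not in the original file):
 * how a driver typically claims a shared line.  The mydev names are
 * hypothetical.  dev_id is both the cookie handed back to the handler
 * and the key free_irq() later uses to find this handler on the line.
 */
#if 0
static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct mydev *dev = dev_id;
        /* check that our board raised this irq (shared line!), then clear it */
}

        /* in open()/probe: hardware initialised first, handler installed last */
        if (request_irq(dev->irq, mydev_interrupt,
                        SA_SHIRQ | SA_SAMPLE_RANDOM, "mydev", dev))
                return -EBUSY;

        /* in close()/remove: */
        free_irq(dev->irq, dev);
#endif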
/**
 *      free_irq - free an interrupt
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove an interrupt handler. The handler is removed and if the
 *      interrupt line is no longer in use by any driver it is disabled.
 *      On a shared IRQ the caller must ensure the interrupt is disabled
 *      on the card it drives before calling this function. The function
 *      does not return until any executing interrupts for this IRQ
 *      have completed.
 *
 *      This function may be called from interrupt context.
 *
 *      Bugs: Attempting to free an irq in a handler for the same irq hangs
 *      the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
                        /* Wait to make sure it's not being used on another CPU */
                        while (desc->status & IRQ_INPROGRESS) {
                                barrier();
                                cpu_relax();
                        }
#endif
#define SA_STATIC_ACTION        0x01000000      /* Is it our duty to free the action? */
                        if (!(action->flags & SA_STATIC_ACTION))
                                kfree(action);
                        return;
                }
                printk("Trying to free free IRQ%d\n",irq);
                spin_unlock_irqrestore(&desc->lock,flags);
                return;
        }
}

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *      probe_irq_on    - begin an interrupt autodetect
 *
 *      Commence probing for an interrupt. The interrupts are scanned
 *      and a mask of potential interrupt lines is returned.
 *
 */
unsigned long probe_irq_on(void)
{
        unsigned int i;
        irq_desc_t *desc;
        unsigned long val;
        unsigned long delay;

        down(&probe_sem);
        /*
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious.
         */
        for (i = NR_PIRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!irq_desc[i].action)
                        irq_desc[i].handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ synchronize_irq();

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for (i = NR_PIRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ synchronize_irq();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i = 0; i < NR_PIRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else
                                if (i < 32)
                                        val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *      probe_irq_mask - scan a bitmap of interrupt lines
 *      @val:   mask of interrupts to consider
 *
 *      Scan the ISA bus interrupt lines and return a bitmap of
 *      active interrupts. The interrupt probe logic state is then
 *      returned to its previous value.
 *
 *      Note: we need to scan all the irqs even though we will
 *      only return ISA irq numbers - just so that we reset them
 *      all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < NR_PIRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (i < 16 && !(status & IRQ_WAITING))
                                mask |= 1 << i;

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *      probe_irq_off   - end an interrupt autodetect
 *      @val: mask of potential interrupts (unused)
 *
 *      Scans the unused interrupt lines and returns the line which
 *      appears to have triggered the interrupt. If no interrupt was
 *      found then zero is returned. If more than one interrupt is
 *      found then minus the first candidate is returned to indicate
 *      there is doubt.
 *
 *      The interrupt probe logic state is returned to its previous
 *      value.
 *
 *      BUGS: When used in a module (which arguably shouldn't happen)
 *      nothing prevents two IRQ probe callers from overlapping. The
 *      results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i = 0; i < NR_PIRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}
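
/*
 * Illustrative sketch (added for exposition; mydev_* is hypothetical):
 * the canonical autodetect sequence built from probe_irq_on() and
 * probe_irq_off().
 */
#if 0
        unsigned long mask;
        int irq;

        mask = probe_irq_on();          /* arm all unassigned lines */
        mydev_trigger_interrupt(dev);   /* make the board raise its irq */
        mdelay(10);                     /* give the interrupt time to arrive */
        irq = probe_irq_off(mask);      /* >0 found, 0 none, <0 ambiguous */
#endif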
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        unsigned long flags;
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_desc + irq;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock,flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);

        register_irq_proc(irq);
        return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#define HEX_DIGITS 8

static int parse_hex_value (const char *buffer,
                unsigned long count, unsigned long *ret)
{
        unsigned char hexnum [HEX_DIGITS];
        unsigned long value;
        int i;

        if (!count)
                return -EINVAL;
        if (count > HEX_DIGITS)
                count = HEX_DIGITS;
        if (copy_from_user(hexnum, buffer, count))
                return -EFAULT;

        /*
         * Parse the first 8 characters as a hex string, any non-hex char
         * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
         */
        value = 0;

        for (i = 0; i < count; i++) {
                unsigned int c = hexnum[i];

                switch (c) {
                        case '0' ... '9': c -= '0'; break;
                        case 'a' ... 'f': c -= 'a'-10; break;
                        case 'A' ... 'F': c -= 'A'-10; break;
                default:
                        goto out;
                }
                value = (value << 4) | c;
        }
out:
        *ret = value;
        return 0;
}

#if CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
                                        unsigned long count, void *data)
{
        int irq = (long) data, full_count = count, err;
        unsigned long new_value;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!(new_value & cpu_online_map))
                return -EINVAL;

        irq_affinity[irq] = new_value;
        irq_desc[irq].handler->set_affinity(irq, new_value);

        return full_count;
}

#endif
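
/*
 * Illustrative usage (added for exposition): the affinity mask is a hex
 * bitmap of allowed CPUs, driven from userspace, e.g. pinning IRQ 19 to
 * CPU 1:
 *
 *      # echo 2 > /proc/irq/19/smp_affinity
 *      # cat /proc/irq/19/smp_affinity
 *      00000002
 */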
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        unsigned long *mask = (unsigned long *) data;
        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
                                        unsigned long count, void *data)
{
        unsigned long *mask = (unsigned long *) data, full_count = count, err;
        unsigned long new_value;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
                        irq_dir[irq])
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#if CONFIG_SMP
        {
                struct proc_dir_entry *entry;

                /* create /proc/irq/1234/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

                if (entry) {
                        entry->nlink = 1;
                        entry->data = (void *)(long)irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }

                smp_affinity_entry[irq] = entry;
        }
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
        struct proc_dir_entry *entry;
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", 0);

        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

        if (!entry)
                return;

        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < NR_IRQS; i++)
                register_irq_proc(i);
}