ia64/xen-unstable

view xen/arch/i386/irq.c @ 945:db2e1ea917df

bitkeeper revision 1.596.1.3 (3fb3b41eWUoRU0H8A0jEX5roXjxKkA)

Many files:
  Greatly simplified Xen softirqs. They are now only executed in outermost
  Xen activation; they are never called within an irq context.

author   kaf24@scramble.cl.cam.ac.uk
date     Thu Nov 13 16:41:02 2003 +0000 (2003-11-13)
parents  0f28cb35057d
children 7a554cbf0f58
/*
 * linux/arch/i386/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setup_irqs with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/errno.h>
#include <xeno/sched.h>
#include <xeno/interrupt.h>
#include <xeno/irq.h>
#include <xeno/slab.h>

#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/pgalloc.h>
#include <xeno/delay.h>
#include <xeno/timex.h>
#include <xeno/perfc.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
    { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED }};

#ifdef CONFIG_SMP
/* NB. XXX We'll want some way of fiddling with this from DOM0. */
unsigned long irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
#endif

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no-controller code.
 */

static void enable_none(unsigned int irq) { }

static unsigned int startup_none(unsigned int irq) { return 0; }

static void disable_none(unsigned int irq) { }

static void ack_none(unsigned int irq)
{
    /*
     * 'What should we do if we get a hw irq event on an illegal vector?'
     * Each architecture has to answer this itself; it doesn't deserve
     * a generic callback, I think.
     */
#if CONFIG_X86
    printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
    /*
     * Currently unexpected vectors happen only on SMP and APIC.
     * We _must_ ack these because every local APIC has only N
     * irq slots per priority level, and a 'hanging, unacked' IRQ
     * holds up an irq slot - in excessive cases (when multiple
     * unexpected vectors occur) that might lock up the APIC
     * completely.
     */
    ack_APIC_irq();
#endif
#endif
}

/* startup is the same as "enable", shutdown is the same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};
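
/*
 * Illustrative sketch (added for exposition; not part of the original
 * file): supporting a new interrupt controller means supplying its own
 * 'controller-template', shaped exactly like no_irq_type above. All of
 * the names below are hypothetical:
 *
 *     static struct hw_interrupt_type example_pic_type = {
 *         "EXAMPLE-PIC",
 *         startup_example_irq,
 *         shutdown_example_irq,
 *         enable_example_irq,
 *         disable_example_irq,
 *         ack_example_irq,
 *         end_example_irq
 *     };
 *
 * The generic code below then dispatches through desc->handler->ack(),
 * ->enable() and so on, without knowing which controller is behind the
 * line.
 */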
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif

/*
 * Generic, controller-independent functions:
 */

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned char global_irq_holder = 0xff;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */

#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially, like here?
 *
 * Such 'high frequency update' races can be avoided by careful design,
 * but some of our major constructs, like spinlocks, use similar
 * techniques; it would be nice to clarify this issue. Set this define
 * to 0 if you want to check whether your system freezes. I suspect the
 * delay done by SYNC_OTHER_CORES() correlates with 'snooping latency',
 * but I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli.
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
static inline void wait_on_irq(int cpu)
{
    for (;;) {

        /*
         * Wait until all interrupts are gone. Wait
         * for bottom half handlers unless we're
         * already executing in one..
         */
        if (!irqs_running())
            if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
                break;

        /* Duh, we have to loop. Release the lock to avoid deadlocks */
        clear_bit(0,&global_irq_lock);

        for (;;) {
            __sti();
            SYNC_OTHER_CORES(cpu);
            __cli();
            if (irqs_running())
                continue;
            if (global_irq_lock)
                continue;
            if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
                continue;
            if (!test_and_set_bit(0,&global_irq_lock))
                break;
        }
    }
}
/*
 * This is called when we want to synchronize with interrupts. We may,
 * for example, tell a device to stop sending interrupts: but to make
 * sure there are no interrupts executing on another CPU we need to
 * call this function.
 */
void synchronize_irq(void)
{
    if (irqs_running()) {
        /* Stupid approach */
        cli();
        sti();
    }
}
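
/*
 * Illustrative sketch (added; not part of the original file): the
 * typical quiesce sequence in a driver teardown path, where
 * 'example_mask_device_irq' is a hypothetical helper that stops the
 * card itself from raising interrupts:
 *
 *     example_mask_device_irq(dev);    no new interrupts at the source
 *     synchronize_irq();               wait out handlers already running
 *
 * After the pair, no handler for the device is executing on any CPU.
 */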
static inline void get_irqlock(int cpu)
{
    if (test_and_set_bit(0,&global_irq_lock)) {
        /* Do we already hold the lock? */
        if ((unsigned char) cpu == global_irq_holder)
            return;
        /* Uhhuh.. Somebody else got it. Wait.. */
        do {
            do {
                rep_nop();
            } while (test_bit(0,&global_irq_lock));
        } while (test_and_set_bit(0,&global_irq_lock));
    }
    /*
     * We also need to make sure that nobody else is running
     * in an interrupt context.
     */
    wait_on_irq(cpu);

    /*
     * Ok, finally..
     */
    global_irq_holder = cpu;
}
#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
    unsigned int flags;

    __save_flags(flags);
    if (flags & (1 << EFLAGS_IF_SHIFT)) {
        int cpu = smp_processor_id();
        __cli();
        if (!local_irq_count(cpu))
            get_irqlock(cpu);
    }
}

void __global_sti(void)
{
    int cpu = smp_processor_id();

    if (!local_irq_count(cpu))
        release_irqlock(cpu);
    __sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
    int retval;
    int local_enabled;
    unsigned long flags;
    int cpu = smp_processor_id();

    __save_flags(flags);
    local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
    /* default to local */
    retval = 2 + local_enabled;

    /* check for global flags if we're not in an interrupt */
    if (!local_irq_count(cpu)) {
        if (local_enabled)
            retval = 1;
        if (global_irq_holder == cpu)
            retval = 0;
    }
    return retval;
}

void __global_restore_flags(unsigned long flags)
{
    switch (flags) {
    case 0:
        __global_cli();
        break;
    case 1:
        __global_sti();
        break;
    case 2:
        __cli();
        break;
    case 3:
        __sti();
        break;
    default:
        printk("global_restore_flags: %08lx (%08lx)\n",
               flags, (&flags)[-1]);
    }
}
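
/*
 * Illustrative sketch (added; not part of the original file): on SMP
 * builds the save_flags()/restore_flags() macros typically resolve to
 * the two functions above, so the usual idiom round-trips one of the
 * four values documented there:
 *
 *     unsigned long flags;
 *
 *     save_flags(flags);       yields 0..3 (global/local x cli/sti)
 *     cli();                   global cli when outside irq context
 *     ... critical section ...
 *     restore_flags(flags);    re-establish exactly the saved state
 */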
#endif

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
                     struct irqaction *action)
{
    int status;
    int cpu = smp_processor_id();

    irq_enter(cpu, irq);

    status = 1; /* Force the "do bottom halves" bit */

    /* Unless the handler demands irqs off, run the chain with irqs on. */
    if (!(action->flags & SA_INTERRUPT))
        __sti();

    do {
        status |= action->flags;
        action->handler(irq, action->dev_id, regs);
        action = action->next;
    } while (action);

    __cli();

    irq_exit(cpu, irq);

    return status;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
    irq_desc_t *desc = irq_desc + irq;
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    if (!desc->depth++) {
        desc->status |= IRQ_DISABLED;
        desc->handler->disable(irq);
    }
    spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
    disable_irq_nosync(irq);

    if (!local_irq_count(smp_processor_id())) {
        do {
            barrier();
            cpu_relax();
        } while (irq_desc[irq].status & IRQ_INPROGRESS);
    }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
    irq_desc_t *desc = irq_desc + irq;
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    switch (desc->depth) {
    case 1: {
        unsigned int status = desc->status & ~IRQ_DISABLED;
        desc->status = status;
        if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
            desc->status = status | IRQ_REPLAY;
            hw_resend_irq(desc->handler,irq);
        }
        desc->handler->enable(irq);
        /* fall-through */
    }
    default:
        desc->depth--;
        break;
    case 0:
        printk("enable_irq(%u) unbalanced from %p\n", irq,
               __builtin_return_address(0));
    }
    spin_unlock_irqrestore(&desc->lock, flags);
}
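
/*
 * Illustrative sketch (added; not part of the original file): disables
 * nest, so every disable_irq() must be balanced by exactly one
 * enable_irq(). 'example_reprogram_device' is a hypothetical helper:
 *
 *     disable_irq(dev_irq);              also waits for running handlers
 *     example_reprogram_device(dev);     handler cannot run here
 *     enable_irq(dev_irq);               depth back to 0, line live again
 *
 * Beware the deadlock noted above: do not call disable_irq() while
 * holding a lock that the handler itself takes.
 */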
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
    /*
     * We ack quickly, we don't want the irq controller
     * thinking we're snobs just because some other CPU has
     * disabled global interrupts (we have already done the
     * INT_ACK cycles, it's too late to try to pretend to the
     * controller that we aren't taking the interrupt).
     *
     * A 0 return value means that this irq is already being
     * handled by some other CPU (or is disabled).
     */
    int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
    int cpu = smp_processor_id();
    irq_desc_t *desc = irq_desc + irq;
    struct irqaction *action;
    unsigned int status;

    u32 cc_start, cc_end;

    perfc_incra(irqs, cpu);
    rdtscl(cc_start);

    spin_lock(&desc->lock);
    desc->handler->ack(irq);
    /*
     * REPLAY is when Linux resends an IRQ that was dropped earlier.
     * WAITING is used by probe to mark irqs that are being tested.
     */
    status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
    status |= IRQ_PENDING; /* we _want_ to handle it */

    /*
     * If the IRQ is disabled for whatever reason, we cannot use the
     * action we have.
     */
    action = NULL;
    if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
        action = desc->action;
        status &= ~IRQ_PENDING; /* we commit to handling */
        status |= IRQ_INPROGRESS; /* we are handling it */
    }
    desc->status = status;

    /*
     * If there is no IRQ handler or it was disabled, exit early. Since
     * we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of
     * it.
     */
    if (!action)
        goto out;

    /*
     * Edge triggered interrupts need to remember pending events. This
     * applies to any hw interrupts that allow a second instance of the
     * same irq to arrive while we are in do_IRQ or in the handler. But
     * the code here only handles the _second_ instance of the irq, not
     * the third or fourth. So it is mostly useful for irq hardware that
     * does not mask cleanly in an SMP environment.
     */
    for (;;) {
        spin_unlock(&desc->lock);
        handle_IRQ_event(irq, &regs, action);
        spin_lock(&desc->lock);

        if (!(desc->status & IRQ_PENDING))
            break;
        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;
 out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    rdtscl(cc_end);

    if ( !action || (!(action->flags & SA_NOPROFILE)) )
    {
        perfc_adda(irq_time, cpu, cc_end - cc_start);
#ifndef NDEBUG
        if ( (cc_end - cc_start) > (cpu_khz * 100) ) /* i.e. > 100ms */
            printk("Long interrupt %08x -> %08x\n", cc_start, cc_end);
#endif
    }

    return 1;
}
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ     Interrupt is shared
 *
 * SA_INTERRUPT Disable local interrupts while processing
 */
int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char *devname,
                void *dev_id)
{
    int retval;
    struct irqaction *action;

    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = (struct irqaction *)
        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->flags = irqflags;
    action->mask = 0;
    action->name = devname;
    action->next = NULL;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        kfree(action);

    return retval;
}
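
/*
 * Illustrative sketch (added; not part of the original file): a minimal
 * claim/release cycle, compiled out with #if 0. 'example_intr',
 * 'example_attach' and 'ex_dev' are hypothetical names; the dev_id
 * cookie passed to request_irq() must be passed again to free_irq(),
 * which is what identifies the handler on a shared line.
 */
#if 0
static void example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
    /* Clear the interrupt condition on the board here. */
}

static int example_attach(unsigned int irq, void *ex_dev)
{
    int rc = request_irq(irq, example_intr, SA_SHIRQ, "example", ex_dev);
    if ( rc )
        return rc;          /* -EINVAL, -EBUSY or -ENOMEM */
    /* ... device runs; later, tear down: ... */
    free_irq(irq, ex_dev);
    return 0;
}
#endif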
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function may be called from interrupt context.
 *
 * Bugs: Attempting to free an irq in a handler for the same irq hangs
 * the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
    irq_desc_t *desc;
    struct irqaction **p;
    unsigned long flags;

    if (irq >= NR_IRQS)
        return;

    desc = irq_desc + irq;
    spin_lock_irqsave(&desc->lock,flags);
    p = &desc->action;
    for (;;) {
        struct irqaction *action = *p;
        if (action) {
            struct irqaction **pp = p;
            p = &action->next;
            if (action->dev_id != dev_id)
                continue;

            /* Found it - now remove it from the list of entries */
            *pp = action->next;
            if (!desc->action) {
                desc->status |= IRQ_DISABLED;
                desc->handler->shutdown(irq);
            }
            spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
            /* Wait to make sure it's not being used on another CPU */
            while (desc->status & IRQ_INPROGRESS) {
                barrier();
                cpu_relax();
            }
#endif
            kfree(action);
            return;
        }
        printk("Trying to free free IRQ%d\n",irq);
        spin_unlock_irqrestore(&desc->lock,flags);
        return;
    }
}
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
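
/*
 * Illustrative sketch (added; not part of the original file): the
 * classic autodetect sequence a legacy driver would run, where
 * 'example_trigger_irq' is a hypothetical helper that makes the card
 * raise exactly one interrupt:
 *
 *     unsigned long mask;
 *     int irq;
 *
 *     mask = probe_irq_on();           arm all unassigned lines
 *     example_trigger_irq(dev);        provoke the interrupt
 *     udelay(100);                     give it time to arrive
 *     irq = probe_irq_off(mask);       >0 found, 0 none, <0 ambiguous
 */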
static spinlock_t probe_sem = SPIN_LOCK_UNLOCKED;

/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
    unsigned int i;
    irq_desc_t *desc;
    unsigned long val;
    unsigned long s=0, e=0;

    spin_lock(&probe_sem);
    /*
     * Something may have generated an irq long ago and we want to
     * flush such a longstanding irq before considering it as spurious.
     */
    for (i = NR_IRQS-1; i > 0; i--) {
        desc = irq_desc + i;

        spin_lock_irq(&desc->lock);
        if (!irq_desc[i].action)
            irq_desc[i].handler->startup(i);
        spin_unlock_irq(&desc->lock);
    }

    /* Wait for longstanding interrupts to trigger (20ms delay). */
    rdtscl(s);
    do {
        synchronize_irq();
        rdtscl(e);
    } while ( ((e-s)/ticks_per_usec) < 20000 );

    /*
     * Enable any unassigned irqs. (We must start up again here because
     * if a longstanding irq happened in the previous stage, it may have
     * masked itself.)
     */
    for (i = NR_IRQS-1; i > 0; i--) {
        desc = irq_desc + i;

        spin_lock_irq(&desc->lock);
        if (!desc->action) {
            desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
            if (desc->handler->startup(i))
                desc->status |= IRQ_PENDING;
        }
        spin_unlock_irq(&desc->lock);
    }

    /*
     * Wait for spurious interrupts to trigger (100ms delay).
     */
    rdtscl(s);
    do {
        synchronize_irq();
        rdtscl(e);
    } while ( ((e-s)/ticks_per_usec) < 100000 );

    /*
     * Now filter out any obviously spurious interrupts.
     */
    val = 0;
    for (i = 0; i < NR_IRQS; i++) {
        irq_desc_t *desc = irq_desc + i;
        unsigned int status;

        spin_lock_irq(&desc->lock);
        status = desc->status;

        if (status & IRQ_AUTODETECT) {
            /* It triggered already - consider it spurious. */
            if (!(status & IRQ_WAITING)) {
                desc->status = status & ~IRQ_AUTODETECT;
                desc->handler->shutdown(i);
            } else
                if (i < 32)
                    val |= 1 << i;
        }
        spin_unlock_irq(&desc->lock);
    }

    return val;
}
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @val: mask of interrupts to consider
 *
 * Scan the ISA bus interrupt lines and return a bitmap of
 * active interrupts. The interrupt probe logic state is then
 * returned to its previous value.
 *
 * Note: we need to scan all the irqs even though we will
 * only return ISA irq numbers - just so that we reset them
 * all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
    int i;
    unsigned int mask;

    mask = 0;
    for (i = 0; i < NR_IRQS; i++) {
        irq_desc_t *desc = irq_desc + i;
        unsigned int status;

        spin_lock_irq(&desc->lock);
        status = desc->status;

        if (status & IRQ_AUTODETECT) {
            if (i < 16 && !(status & IRQ_WAITING))
                mask |= 1 << i;

            desc->status = status & ~IRQ_AUTODETECT;
            desc->handler->shutdown(i);
        }
        spin_unlock_irq(&desc->lock);
    }
    spin_unlock(&probe_sem);

    return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 * probe_irq_off - end an interrupt autodetect
 * @val: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
    int i, irq_found, nr_irqs;

    nr_irqs = 0;
    irq_found = 0;
    for (i = 0; i < NR_IRQS; i++) {
        irq_desc_t *desc = irq_desc + i;
        unsigned int status;

        spin_lock_irq(&desc->lock);
        status = desc->status;

        if (status & IRQ_AUTODETECT) {
            if (!(status & IRQ_WAITING)) {
                if (!nr_irqs)
                    irq_found = i;
                nr_irqs++;
            }
            desc->status = status & ~IRQ_AUTODETECT;
            desc->handler->shutdown(i);
        }
        spin_unlock_irq(&desc->lock);
    }
    spin_unlock(&probe_sem);

    if (nr_irqs > 1)
        irq_found = -irq_found;
    return irq_found;
}
/* This was setup_x86_irq, but it seems pretty generic. */
int setup_irq(unsigned int irq, struct irqaction *new)
{
    int shared = 0;
    unsigned long flags;
    struct irqaction *old, **p;
    irq_desc_t *desc = irq_desc + irq;

    /*
     * The following block of code has to be executed atomically.
     */
    spin_lock_irqsave(&desc->lock,flags);
    p = &desc->action;
    if ((old = *p) != NULL) {
        /* Can't share interrupts unless both agree to. */
        if (!(old->flags & new->flags & SA_SHIRQ)) {
            spin_unlock_irqrestore(&desc->lock,flags);
            return -EBUSY;
        }

        /* Add new interrupt at end of irq queue. */
        do {
            p = &old->next;
            old = *p;
        } while (old);
        shared = 1;
    }

    *p = new;

    if (!shared) {
        desc->depth = 0;
        desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
        desc->handler->startup(irq);
    }
    spin_unlock_irqrestore(&desc->lock,flags);

    return 0;
}