ia64/xen-unstable

changeset 10336:6fb0d5ad63d7

[LINUX] Only trigger the unhandled-IRQ path if the IRQ is not shared across
multiple guests (another guest may have handled the interrupt).
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jun 08 10:11:04 2006 +0100 (2006-06-08)
parents 8c64169a05d3
children fe35ddc5fd38
files linux-2.6-xen-sparse/drivers/xen/Kconfig linux-2.6-xen-sparse/drivers/xen/core/evtchn.c linux-2.6-xen-sparse/include/linux/interrupt.h linux-2.6-xen-sparse/kernel/irq/spurious.c
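In outline, the change has two halves: a Xen-specific helper in evtchn.c that asks the hypervisor whether a physical IRQ is shared with another guest, and a hook in the generic spurious-interrupt accounting in spurious.c that skips the unhandled count for such IRQs. The following is a condensed sketch of that control flow, simplified from the hunks below rather than a standalone build unit:

    /* Xen side: query the hypervisor for the shared status of a physical IRQ. */
    int irq_ignore_unhandled(unsigned int irq)
    {
            struct physdev_irq_status_query irq_status = { .irq = irq };

            (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
            return !!(irq_status.flags & XENIRQSTAT_shared);
    }

    /* Generic side (note_interrupt): only count the event as unhandled if the
     * IRQ is not shared with another guest, which may already have serviced it. */
    if (action_ret != IRQ_HANDLED) {
            if (!irq_ignore_unhandled(irq))
                    desc->irqs_unhandled++;
            ...
    }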
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu Jun 08 09:52:04 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig	Thu Jun 08 10:11:04 2006 +0100
     1.3 @@ -224,6 +224,10 @@ config HAVE_ARCH_DEV_ALLOC_SKB
     1.4  	bool
     1.5  	default y
     1.6  
     1.7 +config HAVE_IRQ_IGNORE_UNHANDLED
     1.8 +	bool
     1.9 +	default y
    1.10 +
    1.11  config NO_IDLE_HZ
    1.12  	bool
    1.13  	default y
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Thu Jun 08 09:52:04 2006 +0100
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Thu Jun 08 10:11:04 2006 +0100
     2.3 @@ -678,6 +678,13 @@ static struct hw_interrupt_type pirq_typ
     2.4  	set_affinity_irq
     2.5  };
     2.6  
     2.7 +int irq_ignore_unhandled(unsigned int irq)
     2.8 +{
     2.9 +	struct physdev_irq_status_query irq_status = { .irq = irq };
    2.10 +	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
    2.11 +	return !!(irq_status.flags & XENIRQSTAT_shared);
    2.12 +}
    2.13 +
    2.14  void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i)
    2.15  {
    2.16  	int evtchn = evtchn_from_irq(i);
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/linux-2.6-xen-sparse/include/linux/interrupt.h	Thu Jun 08 10:11:04 2006 +0100
     3.3 @@ -0,0 +1,301 @@
     3.4 +/* interrupt.h */
     3.5 +#ifndef _LINUX_INTERRUPT_H
     3.6 +#define _LINUX_INTERRUPT_H
     3.7 +
     3.8 +#include <linux/config.h>
     3.9 +#include <linux/kernel.h>
    3.10 +#include <linux/linkage.h>
    3.11 +#include <linux/bitops.h>
    3.12 +#include <linux/preempt.h>
    3.13 +#include <linux/cpumask.h>
    3.14 +#include <linux/hardirq.h>
    3.15 +#include <linux/sched.h>
    3.16 +#include <asm/atomic.h>
    3.17 +#include <asm/ptrace.h>
    3.18 +#include <asm/system.h>
    3.19 +
    3.20 +/*
    3.21 + * For 2.4.x compatibility, 2.4.x can use
    3.22 + *
    3.23 + *	typedef void irqreturn_t;
    3.24 + *	#define IRQ_NONE
    3.25 + *	#define IRQ_HANDLED
    3.26 + *	#define IRQ_RETVAL(x)
    3.27 + *
    3.28 + * To mix old-style and new-style irq handler returns.
    3.29 + *
    3.30 + * IRQ_NONE means we didn't handle it.
    3.31 + * IRQ_HANDLED means that we did have a valid interrupt and handled it.
     3.32 + * IRQ_RETVAL(x) selects between the two depending on whether x is non-zero (non-zero = handled)
    3.33 + */
    3.34 +typedef int irqreturn_t;
    3.35 +
    3.36 +#define IRQ_NONE	(0)
    3.37 +#define IRQ_HANDLED	(1)
    3.38 +#define IRQ_RETVAL(x)	((x) != 0)
    3.39 +
    3.40 +struct irqaction {
    3.41 +	irqreturn_t (*handler)(int, void *, struct pt_regs *);
    3.42 +	unsigned long flags;
    3.43 +	cpumask_t mask;
    3.44 +	const char *name;
    3.45 +	void *dev_id;
    3.46 +	struct irqaction *next;
    3.47 +	int irq;
    3.48 +	struct proc_dir_entry *dir;
    3.49 +};
    3.50 +
    3.51 +extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
    3.52 +extern int request_irq(unsigned int,
    3.53 +		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
    3.54 +		       unsigned long, const char *, void *);
    3.55 +extern void free_irq(unsigned int, void *);
    3.56 +
    3.57 +
    3.58 +#ifdef CONFIG_GENERIC_HARDIRQS
    3.59 +extern void disable_irq_nosync(unsigned int irq);
    3.60 +extern void disable_irq(unsigned int irq);
    3.61 +extern void enable_irq(unsigned int irq);
    3.62 +#endif
    3.63 +
    3.64 +#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
    3.65 +int irq_ignore_unhandled(unsigned int irq);
    3.66 +#else
    3.67 +#define irq_ignore_unhandled(irq) 0
    3.68 +#endif
    3.69 +
    3.70 +#ifndef __ARCH_SET_SOFTIRQ_PENDING
    3.71 +#define set_softirq_pending(x) (local_softirq_pending() = (x))
    3.72 +#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
    3.73 +#endif
    3.74 +
    3.75 +/*
    3.76 + * Temporary defines for UP kernels, until all code gets fixed.
    3.77 + */
    3.78 +#ifndef CONFIG_SMP
    3.79 +static inline void __deprecated cli(void)
    3.80 +{
    3.81 +	local_irq_disable();
    3.82 +}
    3.83 +static inline void __deprecated sti(void)
    3.84 +{
    3.85 +	local_irq_enable();
    3.86 +}
    3.87 +static inline void __deprecated save_flags(unsigned long *x)
    3.88 +{
    3.89 +	local_save_flags(*x);
    3.90 +}
    3.91 +#define save_flags(x) save_flags(&x)
    3.92 +static inline void __deprecated restore_flags(unsigned long x)
    3.93 +{
    3.94 +	local_irq_restore(x);
    3.95 +}
    3.96 +
    3.97 +static inline void __deprecated save_and_cli(unsigned long *x)
    3.98 +{
    3.99 +	local_irq_save(*x);
   3.100 +}
   3.101 +#define save_and_cli(x)	save_and_cli(&x)
   3.102 +#endif /* CONFIG_SMP */
   3.103 +
   3.104 +/* SoftIRQ primitives.  */
   3.105 +#define local_bh_disable() \
   3.106 +		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
   3.107 +#define __local_bh_enable() \
   3.108 +		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
   3.109 +
   3.110 +extern void local_bh_enable(void);
   3.111 +
    3.112 +/* PLEASE avoid allocating new softirqs unless you _really_ need high-
    3.113 +   frequency threaded job scheduling. For almost all purposes
    3.114 +   tasklets are more than enough. E.g. all serial device BHs et
    3.115 +   al. should be converted to tasklets, not to softirqs.
   3.116 + */
   3.117 +
   3.118 +enum
   3.119 +{
   3.120 +	HI_SOFTIRQ=0,
   3.121 +	TIMER_SOFTIRQ,
   3.122 +	NET_TX_SOFTIRQ,
   3.123 +	NET_RX_SOFTIRQ,
   3.124 +	BLOCK_SOFTIRQ,
   3.125 +	TASKLET_SOFTIRQ
   3.126 +};
   3.127 +
   3.128 +/* softirq mask and active fields moved to irq_cpustat_t in
   3.129 + * asm/hardirq.h to get better cache usage.  KAO
   3.130 + */
   3.131 +
   3.132 +struct softirq_action
   3.133 +{
   3.134 +	void	(*action)(struct softirq_action *);
   3.135 +	void	*data;
   3.136 +};
   3.137 +
   3.138 +asmlinkage void do_softirq(void);
   3.139 +extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
   3.140 +extern void softirq_init(void);
   3.141 +#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
   3.142 +extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
   3.143 +extern void FASTCALL(raise_softirq(unsigned int nr));
   3.144 +
   3.145 +
   3.146 +/* Tasklets --- multithreaded analogue of BHs.
   3.147 +
    3.148 +   The main feature distinguishing them from generic softirqs: a tasklet
    3.149 +   runs on only one CPU at a time.
    3.150 +
    3.151 +   The main feature distinguishing them from BHs: different tasklets
    3.152 +   may run simultaneously on different CPUs.
    3.153 +
    3.154 +   Properties:
    3.155 +   * If tasklet_schedule() is called, the tasklet is guaranteed
    3.156 +     to be executed on some CPU at least once after this.
    3.157 +   * If the tasklet is already scheduled but its execution has not yet
    3.158 +     started, it will be executed only once.
    3.159 +   * If this tasklet is already running on another CPU (or schedule is called
    3.160 +     from the tasklet itself), it is rescheduled for later.
    3.161 +   * A tasklet is strictly serialized with respect to itself, but not with
    3.162 +     respect to other tasklets. If a client needs intertask synchronization,
    3.163 +     it must provide it with spinlocks.
   3.164 + */
   3.165 +
   3.166 +struct tasklet_struct
   3.167 +{
   3.168 +	struct tasklet_struct *next;
   3.169 +	unsigned long state;
   3.170 +	atomic_t count;
   3.171 +	void (*func)(unsigned long);
   3.172 +	unsigned long data;
   3.173 +};
   3.174 +
   3.175 +#define DECLARE_TASKLET(name, func, data) \
   3.176 +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
   3.177 +
   3.178 +#define DECLARE_TASKLET_DISABLED(name, func, data) \
   3.179 +struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
   3.180 +
   3.181 +
   3.182 +enum
   3.183 +{
   3.184 +	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
   3.185 +	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
   3.186 +};
   3.187 +
   3.188 +#ifdef CONFIG_SMP
   3.189 +static inline int tasklet_trylock(struct tasklet_struct *t)
   3.190 +{
   3.191 +	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
   3.192 +}
   3.193 +
   3.194 +static inline void tasklet_unlock(struct tasklet_struct *t)
   3.195 +{
   3.196 +	smp_mb__before_clear_bit(); 
   3.197 +	clear_bit(TASKLET_STATE_RUN, &(t)->state);
   3.198 +}
   3.199 +
   3.200 +static inline void tasklet_unlock_wait(struct tasklet_struct *t)
   3.201 +{
   3.202 +	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
   3.203 +}
   3.204 +#else
   3.205 +#define tasklet_trylock(t) 1
   3.206 +#define tasklet_unlock_wait(t) do { } while (0)
   3.207 +#define tasklet_unlock(t) do { } while (0)
   3.208 +#endif
   3.209 +
   3.210 +extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
   3.211 +
   3.212 +static inline void tasklet_schedule(struct tasklet_struct *t)
   3.213 +{
   3.214 +	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
   3.215 +		__tasklet_schedule(t);
   3.216 +}
   3.217 +
   3.218 +extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
   3.219 +
   3.220 +static inline void tasklet_hi_schedule(struct tasklet_struct *t)
   3.221 +{
   3.222 +	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
   3.223 +		__tasklet_hi_schedule(t);
   3.224 +}
   3.225 +
   3.226 +
   3.227 +static inline void tasklet_disable_nosync(struct tasklet_struct *t)
   3.228 +{
   3.229 +	atomic_inc(&t->count);
   3.230 +	smp_mb__after_atomic_inc();
   3.231 +}
   3.232 +
   3.233 +static inline void tasklet_disable(struct tasklet_struct *t)
   3.234 +{
   3.235 +	tasklet_disable_nosync(t);
   3.236 +	tasklet_unlock_wait(t);
   3.237 +	smp_mb();
   3.238 +}
   3.239 +
   3.240 +static inline void tasklet_enable(struct tasklet_struct *t)
   3.241 +{
   3.242 +	smp_mb__before_atomic_dec();
   3.243 +	atomic_dec(&t->count);
   3.244 +}
   3.245 +
   3.246 +static inline void tasklet_hi_enable(struct tasklet_struct *t)
   3.247 +{
   3.248 +	smp_mb__before_atomic_dec();
   3.249 +	atomic_dec(&t->count);
   3.250 +}
   3.251 +
   3.252 +extern void tasklet_kill(struct tasklet_struct *t);
   3.253 +extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
   3.254 +extern void tasklet_init(struct tasklet_struct *t,
   3.255 +			 void (*func)(unsigned long), unsigned long data);
   3.256 +
   3.257 +/*
   3.258 + * Autoprobing for irqs:
   3.259 + *
   3.260 + * probe_irq_on() and probe_irq_off() provide robust primitives
   3.261 + * for accurate IRQ probing during kernel initialization.  They are
   3.262 + * reasonably simple to use, are not "fooled" by spurious interrupts,
   3.263 + * and, unlike other attempts at IRQ probing, they do not get hung on
   3.264 + * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
   3.265 + *
   3.266 + * For reasonably foolproof probing, use them as follows:
   3.267 + *
   3.268 + * 1. clear and/or mask the device's internal interrupt.
   3.269 + * 2. sti();
   3.270 + * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
   3.271 + * 4. enable the device and cause it to trigger an interrupt.
   3.272 + * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
   3.273 + * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
   3.274 + * 7. service the device to clear its pending interrupt.
   3.275 + * 8. loop again if paranoia is required.
   3.276 + *
   3.277 + * probe_irq_on() returns a mask of allocated irq's.
   3.278 + *
   3.279 + * probe_irq_off() takes the mask as a parameter,
   3.280 + * and returns the irq number which occurred,
   3.281 + * or zero if none occurred, or a negative irq number
   3.282 + * if more than one irq occurred.
   3.283 + */
   3.284 +
   3.285 +#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE) 
   3.286 +static inline unsigned long probe_irq_on(void)
   3.287 +{
   3.288 +	return 0;
   3.289 +}
   3.290 +static inline int probe_irq_off(unsigned long val)
   3.291 +{
   3.292 +	return 0;
   3.293 +}
   3.294 +static inline unsigned int probe_irq_mask(unsigned long val)
   3.295 +{
   3.296 +	return 0;
   3.297 +}
   3.298 +#else
   3.299 +extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
   3.300 +extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
   3.301 +extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
   3.302 +#endif
   3.303 +
   3.304 +#endif
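The probing recipe spelled out in steps 1-8 of the interrupt.h comment above maps onto code roughly as follows. This is a generic sketch, not part of the changeset: mydev_mask_irq(), mydev_trigger_irq() and mydev_ack_irq() are hypothetical device helpers, and steps 2 and 8 are omitted for brevity.

    #include <linux/interrupt.h>
    #include <linux/delay.h>

    /* Sketch of the numbered probing recipe above; mydev_* helpers are hypothetical. */
    static int mydev_probe_irq(void)
    {
            unsigned long irqs;
            int irq;

            mydev_mask_irq();               /* 1. quiesce the device's interrupt */
            irqs = probe_irq_on();          /* 3. "take over" unassigned idle IRQs */
            mydev_trigger_irq();            /* 4. make the device raise an interrupt */
            mdelay(20);                     /* 5. give the interrupt time to arrive */
            irq = probe_irq_off(irqs);      /* 6. 0 = none, negative = multiple */
            mydev_ack_irq();                /* 7. clear the device's pending interrupt */

            return irq;                     /* a positive value is the probed IRQ */
    }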
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/linux-2.6-xen-sparse/kernel/irq/spurious.c	Thu Jun 08 10:11:04 2006 +0100
     4.3 @@ -0,0 +1,206 @@
     4.4 +/*
     4.5 + * linux/kernel/irq/spurious.c
     4.6 + *
     4.7 + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
     4.8 + *
     4.9 + * This file contains spurious interrupt handling.
    4.10 + */
    4.11 +
    4.12 +#include <linux/irq.h>
    4.13 +#include <linux/module.h>
    4.14 +#include <linux/kallsyms.h>
    4.15 +#include <linux/interrupt.h>
    4.16 +
    4.17 +static int irqfixup;
    4.18 +
    4.19 +/*
    4.20 + * Recovery handler for misrouted interrupts.
    4.21 + */
    4.22 +
    4.23 +static int misrouted_irq(int irq, struct pt_regs *regs)
    4.24 +{
    4.25 +	int i;
    4.26 +	irq_desc_t *desc;
    4.27 +	int ok = 0;
    4.28 +	int work = 0;	/* Did we do work for a real IRQ */
    4.29 +
    4.30 +	for(i = 1; i < NR_IRQS; i++) {
    4.31 +		struct irqaction *action;
    4.32 +
    4.33 +		if (i == irq)	/* Already tried */
    4.34 +			continue;
    4.35 +		desc = &irq_desc[i];
    4.36 +		spin_lock(&desc->lock);
    4.37 +		action = desc->action;
    4.38 +		/* Already running on another processor */
    4.39 +		if (desc->status & IRQ_INPROGRESS) {
    4.40 +			/*
    4.41 +			 * Already running: If it is shared get the other
    4.42 +			 * CPU to go looking for our mystery interrupt too
    4.43 +			 */
    4.44 +			if (desc->action && (desc->action->flags & SA_SHIRQ))
    4.45 +				desc->status |= IRQ_PENDING;
    4.46 +			spin_unlock(&desc->lock);
    4.47 +			continue;
    4.48 +		}
    4.49 +		/* Honour the normal IRQ locking */
    4.50 +		desc->status |= IRQ_INPROGRESS;
    4.51 +		spin_unlock(&desc->lock);
    4.52 +		while (action) {
    4.53 +			/* Only shared IRQ handlers are safe to call */
    4.54 +			if (action->flags & SA_SHIRQ) {
    4.55 +				if (action->handler(i, action->dev_id, regs) ==
    4.56 +						IRQ_HANDLED)
    4.57 +					ok = 1;
    4.58 +			}
    4.59 +			action = action->next;
    4.60 +		}
    4.61 +		local_irq_disable();
    4.62 +		/* Now clean up the flags */
    4.63 +		spin_lock(&desc->lock);
    4.64 +		action = desc->action;
    4.65 +
    4.66 +		/*
    4.67 +		 * While we were looking for a fixup someone queued a real
    4.68 +		 * IRQ clashing with our walk
    4.69 +		 */
    4.70 +
    4.71 +		while ((desc->status & IRQ_PENDING) && action) {
    4.72 +			/*
    4.73 +			 * Perform real IRQ processing for the IRQ we deferred
    4.74 +			 */
    4.75 +			work = 1;
    4.76 +			spin_unlock(&desc->lock);
    4.77 +			handle_IRQ_event(i, regs, action);
    4.78 +			spin_lock(&desc->lock);
    4.79 +			desc->status &= ~IRQ_PENDING;
    4.80 +		}
    4.81 +		desc->status &= ~IRQ_INPROGRESS;
    4.82 +		/*
    4.83 +		 * If we did actual work for the real IRQ line we must let the
    4.84 +		 * IRQ controller clean up too
    4.85 +		 */
    4.86 +		if(work)
    4.87 +			desc->handler->end(i);
    4.88 +		spin_unlock(&desc->lock);
    4.89 +	}
    4.90 +	/* So the caller can adjust the irq error counts */
    4.91 +	return ok;
    4.92 +}
    4.93 +
    4.94 +/*
    4.95 + * If 99,900 of the previous 100,000 interrupts have not been handled
    4.96 + * then assume that the IRQ is stuck in some manner. Drop a diagnostic
    4.97 + * and try to turn the IRQ off.
    4.98 + *
    4.99 + * (The other 100-of-100,000 interrupts may have been a correctly
   4.100 + *  functioning device sharing an IRQ with the failing one)
   4.101 + *
   4.102 + * Called under desc->lock
   4.103 + */
   4.104 +
   4.105 +static void
   4.106 +__report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
   4.107 +{
   4.108 +	struct irqaction *action;
   4.109 +
   4.110 +	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
   4.111 +		printk(KERN_ERR "irq event %d: bogus return value %x\n",
   4.112 +				irq, action_ret);
   4.113 +	} else {
   4.114 +		printk(KERN_ERR "irq %d: nobody cared (try booting with "
   4.115 +				"the \"irqpoll\" option)\n", irq);
   4.116 +	}
   4.117 +	dump_stack();
   4.118 +	printk(KERN_ERR "handlers:\n");
   4.119 +	action = desc->action;
   4.120 +	while (action) {
   4.121 +		printk(KERN_ERR "[<%p>]", action->handler);
   4.122 +		print_symbol(" (%s)",
   4.123 +			(unsigned long)action->handler);
   4.124 +		printk("\n");
   4.125 +		action = action->next;
   4.126 +	}
   4.127 +}
   4.128 +
   4.129 +static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
   4.130 +{
   4.131 +	static int count = 100;
   4.132 +
   4.133 +	if (count > 0) {
   4.134 +		count--;
   4.135 +		__report_bad_irq(irq, desc, action_ret);
   4.136 +	}
   4.137 +}
   4.138 +
   4.139 +void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
   4.140 +			struct pt_regs *regs)
   4.141 +{
   4.142 +	if (action_ret != IRQ_HANDLED) {
   4.143 +		if (!irq_ignore_unhandled(irq))
   4.144 +			desc->irqs_unhandled++;
   4.145 +		if (action_ret != IRQ_NONE)
   4.146 +			report_bad_irq(irq, desc, action_ret);
   4.147 +	}
   4.148 +
   4.149 +	if (unlikely(irqfixup)) {
   4.150 +		/* Don't punish working computers */
   4.151 +		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
   4.152 +			int ok = misrouted_irq(irq, regs);
   4.153 +			if (action_ret == IRQ_NONE)
   4.154 +				desc->irqs_unhandled -= ok;
   4.155 +		}
   4.156 +	}
   4.157 +
   4.158 +	desc->irq_count++;
   4.159 +	if (desc->irq_count < 100000)
   4.160 +		return;
   4.161 +
   4.162 +	desc->irq_count = 0;
   4.163 +	if (desc->irqs_unhandled > 99900) {
   4.164 +		/*
   4.165 +		 * The interrupt is stuck
   4.166 +		 */
   4.167 +		__report_bad_irq(irq, desc, action_ret);
   4.168 +		/*
   4.169 +		 * Now kill the IRQ
   4.170 +		 */
   4.171 +		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
   4.172 +		desc->status |= IRQ_DISABLED;
   4.173 +		desc->handler->disable(irq);
   4.174 +	}
   4.175 +	desc->irqs_unhandled = 0;
   4.176 +}
   4.177 +
   4.178 +int noirqdebug;
   4.179 +
   4.180 +int __init noirqdebug_setup(char *str)
   4.181 +{
   4.182 +	noirqdebug = 1;
   4.183 +	printk(KERN_INFO "IRQ lockup detection disabled\n");
   4.184 +	return 1;
   4.185 +}
   4.186 +
   4.187 +__setup("noirqdebug", noirqdebug_setup);
   4.188 +
   4.189 +static int __init irqfixup_setup(char *str)
   4.190 +{
   4.191 +	irqfixup = 1;
   4.192 +	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
   4.193 +	printk(KERN_WARNING "This may impact system performance.\n");
   4.194 +	return 1;
   4.195 +}
   4.196 +
   4.197 +__setup("irqfixup", irqfixup_setup);
   4.198 +
   4.199 +static int __init irqpoll_setup(char *str)
   4.200 +{
   4.201 +	irqfixup = 2;
   4.202 +	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
   4.203 +				"enabled\n");
   4.204 +	printk(KERN_WARNING "This may significantly impact system "
   4.205 +				"performance\n");
   4.206 +	return 1;
   4.207 +}
   4.208 +
   4.209 +__setup("irqpoll", irqpoll_setup);