ia64/xen-unstable

changeset 9508:db2bd8169e9b

[IA64] Cleanup to irq.c

This patch is a big cleanup to irq.c, with only real
necessary interfaces kept now. The irq subsystem of
xen is very simple since most external devices are
controlled by dom0. Also especially there's no need
to keep CONFIG_XEN everywhere, since this file is
already out of sync with the linux version for a long time.
Actually xen defines irq related structures differently
and it's worth creating a xen-specific version here.

Another change of this patch is to remove ugly check
upon timer/ipi within irq handler. Instead now we'll
check IRQ_GUEST flag of irq_desc to decide whether
to deliver irq to guest. Now all vectors un-registered
are set with IRQ_GUEST enabled, which is identical to
current assumption. Later this flag should be set only
per guest's request. Then xen_do_IRQ is not required
as the result of this change.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 11 13:55:36 2006 -0600 (2006-04-11)
parents 67b24fc635ae
children 52fa146a30ae
files xen/arch/ia64/linux-xen/irq_ia64.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/irq.c xen/arch/ia64/xen/xenirq.c
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/irq_ia64.c	Tue Apr 11 13:54:58 2006 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Tue Apr 11 13:55:36 2006 -0600
     1.3 @@ -150,9 +150,6 @@ ia64_handle_irq (ia64_vector vector, str
     1.4  			ia64_setreg(_IA64_REG_CR_TPR, vector);
     1.5  			ia64_srlz_d();
     1.6  
     1.7 -#ifdef XEN
     1.8 -			if (!xen_do_IRQ(vector))
     1.9 -#endif
    1.10  			__do_IRQ(local_vector_to_irq(vector), regs);
    1.11  
    1.12  			/*
     2.1 --- a/xen/arch/ia64/xen/domain.c	Tue Apr 11 13:54:58 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Tue Apr 11 13:55:36 2006 -0600
     2.3 @@ -1081,7 +1081,7 @@ int construct_dom0(struct domain *d,
     2.4  	sync_split_caches();
     2.5  
     2.6  	// FIXME: Hack for keyboard input
     2.7 -	serial_input_init();
     2.8 +	//serial_input_init();
     2.9  
    2.10  	return 0;
    2.11  }
     3.1 --- a/xen/arch/ia64/xen/irq.c	Tue Apr 11 13:54:58 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/irq.c	Tue Apr 11 13:55:36 2006 -0600
     3.3 @@ -26,59 +26,30 @@
     3.4  #include <linux/config.h>
     3.5  #include <linux/errno.h>
     3.6  #include <linux/module.h>
     3.7 -#ifndef XEN
     3.8 -#include <linux/signal.h>
     3.9 -#endif
    3.10  #include <linux/sched.h>
    3.11  #include <linux/ioport.h>
    3.12  #include <linux/interrupt.h>
    3.13  #include <linux/timex.h>
    3.14  #include <linux/slab.h>
    3.15 -#ifndef XEN
    3.16 -#include <linux/random.h>
    3.17 -#include <linux/cpu.h>
    3.18 -#endif
    3.19  #include <linux/ctype.h>
    3.20 -#ifndef XEN
    3.21 -#include <linux/smp_lock.h>
    3.22 -#endif
    3.23  #include <linux/init.h>
    3.24 -#ifndef XEN
    3.25 -#include <linux/kernel_stat.h>
    3.26 -#endif
    3.27  #include <linux/irq.h>
    3.28 -#ifndef XEN
    3.29 -#include <linux/proc_fs.h>
    3.30 -#endif
    3.31  #include <linux/seq_file.h>
    3.32 -#ifndef XEN
    3.33 -#include <linux/kallsyms.h>
    3.34 -#include <linux/notifier.h>
    3.35 -#endif
    3.36  
    3.37  #include <asm/atomic.h>
    3.38 -#ifndef XEN
    3.39 -#include <asm/cpu.h>
    3.40 -#endif
    3.41  #include <asm/io.h>
    3.42  #include <asm/smp.h>
    3.43  #include <asm/system.h>
    3.44  #include <asm/bitops.h>
    3.45  #include <asm/uaccess.h>
    3.46  #include <asm/pgalloc.h>
    3.47 -#ifndef XEN
    3.48 -#include <asm/tlbflush.h>
    3.49 -#endif
    3.50  #include <asm/delay.h>
    3.51  #include <asm/irq.h>
    3.52  
    3.53 -#ifdef XEN
    3.54  #include <xen/event.h>
    3.55  #define _irq_desc irq_desc
    3.56  #define irq_descp(irq) &irq_desc[irq]
    3.57  #define apicid_to_phys_cpu_present(x)	1
    3.58 -#endif
    3.59 -
    3.60  
    3.61  /*
    3.62   * Linux has a controller-independent x86 interrupt architecture.
    3.63 @@ -101,48 +72,17 @@
    3.64   */
    3.65  irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
    3.66  	[0 ... NR_IRQS-1] = {
    3.67 -		.status = IRQ_DISABLED,
    3.68 +		.status = IRQ_DISABLED | IRQ_GUEST,
    3.69  		.handler = &no_irq_type,
    3.70  		.lock = SPIN_LOCK_UNLOCKED
    3.71  	}
    3.72  };
    3.73  
    3.74  /*
    3.75 - * This is updated when the user sets irq affinity via /proc
    3.76 - */
    3.77 -cpumask_t    __cacheline_aligned pending_irq_cpumask[NR_IRQS];
    3.78 -
    3.79 -#ifdef CONFIG_IA64_GENERIC
    3.80 -irq_desc_t * __ia64_irq_desc (unsigned int irq)
    3.81 -{
    3.82 -	return _irq_desc + irq;
    3.83 -}
    3.84 -
    3.85 -ia64_vector __ia64_irq_to_vector (unsigned int irq)
    3.86 -{
    3.87 -	return (ia64_vector) irq;
    3.88 -}
    3.89 -
    3.90 -unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
    3.91 -{
    3.92 -	return (unsigned int) vec;
    3.93 -}
    3.94 -#endif
    3.95 -
    3.96 -#ifndef XEN
    3.97 -static void register_irq_proc (unsigned int irq);
    3.98 -#endif
    3.99 -
   3.100 -/*
   3.101   * Special irq handlers.
   3.102   */
   3.103  
   3.104 -#ifdef XEN
   3.105  void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
   3.106 -#else
   3.107 -irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
   3.108 -{ return IRQ_NONE; }
   3.109 -#endif
   3.110  
   3.111  /*
   3.112   * Generic no controller code
   3.113 @@ -158,23 +98,7 @@ static void ack_none(unsigned int irq)
   3.114   * each architecture has to answer this themselves, it doesn't deserve
   3.115   * a generic callback i think.
   3.116   */
   3.117 -#ifdef CONFIG_X86
   3.118 -	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
   3.119 -#ifdef CONFIG_X86_LOCAL_APIC
   3.120 -	/*
   3.121 -	 * Currently unexpected vectors happen only on SMP and APIC.
   3.122 -	 * We _must_ ack these because every local APIC has only N
   3.123 -	 * irq slots per priority level, and a 'hanging, unacked' IRQ
   3.124 -	 * holds up an irq slot - in excessive cases (when multiple
   3.125 -	 * unexpected vectors occur) that might lock up the APIC
   3.126 -	 * completely.
   3.127 -	 */
   3.128 -	ack_APIC_irq();
   3.129 -#endif
   3.130 -#endif
   3.131 -#ifdef CONFIG_IA64
   3.132  	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
   3.133 -#endif
   3.134  }
   3.135  
   3.136  /* startup is the same as "enable", shutdown is same as "disable" */
   3.137 @@ -192,161 +116,15 @@ struct hw_interrupt_type no_irq_type = {
   3.138  };
   3.139  
   3.140  atomic_t irq_err_count;
   3.141 -#ifdef CONFIG_X86_IO_APIC
   3.142 -#ifdef APIC_MISMATCH_DEBUG
   3.143 -atomic_t irq_mis_count;
   3.144 -#endif
   3.145 -#endif
   3.146  
   3.147 -/*
   3.148 - * Generic, controller-independent functions:
   3.149 +/* Some placeholder here, which are used by other files and we
   3.150 + * don't want to change too much now. Later they should be cleaned.
   3.151   */
   3.152 -
   3.153 -#ifndef XEN
   3.154 -int show_interrupts(struct seq_file *p, void *v)
   3.155 -{
   3.156 -	int j, i = *(loff_t *) v;
   3.157 -	struct irqaction * action;
   3.158 -	irq_desc_t *idesc;
   3.159 -	unsigned long flags;
   3.160 -
   3.161 -	if (i == 0) {
   3.162 -		seq_puts(p, "           ");
   3.163 -		for (j=0; j<NR_CPUS; j++)
   3.164 -			if (cpu_online(j))
   3.165 -				seq_printf(p, "CPU%d       ",j);
   3.166 -		seq_putc(p, '\n');
   3.167 -	}
   3.168 -
   3.169 -	if (i < NR_IRQS) {
   3.170 -		idesc = irq_descp(i);
   3.171 -		spin_lock_irqsave(&idesc->lock, flags);
   3.172 -		action = idesc->action;
   3.173 -		if (!action)
   3.174 -			goto skip;
   3.175 -		seq_printf(p, "%3d: ",i);
   3.176 -#ifndef CONFIG_SMP
   3.177 -		seq_printf(p, "%10u ", kstat_irqs(i));
   3.178 -#else
   3.179 -		for (j = 0; j < NR_CPUS; j++)
   3.180 -			if (cpu_online(j))
   3.181 -				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
   3.182 -#endif
   3.183 -		seq_printf(p, " %14s", idesc->handler->typename);
   3.184 -		seq_printf(p, "  %s", action->name);
   3.185 -
   3.186 -		for (action=action->next; action; action = action->next)
   3.187 -			seq_printf(p, ", %s", action->name);
   3.188 -
   3.189 -		seq_putc(p, '\n');
   3.190 -skip:
   3.191 -		spin_unlock_irqrestore(&idesc->lock, flags);
   3.192 -	} else if (i == NR_IRQS) {
   3.193 -		seq_puts(p, "NMI: ");
   3.194 -		for (j = 0; j < NR_CPUS; j++)
   3.195 -			if (cpu_online(j))
   3.196 -				seq_printf(p, "%10u ", nmi_count(j));
   3.197 -		seq_putc(p, '\n');
   3.198 -#ifdef CONFIG_X86_LOCAL_APIC
   3.199 -		seq_puts(p, "LOC: ");
   3.200 -		for (j = 0; j < NR_CPUS; j++)
   3.201 -			if (cpu_online(j))
   3.202 -				seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
   3.203 -		seq_putc(p, '\n');
   3.204 -#endif
   3.205 -		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
   3.206 -#ifdef CONFIG_X86_IO_APIC
   3.207 -#ifdef APIC_MISMATCH_DEBUG
   3.208 -		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
   3.209 -#endif
   3.210 -#endif
   3.211 -	}
   3.212 -	return 0;
   3.213 -}
   3.214 -#endif
   3.215 -
   3.216  #ifdef CONFIG_SMP
   3.217 -inline void synchronize_irq(unsigned int irq)
   3.218 -{
   3.219 -#ifndef XEN
   3.220 -	struct irq_desc *desc = irq_desc + irq;
   3.221 -
   3.222 -	while (desc->status & IRQ_INPROGRESS)
   3.223 -		cpu_relax();
   3.224 -#endif
   3.225 -}
   3.226 +inline void synchronize_irq(unsigned int irq) {}
   3.227  EXPORT_SYMBOL(synchronize_irq);
   3.228  #endif
   3.229  
   3.230 -/*
   3.231 - * This should really return information about whether
   3.232 - * we should do bottom half handling etc. Right now we
   3.233 - * end up _always_ checking the bottom half, which is a
   3.234 - * waste of time and is not what some drivers would
   3.235 - * prefer.
   3.236 - */
   3.237 -int handle_IRQ_event(unsigned int irq,
   3.238 -		struct pt_regs *regs, struct irqaction *action)
   3.239 -{
   3.240 -#ifndef XEN
   3.241 -	int status = 1;	/* Force the "do bottom halves" bit */
   3.242 -#endif
   3.243 -	int retval = 0;
   3.244 -
   3.245 -#ifndef XEN
   3.246 -	if (!(action->flags & SA_INTERRUPT))
   3.247 -#endif
   3.248 -		local_irq_enable();
   3.249 -
   3.250 -#ifdef XEN
   3.251 -		action->handler(irq, action->dev_id, regs);
   3.252 -#else
   3.253 -	do {
   3.254 -		status |= action->flags;
   3.255 -		retval |= action->handler(irq, action->dev_id, regs);
   3.256 -		action = action->next;
   3.257 -	} while (action);
   3.258 -	if (status & SA_SAMPLE_RANDOM)
   3.259 -		add_interrupt_randomness(irq);
   3.260 -#endif
   3.261 -	local_irq_disable();
   3.262 -	return retval;
   3.263 -}
   3.264 -
   3.265 -#ifndef XEN
   3.266 -static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
   3.267 -{
   3.268 -	struct irqaction *action;
   3.269 -
   3.270 -	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
   3.271 -		printk(KERN_ERR "irq event %d: bogus return value %x\n",
   3.272 -				irq, action_ret);
   3.273 -	} else {
   3.274 -		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
   3.275 -	}
   3.276 -	dump_stack();
   3.277 -	printk(KERN_ERR "handlers:\n");
   3.278 -	action = desc->action;
   3.279 -	do {
   3.280 -		printk(KERN_ERR "[<%p>]", action->handler);
   3.281 -		print_symbol(" (%s)",
   3.282 -			(unsigned long)action->handler);
   3.283 -		printk("\n");
   3.284 -		action = action->next;
   3.285 -	} while (action);
   3.286 -}
   3.287 -
   3.288 -static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
   3.289 -{
   3.290 -	static int count = 100;
   3.291 -
   3.292 -	if (count) {
   3.293 -		count--;
   3.294 -		__report_bad_irq(irq, desc, action_ret);
   3.295 -	}
   3.296 -}
   3.297 -#endif
   3.298 -
   3.299  static int noirqdebug;
   3.300  
   3.301  static int __init noirqdebug_setup(char *str)
   3.302 @@ -359,142 +137,12 @@ static int __init noirqdebug_setup(char 
   3.303  __setup("noirqdebug", noirqdebug_setup);
   3.304  
   3.305  /*
   3.306 - * If 99,900 of the previous 100,000 interrupts have not been handled then
   3.307 - * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
   3.308 - * turn the IRQ off.
   3.309 - *
   3.310 - * (The other 100-of-100,000 interrupts may have been a correctly-functioning
   3.311 - *  device sharing an IRQ with the failing one)
   3.312 - *
   3.313 - * Called under desc->lock
   3.314 - */
   3.315 -#ifndef XEN
   3.316 -static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
   3.317 -{
   3.318 -	if (action_ret != IRQ_HANDLED) {
   3.319 -		desc->irqs_unhandled++;
   3.320 -		if (action_ret != IRQ_NONE)
   3.321 -			report_bad_irq(irq, desc, action_ret);
   3.322 -	}
   3.323 -
   3.324 -	desc->irq_count++;
   3.325 -	if (desc->irq_count < 100000)
   3.326 -		return;
   3.327 -
   3.328 -	desc->irq_count = 0;
   3.329 -	if (desc->irqs_unhandled > 99900) {
   3.330 -		/*
   3.331 -		 * The interrupt is stuck
   3.332 -		 */
   3.333 -		__report_bad_irq(irq, desc, action_ret);
   3.334 -		/*
   3.335 -		 * Now kill the IRQ
   3.336 -		 */
   3.337 -		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
   3.338 -		desc->status |= IRQ_DISABLED;
   3.339 -		desc->handler->disable(irq);
   3.340 -	}
   3.341 -	desc->irqs_unhandled = 0;
   3.342 -}
   3.343 -#endif
   3.344 -
   3.345 -/*
   3.346   * Generic enable/disable code: this just calls
   3.347   * down into the PIC-specific version for the actual
   3.348   * hardware disable after having gotten the irq
   3.349   * controller lock.
   3.350   */
   3.351  
   3.352 -/**
   3.353 - *	disable_irq_nosync - disable an irq without waiting
   3.354 - *	@irq: Interrupt to disable
   3.355 - *
   3.356 - *	Disable the selected interrupt line.  Disables and Enables are
   3.357 - *	nested.
   3.358 - *	Unlike disable_irq(), this function does not ensure existing
   3.359 - *	instances of the IRQ handler have completed before returning.
   3.360 - *
   3.361 - *	This function may be called from IRQ context.
   3.362 - */
   3.363 -
   3.364 -inline void disable_irq_nosync(unsigned int irq)
   3.365 -{
   3.366 -	irq_desc_t *desc = irq_descp(irq);
   3.367 -	unsigned long flags;
   3.368 -
   3.369 -	spin_lock_irqsave(&desc->lock, flags);
   3.370 -	if (!desc->depth++) {
   3.371 -		desc->status |= IRQ_DISABLED;
   3.372 -		desc->handler->disable(irq);
   3.373 -	}
   3.374 -	spin_unlock_irqrestore(&desc->lock, flags);
   3.375 -}
   3.376 -EXPORT_SYMBOL(disable_irq_nosync);
   3.377 -
   3.378 -/**
   3.379 - *	disable_irq - disable an irq and wait for completion
   3.380 - *	@irq: Interrupt to disable
   3.381 - *
   3.382 - *	Disable the selected interrupt line.  Enables and Disables are
   3.383 - *	nested.
   3.384 - *	This function waits for any pending IRQ handlers for this interrupt
   3.385 - *	to complete before returning. If you use this function while
   3.386 - *	holding a resource the IRQ handler may need you will deadlock.
   3.387 - *
   3.388 - *	This function may be called - with care - from IRQ context.
   3.389 - */
   3.390 -
   3.391 -void disable_irq(unsigned int irq)
   3.392 -{
   3.393 -	irq_desc_t *desc = irq_descp(irq);
   3.394 -
   3.395 -	disable_irq_nosync(irq);
   3.396 -	if (desc->action)
   3.397 -		synchronize_irq(irq);
   3.398 -}
   3.399 -EXPORT_SYMBOL(disable_irq);
   3.400 -
   3.401 -/**
   3.402 - *	enable_irq - enable handling of an irq
   3.403 - *	@irq: Interrupt to enable
   3.404 - *
   3.405 - *	Undoes the effect of one call to disable_irq().  If this
   3.406 - *	matches the last disable, processing of interrupts on this
   3.407 - *	IRQ line is re-enabled.
   3.408 - *
   3.409 - *	This function may be called from IRQ context.
   3.410 - */
   3.411 -
   3.412 -void enable_irq(unsigned int irq)
   3.413 -{
   3.414 -	irq_desc_t *desc = irq_descp(irq);
   3.415 -	unsigned long flags;
   3.416 -
   3.417 -	spin_lock_irqsave(&desc->lock, flags);
   3.418 -	switch (desc->depth) {
   3.419 -	case 1: {
   3.420 -		unsigned int status = desc->status & ~IRQ_DISABLED;
   3.421 -		desc->status = status;
   3.422 -#ifndef XEN
   3.423 -		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
   3.424 -			desc->status = status | IRQ_REPLAY;
   3.425 -			hw_resend_irq(desc->handler,irq);
   3.426 -		}
   3.427 -#endif
   3.428 -		desc->handler->enable(irq);
   3.429 -		/* fall-through */
   3.430 -	}
   3.431 -	default:
   3.432 -		desc->depth--;
   3.433 -		break;
   3.434 -	case 0:
   3.435 -		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
   3.436 -		       irq, (void *) __builtin_return_address(0));
   3.437 -	}
   3.438 -	spin_unlock_irqrestore(&desc->lock, flags);
   3.439 -}
   3.440 -EXPORT_SYMBOL(enable_irq);
   3.441 -
   3.442  /*
   3.443   * do_IRQ handles all normal device IRQ's (the special
   3.444   * SMP cross-CPU interrupts have their own specific
   3.445 @@ -506,36 +154,30 @@ fastcall unsigned int __do_IRQ(unsigned 
   3.446  	struct irqaction * action;
   3.447  	unsigned int status;
   3.448  
   3.449 -#ifndef XEN
   3.450 -	kstat_this_cpu.irqs[irq]++;
   3.451 -#endif
   3.452 -	if (desc->status & IRQ_PER_CPU) {
   3.453 -		irqreturn_t action_ret;
   3.454 -
   3.455 +	if (likely(desc->status & IRQ_PER_CPU)) {
   3.456  		/*
   3.457  		 * No locking required for CPU-local interrupts:
   3.458  		 */
   3.459  		desc->handler->ack(irq);
   3.460 -		action_ret = handle_IRQ_event(irq, regs, desc->action);
   3.461 -#ifndef XEN
   3.462 -		if (!noirqdebug)
   3.463 -			note_interrupt(irq, desc, action_ret);
   3.464 -#endif
   3.465 +		local_irq_enable();
   3.466 +		desc->action->handler(irq, desc->action->dev_id, regs);
   3.467 +		local_irq_disable();
   3.468  		desc->handler->end(irq);
   3.469  		return 1;
   3.470  	}
   3.471  
   3.472  	spin_lock(&desc->lock);
   3.473 +
   3.474 +	if (desc->status & IRQ_GUEST) {
   3.475 +		/* __do_IRQ_guest(irq); */
   3.476 +		vcpu_pend_interrupt(dom0->vcpu[0],irq);
   3.477 +		vcpu_wake(dom0->vcpu[0]);
   3.478 +		spin_unlock(&desc->lock);
   3.479 +		return 1;
   3.480 +	}
   3.481 +
   3.482  	desc->handler->ack(irq);
   3.483 -	/*
   3.484 -	 * REPLAY is when Linux resends an IRQ that was dropped earlier
   3.485 -	 * WAITING is used by probe to mark irqs that are being tested
   3.486 -	 */
   3.487 -#ifdef XEN
   3.488  	status = desc->status & ~IRQ_REPLAY;
   3.489 -#else
   3.490 -	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
   3.491 -#endif
   3.492  	status |= IRQ_PENDING; /* we _want_ to handle it */
   3.493  
   3.494  	/*
   3.495 @@ -570,19 +212,13 @@ fastcall unsigned int __do_IRQ(unsigned 
   3.496  	 * SMP environment.
   3.497  	 */
   3.498  	for (;;) {
   3.499 -		irqreturn_t action_ret;
   3.500 -
   3.501 -		spin_unlock(&desc->lock);
   3.502 -
   3.503 -		action_ret = handle_IRQ_event(irq, regs, action);
   3.504 +		spin_unlock_irq(&desc->lock);
   3.505 +		action->handler(irq, action->dev_id, regs);
   3.506 +		spin_lock_irq(&desc->lock);
   3.507  
   3.508 -		spin_lock(&desc->lock);
   3.509 -#ifndef XEN
   3.510 -		if (!noirqdebug)
   3.511 -			note_interrupt(irq, desc, action_ret);
   3.512 -#endif
   3.513  		if (likely(!(desc->status & IRQ_PENDING)))
   3.514  			break;
   3.515 +
   3.516  		desc->status &= ~IRQ_PENDING;
   3.517  	}
   3.518  	desc->status &= ~IRQ_INPROGRESS;
   3.519 @@ -639,7 +275,6 @@ int request_irq(unsigned int irq,
   3.520  	int retval;
   3.521  	struct irqaction * action;
   3.522  
   3.523 -#if 1
   3.524  	/*
   3.525  	 * Sanity-check: shared interrupts should REALLY pass in
   3.526  	 * a real dev-ID, otherwise we'll have trouble later trying
   3.527 @@ -650,7 +285,6 @@ int request_irq(unsigned int irq,
   3.528  		if (!dev_id)
   3.529  			printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
   3.530  	}
   3.531 -#endif
   3.532  
   3.533  	if (irq >= NR_IRQS)
   3.534  		return -EINVAL;
   3.535 @@ -661,17 +295,8 @@ int request_irq(unsigned int irq,
   3.536  	if (!action)
   3.537  		return -ENOMEM;
   3.538  
   3.539 -#ifdef XEN
   3.540  	action->handler = (void *) handler;
   3.541 -#else
   3.542 -	action->handler = handler;
   3.543 -	action->flags = irqflags;
   3.544 -	action->mask = 0;
   3.545 -#endif
   3.546  	action->name = devname;
   3.547 -#ifndef XEN
   3.548 -	action->next = NULL;
   3.549 -#endif
   3.550  	action->dev_id = dev_id;
   3.551  
   3.552  	retval = setup_irq(irq, action);
   3.553 @@ -697,16 +322,9 @@ EXPORT_SYMBOL(request_irq);
   3.554   *	This function must not be called from interrupt context.
   3.555   */
   3.556  
   3.557 -#ifdef XEN
   3.558  void free_irq(unsigned int irq)
   3.559 -#else
   3.560 -void free_irq(unsigned int irq, void *dev_id)
   3.561 -#endif
   3.562  {
   3.563  	irq_desc_t *desc;
   3.564 -#ifndef XEN
   3.565 -	struct irqaction **p;
   3.566 -#endif
   3.567  	unsigned long flags;
   3.568  
   3.569  	if (irq >= NR_IRQS)
   3.570 @@ -714,29 +332,11 @@ void free_irq(unsigned int irq, void *de
   3.571  
   3.572  	desc = irq_descp(irq);
   3.573  	spin_lock_irqsave(&desc->lock,flags);
   3.574 -#ifdef XEN
   3.575  	if (desc->action) {
   3.576  		struct irqaction * action = desc->action;
   3.577  		desc->action = NULL;
   3.578 -#else
   3.579 -	p = &desc->action;
   3.580 -	for (;;) {
   3.581 -		struct irqaction * action = *p;
   3.582 -		if (action) {
   3.583 -			struct irqaction **pp = p;
   3.584 -			p = &action->next;
   3.585 -			if (action->dev_id != dev_id)
   3.586 -				continue;
   3.587 -
   3.588 -			/* Found it - now remove it from the list of entries */
   3.589 -			*pp = action->next;
   3.590 -			if (!desc->action) {
   3.591 -#endif
   3.592  				desc->status |= IRQ_DISABLED;
   3.593  				desc->handler->shutdown(irq);
   3.594 -#ifndef XEN
   3.595 -			}
   3.596 -#endif
   3.597  			spin_unlock_irqrestore(&desc->lock,flags);
   3.598  
   3.599  			/* Wait to make sure it's not being used on another CPU */
   3.600 @@ -746,10 +346,6 @@ void free_irq(unsigned int irq, void *de
   3.601  		}
   3.602  		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
   3.603  		spin_unlock_irqrestore(&desc->lock,flags);
   3.604 -#ifndef XEN
   3.605 -		return;
   3.606 -	}
   3.607 -#endif
   3.608  }
   3.609  
   3.610  EXPORT_SYMBOL(free_irq);
   3.611 @@ -763,569 +359,32 @@ EXPORT_SYMBOL(free_irq);
   3.612   * disabled.
   3.613   */
   3.614  
   3.615 -#ifndef XEN
   3.616 -static int DECLARE_MUTEX(probe_sem);
   3.617 -
   3.618 -/**
   3.619 - *	probe_irq_on	- begin an interrupt autodetect
   3.620 - *
   3.621 - *	Commence probing for an interrupt. The interrupts are scanned
   3.622 - *	and a mask of potential interrupt lines is returned.
   3.623 - *
   3.624 - */
   3.625 -
   3.626 -unsigned long probe_irq_on(void)
   3.627 -{
   3.628 -	unsigned int i;
   3.629 -	irq_desc_t *desc;
   3.630 -	unsigned long val;
   3.631 -	unsigned long delay;
   3.632 -
   3.633 -	down(&probe_sem);
   3.634 -	/*
   3.635 -	 * something may have generated an irq long ago and we want to
   3.636 -	 * flush such a longstanding irq before considering it as spurious.
   3.637 -	 */
   3.638 -	for (i = NR_IRQS-1; i > 0; i--)  {
   3.639 -		desc = irq_descp(i);
   3.640 -
   3.641 -		spin_lock_irq(&desc->lock);
   3.642 -		if (!desc->action)
   3.643 -			desc->handler->startup(i);
   3.644 -		spin_unlock_irq(&desc->lock);
   3.645 -	}
   3.646 -
   3.647 -	/* Wait for longstanding interrupts to trigger. */
   3.648 -	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
   3.649 -		/* about 20ms delay */ barrier();
   3.650 -
   3.651 -	/*
   3.652 -	 * enable any unassigned irqs
   3.653 -	 * (we must startup again here because if a longstanding irq
   3.654 -	 * happened in the previous stage, it may have masked itself)
   3.655 -	 */
   3.656 -	for (i = NR_IRQS-1; i > 0; i--) {
   3.657 -		desc = irq_descp(i);
   3.658 -
   3.659 -		spin_lock_irq(&desc->lock);
   3.660 -		if (!desc->action) {
   3.661 -			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
   3.662 -			if (desc->handler->startup(i))
   3.663 -				desc->status |= IRQ_PENDING;
   3.664 -		}
   3.665 -		spin_unlock_irq(&desc->lock);
   3.666 -	}
   3.667 -
   3.668 -	/*
   3.669 -	 * Wait for spurious interrupts to trigger
   3.670 -	 */
   3.671 -	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
   3.672 -		/* about 100ms delay */ barrier();
   3.673 -
   3.674 -	/*
   3.675 -	 * Now filter out any obviously spurious interrupts
   3.676 -	 */
   3.677 -	val = 0;
   3.678 -	for (i = 0; i < NR_IRQS; i++) {
   3.679 -		irq_desc_t *desc = irq_descp(i);
   3.680 -		unsigned int status;
   3.681 -
   3.682 -		spin_lock_irq(&desc->lock);
   3.683 -		status = desc->status;
   3.684 -
   3.685 -		if (status & IRQ_AUTODETECT) {
   3.686 -			/* It triggered already - consider it spurious. */
   3.687 -			if (!(status & IRQ_WAITING)) {
   3.688 -				desc->status = status & ~IRQ_AUTODETECT;
   3.689 -				desc->handler->shutdown(i);
   3.690 -			} else
   3.691 -				if (i < 32)
   3.692 -					val |= 1 << i;
   3.693 -		}
   3.694 -		spin_unlock_irq(&desc->lock);
   3.695 -	}
   3.696 -
   3.697 -	return val;
   3.698 -}
   3.699 -
   3.700 -EXPORT_SYMBOL(probe_irq_on);
   3.701 -
   3.702 -/**
   3.703 - *	probe_irq_mask - scan a bitmap of interrupt lines
   3.704 - *	@val:	mask of interrupts to consider
   3.705 - *
   3.706 - *	Scan the ISA bus interrupt lines and return a bitmap of
   3.707 - *	active interrupts. The interrupt probe logic state is then
   3.708 - *	returned to its previous value.
   3.709 - *
   3.710 - *	Note: we need to scan all the irq's even though we will
   3.711 - *	only return ISA irq numbers - just so that we reset them
   3.712 - *	all to a known state.
   3.713 - */
   3.714 -unsigned int probe_irq_mask(unsigned long val)
   3.715 -{
   3.716 -	int i;
   3.717 -	unsigned int mask;
   3.718 -
   3.719 -	mask = 0;
   3.720 -	for (i = 0; i < 16; i++) {
   3.721 -		irq_desc_t *desc = irq_descp(i);
   3.722 -		unsigned int status;
   3.723 -
   3.724 -		spin_lock_irq(&desc->lock);
   3.725 -		status = desc->status;
   3.726 -
   3.727 -		if (status & IRQ_AUTODETECT) {
   3.728 -			if (!(status & IRQ_WAITING))
   3.729 -				mask |= 1 << i;
   3.730 -
   3.731 -			desc->status = status & ~IRQ_AUTODETECT;
   3.732 -			desc->handler->shutdown(i);
   3.733 -		}
   3.734 -		spin_unlock_irq(&desc->lock);
   3.735 -	}
   3.736 -	up(&probe_sem);
   3.737 -
   3.738 -	return mask & val;
   3.739 -}
   3.740 -EXPORT_SYMBOL(probe_irq_mask);
   3.741 -
   3.742 -/**
   3.743 - *	probe_irq_off	- end an interrupt autodetect
   3.744 - *	@val: mask of potential interrupts (unused)
   3.745 - *
   3.746 - *	Scans the unused interrupt lines and returns the line which
   3.747 - *	appears to have triggered the interrupt. If no interrupt was
   3.748 - *	found then zero is returned. If more than one interrupt is
   3.749 - *	found then minus the first candidate is returned to indicate
   3.750 - *	their is doubt.
   3.751 - *
   3.752 - *	The interrupt probe logic state is returned to its previous
   3.753 - *	value.
   3.754 - *
   3.755 - *	BUGS: When used in a module (which arguably shouldn't happen)
   3.756 - *	nothing prevents two IRQ probe callers from overlapping. The
   3.757 - *	results of this are non-optimal.
   3.758 - */
   3.759 -
   3.760 -int probe_irq_off(unsigned long val)
   3.761 -{
   3.762 -	int i, irq_found, nr_irqs;
   3.763 -
   3.764 -	nr_irqs = 0;
   3.765 -	irq_found = 0;
   3.766 -	for (i = 0; i < NR_IRQS; i++) {
   3.767 -		irq_desc_t *desc = irq_descp(i);
   3.768 -		unsigned int status;
   3.769 -
   3.770 -		spin_lock_irq(&desc->lock);
   3.771 -		status = desc->status;
   3.772 -
   3.773 -		if (status & IRQ_AUTODETECT) {
   3.774 -			if (!(status & IRQ_WAITING)) {
   3.775 -				if (!nr_irqs)
   3.776 -					irq_found = i;
   3.777 -				nr_irqs++;
   3.778 -			}
   3.779 -			desc->status = status & ~IRQ_AUTODETECT;
   3.780 -			desc->handler->shutdown(i);
   3.781 -		}
   3.782 -		spin_unlock_irq(&desc->lock);
   3.783 -	}
   3.784 -	up(&probe_sem);
   3.785 -
   3.786 -	if (nr_irqs > 1)
   3.787 -		irq_found = -irq_found;
   3.788 -	return irq_found;
   3.789 -}
   3.790 -
   3.791 -EXPORT_SYMBOL(probe_irq_off);
   3.792 -#endif
   3.793 -
   3.794  int setup_irq(unsigned int irq, struct irqaction * new)
   3.795  {
   3.796 -#ifndef XEN
   3.797 -	int shared = 0;
   3.798 -#endif
   3.799  	unsigned long flags;
   3.800  	struct irqaction *old, **p;
   3.801  	irq_desc_t *desc = irq_descp(irq);
   3.802  
   3.803 -#ifndef XEN
   3.804 -	if (desc->handler == &no_irq_type)
   3.805 -		return -ENOSYS;
   3.806 -	/*
   3.807 -	 * Some drivers like serial.c use request_irq() heavily,
   3.808 -	 * so we have to be careful not to interfere with a
   3.809 -	 * running system.
   3.810 -	 */
   3.811 -	if (new->flags & SA_SAMPLE_RANDOM) {
   3.812 -		/*
   3.813 -		 * This function might sleep, we want to call it first,
   3.814 -		 * outside of the atomic block.
   3.815 -		 * Yes, this might clear the entropy pool if the wrong
   3.816 -		 * driver is attempted to be loaded, without actually
   3.817 -		 * installing a new handler, but is this really a problem,
   3.818 -		 * only the sysadmin is able to do this.
   3.819 -		 */
   3.820 -		rand_initialize_irq(irq);
   3.821 -	}
   3.822 -
   3.823 -	if (new->flags & SA_PERCPU_IRQ) {
   3.824 -		desc->status |= IRQ_PER_CPU;
   3.825 -		desc->handler = &irq_type_ia64_lsapic;
   3.826 -	}
   3.827 -#endif
   3.828 -
   3.829  	/*
   3.830  	 * The following block of code has to be executed atomically
   3.831  	 */
   3.832  	spin_lock_irqsave(&desc->lock,flags);
   3.833  	p = &desc->action;
   3.834  	if ((old = *p) != NULL) {
   3.835 -#ifdef XEN
   3.836 -		if (1) {
   3.837 -		/* Can't share interrupts unless both agree to */
   3.838 -#else
   3.839 -		if (!(old->flags & new->flags & SA_SHIRQ)) {
   3.840 -#endif
   3.841 -			spin_unlock_irqrestore(&desc->lock,flags);
   3.842 -			return -EBUSY;
   3.843 -		}
   3.844 -
   3.845 -#ifndef XEN
   3.846 -		/* add new interrupt at end of irq queue */
   3.847 -		do {
   3.848 -			p = &old->next;
   3.849 -			old = *p;
   3.850 -		} while (old);
   3.851 -		shared = 1;
   3.852 -#endif
   3.853 +		spin_unlock_irqrestore(&desc->lock,flags);
   3.854 +		return -EBUSY;
   3.855  	}
   3.856  
   3.857  	*p = new;
   3.858  
   3.859 -#ifndef XEN
   3.860 -	if (!shared) {
   3.861 -#else
   3.862 -	{
   3.863 -#endif
   3.864 -		desc->depth = 0;
   3.865 -#ifdef XEN
   3.866 -		desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
   3.867 -#else
   3.868 -		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
   3.869 -#endif
   3.870 -		desc->handler->startup(irq);
   3.871 -	}
   3.872 +	desc->depth = 0;
   3.873 +	desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
   3.874 +	desc->handler->startup(irq);
   3.875  	spin_unlock_irqrestore(&desc->lock,flags);
   3.876  
   3.877 -#ifndef XEN
   3.878 -	register_irq_proc(irq);
   3.879 -#endif
   3.880  	return 0;
   3.881  }
   3.882  
   3.883 -#ifndef XEN
   3.884 -
   3.885 -static struct proc_dir_entry * root_irq_dir;
   3.886 -static struct proc_dir_entry * irq_dir [NR_IRQS];
   3.887 -
   3.888 -#ifdef CONFIG_SMP
   3.889 -
   3.890 -static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
   3.891 -
   3.892 -static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
   3.893 -
   3.894 -static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
   3.895 -
   3.896 -void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
   3.897 -{
   3.898 -	cpumask_t mask = CPU_MASK_NONE;
   3.899 -
   3.900 -	cpu_set(cpu_logical_id(hwid), mask);
   3.901 -
   3.902 -	if (irq < NR_IRQS) {
   3.903 -		irq_affinity[irq] = mask;
   3.904 -		irq_redir[irq] = (char) (redir & 0xff);
   3.905 -	}
   3.906 -}
   3.907 -
   3.908 -static int irq_affinity_read_proc (char *page, char **start, off_t off,
   3.909 -			int count, int *eof, void *data)
   3.910 -{
   3.911 -	int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");
   3.912 -
   3.913 -	len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
   3.914 -	if (count - len < 2)
   3.915 -		return -EINVAL;
   3.916 -	len += sprintf(page + len, "\n");
   3.917 -	return len;
   3.918 -}
   3.919 -
   3.920 -static int irq_affinity_write_proc (struct file *file, const char *buffer,
   3.921 -				    unsigned long count, void *data)
   3.922 -{
   3.923 -	unsigned int irq = (unsigned long) data;
   3.924 -	int full_count = count, err;
   3.925 -	cpumask_t new_value, tmp;
   3.926 -#	define R_PREFIX_LEN 16
   3.927 -	char rbuf[R_PREFIX_LEN];
   3.928 -	int rlen;
   3.929 -	int prelen;
   3.930 -	irq_desc_t *desc = irq_descp(irq);
   3.931 -	unsigned long flags;
   3.932 -
   3.933 -	if (!desc->handler->set_affinity)
   3.934 -		return -EIO;
   3.935 -
   3.936 -	/*
   3.937 -	 * If string being written starts with a prefix of 'r' or 'R'
   3.938 -	 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
   3.939 -	 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
   3.940 -	 * all be trimmed as part of prelen, the untrimmed spaces will
   3.941 -	 * cause the hex parsing to fail, and this write() syscall will
   3.942 -	 * fail with EINVAL.
   3.943 -	 */
   3.944 -
   3.945 -	if (!count)
   3.946 -		return -EINVAL;
   3.947 -	rlen = min(sizeof(rbuf)-1, count);
   3.948 -	if (copy_from_user(rbuf, buffer, rlen))
   3.949 -		return -EFAULT;
   3.950 -	rbuf[rlen] = 0;
   3.951 -	prelen = 0;
   3.952 -	if (tolower(*rbuf) == 'r') {
   3.953 -		prelen = strspn(rbuf, "Rr ");
   3.954 -		irq |= IA64_IRQ_REDIRECTED;
   3.955 -	}
   3.956 -
   3.957 -	err = cpumask_parse(buffer+prelen, count-prelen, new_value);
   3.958 -	if (err)
   3.959 -		return err;
   3.960 -
   3.961 -	/*
   3.962 -	 * Do not allow disabling IRQs completely - it's a too easy
   3.963 -	 * way to make the system unusable accidentally :-) At least
   3.964 -	 * one online CPU still has to be targeted.
   3.965 -	 */
   3.966 -	cpus_and(tmp, new_value, cpu_online_map);
   3.967 -	if (cpus_empty(tmp))
   3.968 -		return -EINVAL;
   3.969 -
   3.970 -	spin_lock_irqsave(&desc->lock, flags);
   3.971 -	pending_irq_cpumask[irq] = new_value;
   3.972 -	spin_unlock_irqrestore(&desc->lock, flags);
   3.973 -
   3.974 -	return full_count;
   3.975 -}
   3.976 -
   3.977 -void move_irq(int irq)
   3.978 -{
   3.979 -	/* note - we hold desc->lock */
   3.980 -	cpumask_t tmp;
   3.981 -	irq_desc_t *desc = irq_descp(irq);
   3.982 -
   3.983 -	if (!cpus_empty(pending_irq_cpumask[irq])) {
   3.984 -		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
   3.985 -		if (unlikely(!cpus_empty(tmp))) {
   3.986 -			desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
   3.987 -		}
   3.988 -		cpus_clear(pending_irq_cpumask[irq]);
   3.989 -	}
   3.990 -}
   3.991 -
   3.992 -
   3.993 -#endif /* CONFIG_SMP */
   3.994 -#endif
   3.995 -
   3.996 -#ifdef CONFIG_HOTPLUG_CPU
   3.997 -unsigned int vectors_in_migration[NR_IRQS];
   3.998 -
   3.999 -/*
  3.1000 - * Since cpu_online_map is already updated, we just need to check for
  3.1001 - * affinity that has zeros
  3.1002 - */
  3.1003 -static void migrate_irqs(void)
  3.1004 -{
  3.1005 -	cpumask_t	mask;
  3.1006 -	irq_desc_t *desc;
  3.1007 -	int 		irq, new_cpu;
  3.1008 -
  3.1009 -	for (irq=0; irq < NR_IRQS; irq++) {
  3.1010 -		desc = irq_descp(irq);
  3.1011 -
  3.1012 -		/*
  3.1013 -		 * No handling for now.
  3.1014 -		 * TBD: Implement a disable function so we can now
  3.1015 -		 * tell CPU not to respond to these local intr sources.
  3.1016 -		 * such as ITV,CPEI,MCA etc.
  3.1017 -		 */
  3.1018 -		if (desc->status == IRQ_PER_CPU)
  3.1019 -			continue;
  3.1020 -
  3.1021 -		cpus_and(mask, irq_affinity[irq], cpu_online_map);
  3.1022 -		if (any_online_cpu(mask) == NR_CPUS) {
  3.1023 -			/*
  3.1024 -			 * Save it for phase 2 processing
  3.1025 -			 */
  3.1026 -			vectors_in_migration[irq] = irq;
  3.1027 -
  3.1028 -			new_cpu = any_online_cpu(cpu_online_map);
  3.1029 -			mask = cpumask_of_cpu(new_cpu);
  3.1030 -
  3.1031 -			/*
  3.1032 -			 * Al three are essential, currently WARN_ON.. maybe panic?
  3.1033 -			 */
  3.1034 -			if (desc->handler && desc->handler->disable &&
  3.1035 -				desc->handler->enable && desc->handler->set_affinity) {
  3.1036 -				desc->handler->disable(irq);
  3.1037 -				desc->handler->set_affinity(irq, mask);
  3.1038 -				desc->handler->enable(irq);
  3.1039 -			} else {
  3.1040 -				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
  3.1041 -						!(desc->handler->enable) ||
  3.1042 -						!(desc->handler->set_affinity)));
  3.1043 -			}
  3.1044 -		}
  3.1045 -	}
  3.1046 -}
  3.1047 -
  3.1048 -void fixup_irqs(void)
  3.1049 -{
  3.1050 -	unsigned int irq;
  3.1051 -	extern void ia64_process_pending_intr(void);
  3.1052 -
  3.1053 -	ia64_set_itv(1<<16);
  3.1054 -	/*
  3.1055 -	 * Phase 1: Locate irq's bound to this cpu and
  3.1056 -	 * relocate them for cpu removal.
  3.1057 -	 */
  3.1058 -	migrate_irqs();
  3.1059 -
  3.1060 -	/*
  3.1061 -	 * Phase 2: Perform interrupt processing for all entries reported in
  3.1062 -	 * local APIC.
  3.1063 -	 */
  3.1064 -	ia64_process_pending_intr();
  3.1065 -
  3.1066 -	/*
  3.1067 -	 * Phase 3: Now handle any interrupts not captured in local APIC.
  3.1068 -	 * This is to account for cases that device interrupted during the time the
  3.1069 -	 * rte was being disabled and re-programmed.
  3.1070 -	 */
  3.1071 -	for (irq=0; irq < NR_IRQS; irq++) {
  3.1072 -		if (vectors_in_migration[irq]) {
  3.1073 -			vectors_in_migration[irq]=0;
  3.1074 -			do_IRQ(irq, NULL);
  3.1075 -		}
  3.1076 -	}
  3.1077 -
  3.1078 -	/*
  3.1079 -	 * Now let processor die. We do irq disable and max_xtp() to
  3.1080 -	 * ensure there is no more interrupts routed to this processor.
  3.1081 -	 * But the local timer interrupt can have 1 pending which we
  3.1082 -	 * take care in timer_interrupt().
  3.1083 -	 */
  3.1084 -	max_xtp();
  3.1085 -	local_irq_disable();
  3.1086 -}
  3.1087 -#endif
  3.1088 -
  3.1089 -#ifndef XEN
  3.1090 -static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
  3.1091 -			int count, int *eof, void *data)
  3.1092 -{
  3.1093 -	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
  3.1094 -	if (count - len < 2)
  3.1095 -		return -EINVAL;
  3.1096 -	len += sprintf(page + len, "\n");
  3.1097 -	return len;
  3.1098 -}
  3.1099 -
  3.1100 -static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
  3.1101 -					unsigned long count, void *data)
  3.1102 -{
  3.1103 -	cpumask_t *mask = (cpumask_t *)data;
  3.1104 -	unsigned long full_count = count, err;
  3.1105 -	cpumask_t new_value;
  3.1106 -
  3.1107 -	err = cpumask_parse(buffer, count, new_value);
  3.1108 -	if (err)
  3.1109 -		return err;
  3.1110 -
  3.1111 -	*mask = new_value;
  3.1112 -	return full_count;
  3.1113 -}
  3.1114 -
  3.1115 -#define MAX_NAMELEN 10
  3.1116 -
  3.1117 -static void register_irq_proc (unsigned int irq)
  3.1118 -{
  3.1119 -	char name [MAX_NAMELEN];
  3.1120 -
  3.1121 -	if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
  3.1122 -		return;
  3.1123 -
  3.1124 -	memset(name, 0, MAX_NAMELEN);
  3.1125 -	sprintf(name, "%d", irq);
  3.1126 -
  3.1127 -	/* create /proc/irq/1234 */
  3.1128 -	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
  3.1129 -
  3.1130 -#ifdef CONFIG_SMP
  3.1131 -	{
  3.1132 -		struct proc_dir_entry *entry;
  3.1133 -
  3.1134 -		/* create /proc/irq/1234/smp_affinity */
  3.1135 -		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
  3.1136 -
  3.1137 -		if (entry) {
  3.1138 -			entry->nlink = 1;
  3.1139 -			entry->data = (void *)(long)irq;
  3.1140 -			entry->read_proc = irq_affinity_read_proc;
  3.1141 -			entry->write_proc = irq_affinity_write_proc;
  3.1142 -		}
  3.1143 -
  3.1144 -		smp_affinity_entry[irq] = entry;
  3.1145 -	}
  3.1146 -#endif
  3.1147 -}
  3.1148 -
  3.1149 -cpumask_t prof_cpu_mask = CPU_MASK_ALL;
  3.1150 -
  3.1151 -void init_irq_proc (void)
  3.1152 -{
  3.1153 -	struct proc_dir_entry *entry;
  3.1154 -	int i;
  3.1155 -
  3.1156 -	/* create /proc/irq */
  3.1157 -	root_irq_dir = proc_mkdir("irq", 0);
  3.1158 -
  3.1159 -	/* create /proc/irq/prof_cpu_mask */
  3.1160 -	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
  3.1161 -
  3.1162 -	if (!entry)
  3.1163 -		return;
  3.1164 -
  3.1165 -	entry->nlink = 1;
  3.1166 -	entry->data = (void *)&prof_cpu_mask;
  3.1167 -	entry->read_proc = prof_cpu_mask_read_proc;
  3.1168 -	entry->write_proc = prof_cpu_mask_write_proc;
  3.1169 -
  3.1170 -	/*
  3.1171 -	 * Create entries for all existing IRQs.
  3.1172 -	 */
  3.1173 -	for (i = 0; i < NR_IRQS; i++) {
  3.1174 -		if (irq_descp(i)->handler == &no_irq_type)
  3.1175 -			continue;
  3.1176 -		register_irq_proc(i);
  3.1177 -	}
  3.1178 -}
  3.1179 -#endif
  3.1180 -
  3.1181 -
  3.1182 -#ifdef XEN
  3.1183  /*
  3.1184   * HANDLING OF GUEST-BOUND PHYSICAL IRQS
  3.1185   */
  3.1186 @@ -1338,8 +397,7 @@ typedef struct {
  3.1187      struct domain *guest[IRQ_MAX_GUESTS];
  3.1188  } irq_guest_action_t;
  3.1189  
  3.1190 -/*
  3.1191 -static void __do_IRQ_guest(int irq)
  3.1192 +void __do_IRQ_guest(int irq)
  3.1193  {
  3.1194      irq_desc_t         *desc = &irq_desc[irq];
  3.1195      irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
  3.1196 @@ -1354,7 +412,7 @@ static void __do_IRQ_guest(int irq)
  3.1197          send_guest_pirq(d, irq);
  3.1198      }
  3.1199  }
  3.1200 - */
  3.1201 +
  3.1202  int pirq_guest_unmask(struct domain *d)
  3.1203  {
  3.1204      irq_desc_t    *desc;
  3.1205 @@ -1486,10 +544,6 @@ int pirq_guest_unbind(struct domain *d, 
  3.1206      return 0;
  3.1207  }
  3.1208  
  3.1209 -#endif
  3.1210 -
  3.1211 -#ifdef XEN
  3.1212 -#ifdef IA64
  3.1213  // this is a temporary hack until real console input is implemented
  3.1214  extern void domain_pend_keyboard_interrupt(int irq);
  3.1215  irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
  3.1216 @@ -1509,5 +563,3 @@ void serial_input_init(void)
  3.1217  		while(1);
  3.1218  	}
  3.1219  }
  3.1220 -#endif
  3.1221 -#endif
     4.1 --- a/xen/arch/ia64/xen/xenirq.c	Tue Apr 11 13:54:58 2006 -0600
     4.2 +++ b/xen/arch/ia64/xen/xenirq.c	Tue Apr 11 13:55:36 2006 -0600
     4.3 @@ -30,32 +30,6 @@ xen_debug_irq(ia64_vector vector, struct
     4.4  	}
     4.5  }
     4.6  
     4.7 -
     4.8 -int
     4.9 -xen_do_IRQ(ia64_vector vector)
    4.10 -{
    4.11 -	if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) {
    4.12 -		extern void vcpu_pend_interrupt(void *, int);
    4.13 -#if 0
    4.14 -		if (firsttime[vector]) {
    4.15 -			printf("**** (iterate) First received int on vector=%lu,itc=%lx\n",
    4.16 -				(unsigned long) vector, ia64_get_itc());
    4.17 -			firsttime[vector] = 0;
    4.18 -		}
    4.19 -		if (firstpend[vector]) {
    4.20 -			printf("**** First pended int on vector=%lu,itc=%lx\n",
    4.21 -				(unsigned long) vector, ia64_get_itc());
    4.22 -			firstpend[vector] = 0;
    4.23 -		}
    4.24 -#endif
    4.25 -		//FIXME: TEMPORARY HACK!!!!
    4.26 -		vcpu_pend_interrupt(dom0->vcpu[0],vector);
    4.27 -		vcpu_wake(dom0->vcpu[0]);
    4.28 -		return(1);
    4.29 -	}
    4.30 -	return(0);
    4.31 -}
    4.32 -
    4.33  /*
    4.34   * Exit an interrupt context. Process softirqs if needed and possible:
    4.35   */