ia64/xen-unstable

changeset 7707:0915074c356e

Rationalise the kernel event-channel binding interfaces. VIRQ,
IPI and inter-domain event-channel sources are now all bound
through a single family of calls (bind_evtchn_to_irqhandler(),
bind_virq_to_irqhandler(), bind_ipi_to_irqhandler()) and released
through a common unbind_from_irqhandler(), which also closes the
underlying event channel. The new interfaces are simpler and
should be implementable by any architecture.
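
As an illustration of the new calling convention, a minimal sketch
follows (it is not part of the patch): my_bind(), my_handler() and the
"mydev" name are hypothetical, the include paths are approximate for
the 2.6 Xen tree, and only the bind/unbind calls themselves come from
this changeset.

    #include <linux/interrupt.h>
    #include <asm-xen/evtchn.h>

    /*
     * Previously a VIRQ source needed two steps to bind and two to
     * unbind:
     *
     *     irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
     *     rc  = request_irq(irq, my_handler, 0, "mydev", NULL);
     *     ...
     *     free_irq(irq, NULL);
     *     unbind_virq_from_irq(VIRQ_CONSOLE, 0);
     */

    /* Hypothetical handler; the signature matches the prototypes in
     * include/asm-xen/evtchn.h below. */
    static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int my_bind(void)
    {
            /* One call binds the VIRQ and installs the handler; it
             * returns the dynamically allocated IRQ or a negative
             * errno. */
            int irq = bind_virq_to_irqhandler(VIRQ_CONSOLE, 0, my_handler,
                                              0, "mydev", NULL);
            if (irq < 0)
                    return irq;

            /* One call removes the handler and closes the channel. */
            unbind_from_irqhandler(irq, NULL);
            return 0;
    }

Returning the IRQ directly from the bind call lets callers keep a
single handle for teardown, which is what the timer, SMP and driver
changes below now do.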

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Nov 08 15:58:31 2005 +0100 (2005-11-08)
parents 37ad91483bd3
children 98bcd8fbd5e3
files	linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
	linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
	linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c
	linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c
	linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
	linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
	linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
	linux-2.6-xen-sparse/drivers/xen/console/console.c
	linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c
	linux-2.6-xen-sparse/drivers/xen/netback/interface.c
	linux-2.6-xen-sparse/drivers/xen/netback/netback.c
	linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
	linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c
	linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
	linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c
	linux-2.6-xen-sparse/include/asm-xen/evtchn.h
     1.1 --- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Tue Nov 08 15:15:02 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c	Tue Nov 08 15:58:31 2005 +0100
     1.3 @@ -30,12 +30,6 @@ unsigned int bind_virq_to_evtchn(int vir
     1.4      return op.u.bind_virq.port;
     1.5  }
     1.6  
     1.7 -int bind_virq_to_irq(int virq, int cpu)
     1.8 -{
     1.9 -	printk("bind_virq_to_irq called... FIXME??\n");
    1.10 -	while(1);
    1.11 -}
    1.12 -
    1.13  #if 0
    1.14  void notify_remote_via_irq(int virq)
    1.15  {
    1.16 @@ -44,19 +38,6 @@ void notify_remote_via_irq(int virq)
    1.17  }
    1.18  #endif
    1.19  
    1.20 -void unbind_virq_from_evtchn(int virq)
    1.21 -{
    1.22 -    evtchn_op_t op;
    1.23 -
    1.24 -    op.cmd = EVTCHNOP_close;
    1.25 -//    op.u.close.dom = DOMID_SELF;
    1.26 -    op.u.close.port = virq_to_evtchn[virq];
    1.27 -    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    1.28 -	BUG();
    1.29 -
    1.30 -    virq_to_evtchn[virq] = -1;
    1.31 -}
    1.32 -
    1.33  int bind_evtchn_to_irqhandler(unsigned int evtchn,
    1.34                     irqreturn_t (*handler)(int, void *, struct pt_regs *),
    1.35                     unsigned long irqflags, const char * devname, void *dev_id)
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Tue Nov 08 15:15:02 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Tue Nov 08 15:58:31 2005 +0100
     2.3 @@ -127,13 +127,13 @@ static inline int __prepare_ICR2 (unsign
     2.4  	return SET_APIC_DEST_FIELD(mask);
     2.5  }
     2.6  
     2.7 -DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
     2.8 +DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
     2.9  
    2.10  static inline void __send_IPI_one(unsigned int cpu, int vector)
    2.11  {
    2.12 -	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
    2.13 -	BUG_ON(evtchn < 0);
    2.14 -	notify_remote_via_evtchn(evtchn);
    2.15 +	int irq = per_cpu(ipi_to_irq, cpu)[vector];
    2.16 +	BUG_ON(irq < 0);
    2.17 +	notify_remote_via_irq(irq);
    2.18  }
    2.19  
    2.20  void __send_IPI_shortcut(unsigned int shortcut, int vector)
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Tue Nov 08 15:15:02 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c	Tue Nov 08 15:58:31 2005 +0100
     3.3 @@ -748,10 +748,19 @@ static void __init hpet_time_init(void)
     3.4  /* Dynamically-mapped IRQ. */
     3.5  DEFINE_PER_CPU(int, timer_irq);
     3.6  
     3.7 -static struct irqaction irq_timer = {
     3.8 -	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer0",
     3.9 -	NULL, NULL
    3.10 -};
    3.11 +extern void (*late_time_init)(void);
    3.12 +static void setup_cpu0_timer_irq(void)
    3.13 +{
    3.14 +	per_cpu(timer_irq, 0) =
    3.15 +		bind_virq_to_irqhandler(
    3.16 +			VIRQ_TIMER,
    3.17 +			0,
    3.18 +			timer_interrupt,
    3.19 +			SA_INTERRUPT,
    3.20 +			"timer0",
    3.21 +			NULL);
    3.22 +	BUG_ON(per_cpu(timer_irq, 0) < 0);
    3.23 +}
    3.24  
    3.25  void __init time_init(void)
    3.26  {
    3.27 @@ -785,8 +794,8 @@ void __init time_init(void)
    3.28  	rdtscll(vxtime.last_tsc);
    3.29  #endif
    3.30  
    3.31 -	per_cpu(timer_irq, 0) = bind_virq_to_irq(VIRQ_TIMER, 0);
    3.32 -	(void)setup_irq(per_cpu(timer_irq, 0), &irq_timer);
    3.33 +	/* Cannot request_irq() until kmem is initialised. */
    3.34 +	late_time_init = setup_cpu0_timer_irq;
    3.35  }
    3.36  
    3.37  /* Convert jiffies to system time. */
    3.38 @@ -865,17 +874,22 @@ void local_setup_timer(unsigned int cpu)
    3.39  			per_cpu(shadow_time, cpu).system_timestamp;
    3.40  	} while (read_seqretry(&xtime_lock, seq));
    3.41  
    3.42 -	per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER, cpu);
    3.43  	sprintf(timer_name[cpu], "timer%d", cpu);
    3.44 -	BUG_ON(request_irq(per_cpu(timer_irq, cpu), timer_interrupt,
    3.45 -	                   SA_INTERRUPT, timer_name[cpu], NULL));
    3.46 +	per_cpu(timer_irq, cpu) =
    3.47 +		bind_virq_to_irqhandler(
    3.48 +			VIRQ_TIMER,
    3.49 +			cpu,
    3.50 +			timer_interrupt,
    3.51 +			SA_INTERRUPT,
    3.52 +			timer_name[cpu],
    3.53 +			NULL);
    3.54 +	BUG_ON(per_cpu(timer_irq, cpu) < 0);
    3.55  }
    3.56  
    3.57  void local_teardown_timer(unsigned int cpu)
    3.58  {
    3.59  	BUG_ON(cpu == 0);
    3.60 -	free_irq(per_cpu(timer_irq, cpu), NULL);
    3.61 -	unbind_virq_from_irq(VIRQ_TIMER, cpu);
    3.62 +	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
    3.63  }
    3.64  #endif
    3.65  
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Tue Nov 08 15:15:02 2005 +0100
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Tue Nov 08 15:58:31 2005 +0100
     4.3 @@ -52,16 +52,29 @@ static spinlock_t irq_mapping_update_loc
     4.4  
     4.5  /* IRQ <-> event-channel mappings. */
     4.6  static int evtchn_to_irq[NR_EVENT_CHANNELS];
     4.7 -static int irq_to_evtchn[NR_IRQS];
     4.8 +
     4.9 +/* Packed IRQ information: binding type, sub-type index, and event channel. */
    4.10 +static u32 irq_info[NR_IRQS];
    4.11 +/* Binding types. */
    4.12 +enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
    4.13 +/* Constructor for packed IRQ information. */
    4.14 +#define mk_irq_info(type, index, evtchn)				\
    4.15 +	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
    4.16 +/* Convenient shorthand for packed representation of an unbound IRQ. */
    4.17 +#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
    4.18 +/* Accessor macros for packed IRQ information. */
    4.19 +#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
    4.20 +#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
    4.21 +#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
    4.22  
    4.23  /* IRQ <-> VIRQ mapping. */
    4.24  DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
    4.25  
    4.26 -/* evtchn <-> IPI mapping. */
    4.27 +/* IRQ <-> IPI mapping. */
    4.28  #ifndef NR_IPIS
    4.29  #define NR_IPIS 1 
    4.30  #endif
    4.31 -DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
    4.32 +DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
    4.33  
    4.34  /* Reference counts for bindings to IRQs. */
    4.35  static int irq_bindcount[NR_IRQS];
    4.36 @@ -93,6 +106,8 @@ static void init_evtchn_cpu_bindings(voi
    4.37  	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
    4.38  }
    4.39  
    4.40 +#define cpu_from_evtchn(evtchn)		(cpu_evtchn[evtchn])
    4.41 +
    4.42  #else
    4.43  
    4.44  #define active_evtchns(cpu,sh,idx)		\
    4.45 @@ -100,6 +115,7 @@ static void init_evtchn_cpu_bindings(voi
    4.46  	 ~(sh)->evtchn_mask[idx])
    4.47  #define bind_evtchn_to_cpu(chn,cpu)	((void)0)
    4.48  #define init_evtchn_cpu_bindings()	((void)0)
    4.49 +#define cpu_from_evtchn(evtchn)		(0)
    4.50  
    4.51  #endif
    4.52  
    4.53 @@ -121,7 +137,8 @@ extern asmlinkage unsigned int do_IRQ(st
    4.54  } while (0)
    4.55  #endif
    4.56  
    4.57 -#define VALID_EVTCHN(_chn) ((_chn) >= 0)
    4.58 +/* Xen will never allocate port zero for any purpose. */
    4.59 +#define VALID_EVTCHN(chn)	((chn) != 0)
    4.60  
    4.61  /*
    4.62   * Force a proper event-channel callback from Xen after clearing the
    4.63 @@ -179,7 +196,26 @@ static int find_unbound_irq(void)
    4.64  	return irq;
    4.65  }
    4.66  
    4.67 -int bind_virq_to_irq(int virq, int cpu)
    4.68 +static int bind_evtchn_to_irq(unsigned int evtchn)
    4.69 +{
    4.70 +	int irq;
    4.71 +
    4.72 +	spin_lock(&irq_mapping_update_lock);
    4.73 +
    4.74 +	if ((irq = evtchn_to_irq[evtchn]) == -1) {
    4.75 +		irq = find_unbound_irq();
    4.76 +		evtchn_to_irq[evtchn] = irq;
    4.77 +		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
    4.78 +	}
    4.79 +
    4.80 +	irq_bindcount[irq]++;
    4.81 +
    4.82 +	spin_unlock(&irq_mapping_update_lock);
    4.83 +    
    4.84 +	return irq;
    4.85 +}
    4.86 +
    4.87 +static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
    4.88  {
    4.89  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
    4.90  	int evtchn, irq;
    4.91 @@ -194,7 +230,7 @@ int bind_virq_to_irq(int virq, int cpu)
    4.92  
    4.93  		irq = find_unbound_irq();
    4.94  		evtchn_to_irq[evtchn] = irq;
    4.95 -		irq_to_evtchn[irq]    = evtchn;
    4.96 +		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
    4.97  
    4.98  		per_cpu(virq_to_irq, cpu)[virq] = irq;
    4.99  
   4.100 @@ -207,59 +243,26 @@ int bind_virq_to_irq(int virq, int cpu)
   4.101      
   4.102  	return irq;
   4.103  }
   4.104 -EXPORT_SYMBOL(bind_virq_to_irq);
   4.105 -
   4.106 -void unbind_virq_from_irq(int virq, int cpu)
   4.107 -{
   4.108 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   4.109 -	int irq    = per_cpu(virq_to_irq, cpu)[virq];
   4.110 -	int evtchn = irq_to_evtchn[irq];
   4.111 -
   4.112 -	spin_lock(&irq_mapping_update_lock);
   4.113 -
   4.114 -	if (--irq_bindcount[irq] == 0) {
   4.115 -		op.u.close.port = evtchn;
   4.116 -		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   4.117  
   4.118 -		/*
   4.119 -		 * This is a slight hack. Interdomain ports can be allocated 
   4.120 -		 * directly by userspace, and at that point they get bound by 
   4.121 -		 * Xen to vcpu 0. We therefore need to make sure that if we get
   4.122 -		 * an event on an event channel we don't know about vcpu 0 
   4.123 -		 * handles it. Binding channels to vcpu 0 when closing them
   4.124 -		 * achieves this.
   4.125 -		 */
   4.126 -		bind_evtchn_to_cpu(evtchn, 0);
   4.127 -		evtchn_to_irq[evtchn] = -1;
   4.128 -		irq_to_evtchn[irq]    = -1;
   4.129 -		per_cpu(virq_to_irq, cpu)[virq] = -1;
   4.130 -	}
   4.131 -
   4.132 -	spin_unlock(&irq_mapping_update_lock);
   4.133 -}
   4.134 -EXPORT_SYMBOL(unbind_virq_from_irq);
   4.135 -
   4.136 -int bind_ipi_to_irq(int ipi, int cpu)
   4.137 +static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
   4.138  {
   4.139  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
   4.140  	int evtchn, irq;
   4.141  
   4.142  	spin_lock(&irq_mapping_update_lock);
   4.143  
   4.144 -	if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == -1) {
   4.145 +	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
   4.146  		op.u.bind_ipi.vcpu = cpu;
   4.147  		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   4.148  		evtchn = op.u.bind_ipi.port;
   4.149  
   4.150  		irq = find_unbound_irq();
   4.151  		evtchn_to_irq[evtchn] = irq;
   4.152 -		irq_to_evtchn[irq]    = evtchn;
   4.153 +		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
   4.154  
   4.155 -		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
   4.156 +		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
   4.157  
   4.158  		bind_evtchn_to_cpu(evtchn, cpu);
   4.159 -	} else {
   4.160 -		irq = evtchn_to_irq[evtchn];
   4.161  	}
   4.162  
   4.163  	irq_bindcount[irq]++;
   4.164 @@ -268,63 +271,36 @@ int bind_ipi_to_irq(int ipi, int cpu)
   4.165  
   4.166  	return irq;
   4.167  }
   4.168 -EXPORT_SYMBOL(bind_ipi_to_irq);
   4.169  
   4.170 -void unbind_ipi_from_irq(int ipi, int cpu)
   4.171 +static void unbind_from_irq(unsigned int irq)
   4.172  {
   4.173  	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   4.174 -	int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
   4.175 -	int irq    = evtchn_to_irq[evtchn];
   4.176 +	int evtchn = evtchn_from_irq(irq);
   4.177  
   4.178  	spin_lock(&irq_mapping_update_lock);
   4.179  
   4.180 -	if (--irq_bindcount[irq] == 0) {
   4.181 +	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
   4.182  		op.u.close.port = evtchn;
   4.183  		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   4.184  
   4.185 -		/* See comments in unbind_virq_from_irq */
   4.186 -		bind_evtchn_to_cpu(evtchn, 0);
   4.187 -		evtchn_to_irq[evtchn] = -1;
   4.188 -		irq_to_evtchn[irq]    = -1;
   4.189 -		per_cpu(ipi_to_evtchn, cpu)[ipi] = -1;
   4.190 -	}
   4.191 -
   4.192 -	spin_unlock(&irq_mapping_update_lock);
   4.193 -}
   4.194 -EXPORT_SYMBOL(unbind_ipi_from_irq);
   4.195 -
   4.196 -static int bind_evtchn_to_irq(unsigned int evtchn)
   4.197 -{
   4.198 -	int irq;
   4.199 -
   4.200 -	spin_lock(&irq_mapping_update_lock);
   4.201 +		switch (type_from_irq(irq)) {
   4.202 +		case IRQT_VIRQ:
   4.203 +			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
   4.204 +				[index_from_irq(irq)] = -1;
   4.205 +			break;
   4.206 +		case IRQT_IPI:
   4.207 +			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
   4.208 +				[index_from_irq(irq)] = -1;
   4.209 +			break;
   4.210 +		default:
   4.211 +			break;
   4.212 +		}
   4.213  
   4.214 -	if ((irq = evtchn_to_irq[evtchn]) == -1) {
   4.215 -		irq = find_unbound_irq();
   4.216 -		evtchn_to_irq[evtchn] = irq;
   4.217 -		irq_to_evtchn[irq]    = evtchn;
   4.218 -	}
   4.219 -
   4.220 -	irq_bindcount[irq]++;
   4.221 -
   4.222 -	spin_unlock(&irq_mapping_update_lock);
   4.223 -    
   4.224 -	return irq;
   4.225 -}
   4.226 -
   4.227 -static void unbind_evtchn_from_irq(unsigned int irq)
   4.228 -{
   4.229 -	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   4.230 -	int evtchn = irq_to_evtchn[irq];
   4.231 -
   4.232 -	spin_lock(&irq_mapping_update_lock);
   4.233 -
   4.234 -	if ((--irq_bindcount[irq] == 0) && (evtchn != -1)) {
   4.235 -		op.u.close.port = evtchn;
   4.236 -		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
   4.237 +		/* Closed ports are implicitly re-bound to VCPU0. */
   4.238 +		bind_evtchn_to_cpu(evtchn, 0);
   4.239  
   4.240  		evtchn_to_irq[evtchn] = -1;
   4.241 -		irq_to_evtchn[irq]    = -1;
   4.242 +		irq_info[irq] = IRQ_UNBOUND;
   4.243  	}
   4.244  
   4.245  	spin_unlock(&irq_mapping_update_lock);
   4.246 @@ -343,7 +319,7 @@ int bind_evtchn_to_irqhandler(
   4.247  	irq = bind_evtchn_to_irq(evtchn);
   4.248  	retval = request_irq(irq, handler, irqflags, devname, dev_id);
   4.249  	if (retval != 0) {
   4.250 -		unbind_evtchn_from_irq(irq);
   4.251 +		unbind_from_irq(irq);
   4.252  		return retval;
   4.253  	}
   4.254  
   4.255 @@ -351,12 +327,56 @@ int bind_evtchn_to_irqhandler(
   4.256  }
   4.257  EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
   4.258  
   4.259 -void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id)
   4.260 +int bind_virq_to_irqhandler(
   4.261 +	unsigned int virq,
   4.262 +	unsigned int cpu,
   4.263 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   4.264 +	unsigned long irqflags,
   4.265 +	const char *devname,
   4.266 +	void *dev_id)
   4.267 +{
   4.268 +	unsigned int irq;
   4.269 +	int retval;
   4.270 +
   4.271 +	irq = bind_virq_to_irq(virq, cpu);
   4.272 +	retval = request_irq(irq, handler, irqflags, devname, dev_id);
   4.273 +	if (retval != 0) {
   4.274 +		unbind_from_irq(irq);
   4.275 +		return retval;
   4.276 +	}
   4.277 +
   4.278 +	return irq;
   4.279 +}
   4.280 +EXPORT_SYMBOL(bind_virq_to_irqhandler);
   4.281 +
   4.282 +int bind_ipi_to_irqhandler(
   4.283 +	unsigned int ipi,
   4.284 +	unsigned int cpu,
   4.285 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   4.286 +	unsigned long irqflags,
   4.287 +	const char *devname,
   4.288 +	void *dev_id)
   4.289 +{
   4.290 +	unsigned int irq;
   4.291 +	int retval;
   4.292 +
   4.293 +	irq = bind_ipi_to_irq(ipi, cpu);
   4.294 +	retval = request_irq(irq, handler, irqflags, devname, dev_id);
   4.295 +	if (retval != 0) {
   4.296 +		unbind_from_irq(irq);
   4.297 +		return retval;
   4.298 +	}
   4.299 +
   4.300 +	return irq;
   4.301 +}
   4.302 +EXPORT_SYMBOL(bind_ipi_to_irqhandler);
   4.303 +
   4.304 +void unbind_from_irqhandler(unsigned int irq, void *dev_id)
   4.305  {
   4.306  	free_irq(irq, dev_id);
   4.307 -	unbind_evtchn_from_irq(irq);
   4.308 +	unbind_from_irq(irq);
   4.309  }
   4.310 -EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
   4.311 +EXPORT_SYMBOL(unbind_from_irqhandler);
   4.312  
   4.313  #ifdef CONFIG_SMP
   4.314  static void do_nothing_function(void *ign)
   4.315 @@ -371,7 +391,8 @@ static void rebind_irq_to_cpu(unsigned i
   4.316  	int evtchn;
   4.317  
   4.318  	spin_lock(&irq_mapping_update_lock);
   4.319 -	evtchn = irq_to_evtchn[irq];
   4.320 +
   4.321 +	evtchn = evtchn_from_irq(irq);
   4.322  	if (!VALID_EVTCHN(evtchn)) {
   4.323  		spin_unlock(&irq_mapping_update_lock);
   4.324  		return;
   4.325 @@ -418,7 +439,7 @@ static void set_affinity_irq(unsigned ir
   4.326  
   4.327  static unsigned int startup_dynirq(unsigned int irq)
   4.328  {
   4.329 -	int evtchn = irq_to_evtchn[irq];
   4.330 +	int evtchn = evtchn_from_irq(irq);
   4.331  
   4.332  	if (VALID_EVTCHN(evtchn))
   4.333  		unmask_evtchn(evtchn);
   4.334 @@ -427,7 +448,7 @@ static unsigned int startup_dynirq(unsig
   4.335  
   4.336  static void shutdown_dynirq(unsigned int irq)
   4.337  {
   4.338 -	int evtchn = irq_to_evtchn[irq];
   4.339 +	int evtchn = evtchn_from_irq(irq);
   4.340  
   4.341  	if (VALID_EVTCHN(evtchn))
   4.342  		mask_evtchn(evtchn);
   4.343 @@ -435,7 +456,7 @@ static void shutdown_dynirq(unsigned int
   4.344  
   4.345  static void enable_dynirq(unsigned int irq)
   4.346  {
   4.347 -	int evtchn = irq_to_evtchn[irq];
   4.348 +	int evtchn = evtchn_from_irq(irq);
   4.349  
   4.350  	if (VALID_EVTCHN(evtchn))
   4.351  		unmask_evtchn(evtchn);
   4.352 @@ -443,7 +464,7 @@ static void enable_dynirq(unsigned int i
   4.353  
   4.354  static void disable_dynirq(unsigned int irq)
   4.355  {
   4.356 -	int evtchn = irq_to_evtchn[irq];
   4.357 +	int evtchn = evtchn_from_irq(irq);
   4.358  
   4.359  	if (VALID_EVTCHN(evtchn))
   4.360  		mask_evtchn(evtchn);
   4.361 @@ -451,7 +472,7 @@ static void disable_dynirq(unsigned int 
   4.362  
   4.363  static void ack_dynirq(unsigned int irq)
   4.364  {
   4.365 -	int evtchn = irq_to_evtchn[irq];
   4.366 +	int evtchn = evtchn_from_irq(irq);
   4.367  
   4.368  	if (VALID_EVTCHN(evtchn)) {
   4.369  		mask_evtchn(evtchn);
   4.370 @@ -461,7 +482,7 @@ static void ack_dynirq(unsigned int irq)
   4.371  
   4.372  static void end_dynirq(unsigned int irq)
   4.373  {
   4.374 -	int evtchn = irq_to_evtchn[irq];
   4.375 +	int evtchn = evtchn_from_irq(irq);
   4.376  
   4.377  	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
   4.378  		unmask_evtchn(evtchn);
   4.379 @@ -507,7 +528,7 @@ static inline void pirq_query_unmask(int
   4.380  static unsigned int startup_pirq(unsigned int irq)
   4.381  {
   4.382  	evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
   4.383 -	int evtchn = irq_to_evtchn[irq];
   4.384 +	int evtchn = evtchn_from_irq(irq);
   4.385  
   4.386  	if (VALID_EVTCHN(evtchn))
   4.387  		goto out;
   4.388 @@ -527,7 +548,7 @@ static unsigned int startup_pirq(unsigne
   4.389  
   4.390  	bind_evtchn_to_cpu(evtchn, 0);
   4.391  	evtchn_to_irq[evtchn] = irq;
   4.392 -	irq_to_evtchn[irq]    = evtchn;
   4.393 +	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
   4.394  
   4.395   out:
   4.396  	unmask_evtchn(evtchn);
   4.397 @@ -539,7 +560,7 @@ static unsigned int startup_pirq(unsigne
   4.398  static void shutdown_pirq(unsigned int irq)
   4.399  {
   4.400  	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   4.401 -	int evtchn = irq_to_evtchn[irq];
   4.402 +	int evtchn = evtchn_from_irq(irq);
   4.403  
   4.404  	if (!VALID_EVTCHN(evtchn))
   4.405  		return;
   4.406 @@ -551,12 +572,12 @@ static void shutdown_pirq(unsigned int i
   4.407  
   4.408  	bind_evtchn_to_cpu(evtchn, 0);
   4.409  	evtchn_to_irq[evtchn] = -1;
   4.410 -	irq_to_evtchn[irq]    = -1;
   4.411 +	irq_info[irq] = IRQ_UNBOUND;
   4.412  }
   4.413  
   4.414  static void enable_pirq(unsigned int irq)
   4.415  {
   4.416 -	int evtchn = irq_to_evtchn[irq];
   4.417 +	int evtchn = evtchn_from_irq(irq);
   4.418  
   4.419  	if (VALID_EVTCHN(evtchn)) {
   4.420  		unmask_evtchn(evtchn);
   4.421 @@ -566,7 +587,7 @@ static void enable_pirq(unsigned int irq
   4.422  
   4.423  static void disable_pirq(unsigned int irq)
   4.424  {
   4.425 -	int evtchn = irq_to_evtchn[irq];
   4.426 +	int evtchn = evtchn_from_irq(irq);
   4.427  
   4.428  	if (VALID_EVTCHN(evtchn))
   4.429  		mask_evtchn(evtchn);
   4.430 @@ -574,7 +595,7 @@ static void disable_pirq(unsigned int ir
   4.431  
   4.432  static void ack_pirq(unsigned int irq)
   4.433  {
   4.434 -	int evtchn = irq_to_evtchn[irq];
   4.435 +	int evtchn = evtchn_from_irq(irq);
   4.436  
   4.437  	if (VALID_EVTCHN(evtchn)) {
   4.438  		mask_evtchn(evtchn);
   4.439 @@ -584,7 +605,7 @@ static void ack_pirq(unsigned int irq)
   4.440  
   4.441  static void end_pirq(unsigned int irq)
   4.442  {
   4.443 -	int evtchn = irq_to_evtchn[irq];
   4.444 +	int evtchn = evtchn_from_irq(irq);
   4.445  
   4.446  	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
   4.447  		unmask_evtchn(evtchn);
   4.448 @@ -605,7 +626,7 @@ static struct hw_interrupt_type pirq_typ
   4.449  
   4.450  void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
   4.451  {
   4.452 -	int evtchn = irq_to_evtchn[i];
   4.453 +	int evtchn = evtchn_from_irq(i);
   4.454  	shared_info_t *s = HYPERVISOR_shared_info;
   4.455  	if (!VALID_EVTCHN(evtchn))
   4.456  		return;
   4.457 @@ -615,7 +636,7 @@ void hw_resend_irq(struct hw_interrupt_t
   4.458  
   4.459  void notify_remote_via_irq(int irq)
   4.460  {
   4.461 -	int evtchn = irq_to_evtchn[irq];
   4.462 +	int evtchn = evtchn_from_irq(irq);
   4.463  
   4.464  	if (VALID_EVTCHN(evtchn))
   4.465  		notify_remote_via_evtchn(evtchn);
   4.466 @@ -635,25 +656,29 @@ void irq_resume(void)
   4.467  
   4.468  	/* Check that no PIRQs are still bound. */
   4.469  	for (pirq = 0; pirq < NR_PIRQS; pirq++)
   4.470 -		BUG_ON(irq_to_evtchn[pirq_to_irq(pirq)] != -1);
   4.471 +		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
   4.472  
   4.473  	/* Secondary CPUs must have no VIRQ or IPI bindings. */
   4.474  	for (cpu = 1; cpu < NR_CPUS; cpu++) {
   4.475  		for (virq = 0; virq < NR_VIRQS; virq++)
   4.476  			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
   4.477  		for (ipi = 0; ipi < NR_IPIS; ipi++)
   4.478 -			BUG_ON(per_cpu(ipi_to_evtchn, cpu)[ipi] != -1);
   4.479 +			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
   4.480  	}
   4.481  
   4.482 -	/* No IRQ -> event-channel mappings. */
   4.483 +	/* No IRQ <-> event-channel mappings. */
   4.484  	for (irq = 0; irq < NR_IRQS; irq++)
   4.485 -		irq_to_evtchn[irq] = -1;
   4.486 +		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
   4.487 +	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
   4.488 +		evtchn_to_irq[evtchn] = -1;
   4.489  
   4.490  	/* Primary CPU: rebind VIRQs automatically. */
   4.491  	for (virq = 0; virq < NR_VIRQS; virq++) {
   4.492  		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
   4.493  			continue;
   4.494  
   4.495 +		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
   4.496 +
   4.497  		/* Get a new binding from Xen. */
   4.498  		memset(&op, 0, sizeof(op));
   4.499  		op.cmd              = EVTCHNOP_bind_virq;
   4.500 @@ -664,7 +689,7 @@ void irq_resume(void)
   4.501          
   4.502  		/* Record the new mapping. */
   4.503  		evtchn_to_irq[evtchn] = irq;
   4.504 -		irq_to_evtchn[irq]    = evtchn;
   4.505 +		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
   4.506  
   4.507  		/* Ready for use. */
   4.508  		unmask_evtchn(evtchn);
   4.509 @@ -672,11 +697,10 @@ void irq_resume(void)
   4.510  
   4.511  	/* Primary CPU: rebind IPIs automatically. */
   4.512  	for (ipi = 0; ipi < NR_IPIS; ipi++) {
   4.513 -		if ((evtchn = per_cpu(ipi_to_evtchn, 0)[ipi]) == -1)
   4.514 +		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
   4.515  			continue;
   4.516  
   4.517 -		irq = evtchn_to_irq[evtchn];
   4.518 -		evtchn_to_irq[evtchn] = -1;
   4.519 +		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
   4.520  
   4.521  		/* Get a new binding from Xen. */
   4.522  		memset(&op, 0, sizeof(op));
   4.523 @@ -687,18 +711,11 @@ void irq_resume(void)
   4.524          
   4.525  		/* Record the new mapping. */
   4.526  		evtchn_to_irq[evtchn] = irq;
   4.527 -		irq_to_evtchn[irq]    = evtchn;
   4.528 +		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
   4.529  
   4.530  		/* Ready for use. */
   4.531  		unmask_evtchn(evtchn);
   4.532  	}
   4.533 -
   4.534 -	/* Remove defunct event-channel -> IRQ mappings. */
   4.535 -	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
   4.536 -		if ((evtchn_to_irq[evtchn] != -1) &&
   4.537 -		    (irq_to_evtchn[evtchn_to_irq[evtchn]] == -1))
   4.538 -			evtchn_to_irq[evtchn] = -1;
   4.539 -	}
   4.540  }
   4.541  
   4.542  void __init init_IRQ(void)
   4.543 @@ -717,7 +734,7 @@ void __init init_IRQ(void)
   4.544  		for (i = 0; i < NR_VIRQS; i++)
   4.545  			per_cpu(virq_to_irq, cpu)[i] = -1;
   4.546  		for (i = 0; i < NR_IPIS; i++)
   4.547 -			per_cpu(ipi_to_evtchn, cpu)[i] = -1;
   4.548 +			per_cpu(ipi_to_irq, cpu)[i] = -1;
   4.549  	}
   4.550  
   4.551  	/* No event-channel -> IRQ mappings. */
   4.552 @@ -728,7 +745,7 @@ void __init init_IRQ(void)
   4.553  
   4.554  	/* No IRQ -> event-channel mappings. */
   4.555  	for (i = 0; i < NR_IRQS; i++)
   4.556 -		irq_to_evtchn[i] = -1;
   4.557 +		irq_info[i] = IRQ_UNBOUND;
   4.558  
   4.559  	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
   4.560  	for (i = 0; i < NR_DYNIRQS; i++) {
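
Aside, not part of the patch: the packed irq_info encoding added to
evtchn.c above can be spot-checked in isolation. The sketch below
copies the constructor macro from the hunk, rewrites the accessors to
take the packed word directly (the kernel versions index the
irq_info[] array), and round-trips arbitrary example values. It also
shows why VALID_EVTCHN() can simply test for a non-zero port: an
unbound IRQ packs to all zeroes, and Xen never allocates port zero.

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint16_t u16;
    typedef uint8_t  u8;

    /* Constructor copied from the evtchn.c hunk above: binding type in
     * bits 31-24, sub-type index in bits 23-16, port in bits 15-0. */
    #define mk_irq_info(type, index, evtchn)				\
            (((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))

    /* Accessors rewritten to operate on the packed word itself. */
    #define evtchn_from_info(info) ((u16)(info))
    #define index_from_info(info)  ((u8)((info) >> 16))
    #define type_from_info(info)   ((u8)((info) >> 24))

    enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

    int main(void)
    {
            /* Example values only: a VIRQ binding, sub-type 3, port 261. */
            u32 info = mk_irq_info(IRQT_VIRQ, 3, 261);

            assert(type_from_info(info)   == IRQT_VIRQ);
            assert(index_from_info(info)  == 3);
            assert(evtchn_from_info(info) == 261);

            /* IRQ_UNBOUND packs to all zeroes, i.e. port 0 == invalid. */
            assert(mk_irq_info(IRQT_UNBOUND, 0, 0) == 0);
            return 0;
    }
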
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Tue Nov 08 15:15:02 2005 +0100
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/smpboot.c	Tue Nov 08 15:58:31 2005 +0100
     5.3 @@ -87,18 +87,27 @@ void __init smp_alloc_memory(void)
     5.4  
     5.5  static void xen_smp_intr_init(unsigned int cpu)
     5.6  {
     5.7 +	sprintf(resched_name[cpu], "resched%d", cpu);
     5.8  	per_cpu(resched_irq, cpu) =
     5.9 -		bind_ipi_to_irq(RESCHEDULE_VECTOR, cpu);
    5.10 -	sprintf(resched_name[cpu], "resched%d", cpu);
    5.11 -	BUG_ON(request_irq(per_cpu(resched_irq, cpu), smp_reschedule_interrupt,
    5.12 -	                   SA_INTERRUPT, resched_name[cpu], NULL));
    5.13 +		bind_ipi_to_irqhandler(
    5.14 +			RESCHEDULE_VECTOR,
    5.15 +			cpu,
    5.16 +			smp_reschedule_interrupt,
    5.17 +			SA_INTERRUPT,
    5.18 +			resched_name[cpu],
    5.19 +			NULL);
    5.20 +	BUG_ON(per_cpu(resched_irq, cpu) < 0);
    5.21  
    5.22 -	per_cpu(callfunc_irq, cpu) =
    5.23 -		bind_ipi_to_irq(CALL_FUNCTION_VECTOR, cpu);
    5.24  	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
    5.25 -	BUG_ON(request_irq(per_cpu(callfunc_irq, cpu),
    5.26 -	                   smp_call_function_interrupt,
    5.27 -	                   SA_INTERRUPT, callfunc_name[cpu], NULL));
    5.28 +	per_cpu(callfunc_irq, cpu) =
    5.29 +		bind_ipi_to_irqhandler(
    5.30 +			CALL_FUNCTION_VECTOR,
    5.31 +			cpu,
    5.32 +			smp_call_function_interrupt,
    5.33 +			SA_INTERRUPT,
    5.34 +			callfunc_name[cpu],
    5.35 +			NULL);
    5.36 +	BUG_ON(per_cpu(callfunc_irq, cpu) < 0);
    5.37  
    5.38  	if (cpu != 0)
    5.39  		local_setup_timer(cpu);
    5.40 @@ -110,11 +119,8 @@ static void xen_smp_intr_exit(unsigned i
    5.41  	if (cpu != 0)
    5.42  		local_teardown_timer(cpu);
    5.43  
    5.44 -	free_irq(per_cpu(resched_irq, cpu), NULL);
    5.45 -	unbind_ipi_from_irq(RESCHEDULE_VECTOR, cpu);
    5.46 -
    5.47 -	free_irq(per_cpu(callfunc_irq, cpu), NULL);
    5.48 -	unbind_ipi_from_irq(CALL_FUNCTION_VECTOR, cpu);
    5.49 +	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
    5.50 +	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
    5.51  }
    5.52  #endif
    5.53  
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Tue Nov 08 15:15:02 2005 +0100
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/genapic_xen.c	Tue Nov 08 15:58:31 2005 +0100
     6.3 @@ -27,13 +27,13 @@
     6.4  #endif
     6.5  #include <asm-xen/evtchn.h>
     6.6  
     6.7 -DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
     6.8 +DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
     6.9  
    6.10  static inline void __send_IPI_one(unsigned int cpu, int vector)
    6.11  {
    6.12 -	int evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
    6.13 -	BUG_ON(evtchn < 0);
    6.14 -	notify_remote_via_evtchn(evtchn);
    6.15 +	int irq = per_cpu(ipi_to_irq, cpu)[vector];
    6.16 +	BUG_ON(irq < 0);
    6.17 +	notify_remote_via_irq(irq);
    6.18  }
    6.19  
    6.20  void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Tue Nov 08 15:15:02 2005 +0100
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c	Tue Nov 08 15:58:31 2005 +0100
     7.3 @@ -119,7 +119,7 @@ static void free_blkif(void *arg)
     7.4  	if (!blkif->irq)
     7.5  		return;
     7.6  
     7.7 -	unbind_evtchn_from_irqhandler(blkif->irq, blkif);
     7.8 +	unbind_from_irqhandler(blkif->irq, blkif);
     7.9  	blkif->irq = 0;
    7.10  
    7.11  	vbd_free(&blkif->vbd);
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Tue Nov 08 15:15:02 2005 +0100
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c	Tue Nov 08 15:58:31 2005 +0100
     8.3 @@ -358,7 +358,7 @@ static void blkif_free(struct blkfront_i
     8.4  		info->ring.sring = NULL;
     8.5  	}
     8.6  	if (info->irq)
     8.7 -		unbind_evtchn_from_irqhandler(info->irq, info); 
     8.8 +		unbind_from_irqhandler(info->irq, info); 
     8.9  	info->evtchn = info->irq = 0;
    8.10  }
    8.11  
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Tue Nov 08 15:15:02 2005 +0100
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c	Tue Nov 08 15:58:31 2005 +0100
     9.3 @@ -113,7 +113,7 @@ static void free_blkif(void *arg)
     9.4  	blkif_t *blkif = (blkif_t *)arg;
     9.5  
     9.6  	if (blkif->irq)
     9.7 -		unbind_evtchn_from_irqhandler(blkif->irq, blkif);
     9.8 +		unbind_from_irqhandler(blkif->irq, blkif);
     9.9  
    9.10  	if (blkif->blk_ring.sring) {
    9.11  		unmap_frontend_page(blkif);
    10.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/console.c	Tue Nov 08 15:15:02 2005 +0100
    10.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c	Tue Nov 08 15:58:31 2005 +0100
    10.3 @@ -771,15 +771,14 @@ static int __init xencons_init(void)
    10.4  #endif
    10.5  
    10.6  	if (xen_start_info->flags & SIF_INITDOMAIN) {
    10.7 -#ifdef __ia64__
    10.8 -		xencons_priv_irq = bind_virq_to_evtchn(VIRQ_CONSOLE);
    10.9 -		bind_evtchn_to_irqhandler(xencons_priv_irq,
   10.10 -				xencons_priv_interrupt, 0, "console", NULL);
   10.11 -#else
   10.12 -		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
   10.13 -		(void)request_irq(xencons_priv_irq,
   10.14 -				  xencons_priv_interrupt, 0, "console", NULL);
   10.15 -#endif
   10.16 +		xencons_priv_irq = bind_virq_to_irqhandler(
   10.17 +			VIRQ_CONSOLE,
   10.18 +			0,
   10.19 +			xencons_priv_interrupt,
   10.20 +			0,
   10.21 +			"console",
   10.22 +			NULL);
   10.23 +		BUG_ON(xencons_priv_irq < 0);
   10.24  	} else {
   10.25  		xencons_ring_register_receiver(xencons_rx);
   10.26  	}
    11.1 --- a/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Tue Nov 08 15:15:02 2005 +0100
    11.2 +++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c	Tue Nov 08 15:58:31 2005 +0100
    11.3 @@ -86,7 +86,7 @@ int xencons_ring_init(void)
    11.4  	int err;
    11.5  
    11.6  	if (xencons_irq)
    11.7 -		unbind_evtchn_from_irqhandler(xencons_irq, NULL);
    11.8 +		unbind_from_irqhandler(xencons_irq, NULL);
    11.9  	xencons_irq = 0;
   11.10  
   11.11  	if (!xen_start_info->console_evtchn)
    12.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Tue Nov 08 15:15:02 2005 +0100
    12.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Tue Nov 08 15:58:31 2005 +0100
    12.3 @@ -241,7 +241,7 @@ static void free_netif_callback(void *ar
    12.4  	if (!netif->irq)
    12.5  		return;
    12.6  
    12.7 -	unbind_evtchn_from_irqhandler(netif->irq, netif);
    12.8 +	unbind_from_irqhandler(netif->irq, netif);
    12.9  	netif->irq = 0;
   12.10  
   12.11  	unregister_netdev(netif->dev);
    13.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Tue Nov 08 15:15:02 2005 +0100
    13.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Tue Nov 08 15:58:31 2005 +0100
    13.3 @@ -822,9 +822,13 @@ static int __init netback_init(void)
    13.4  
    13.5  	netif_xenbus_init();
    13.6  
    13.7 -	(void)request_irq(bind_virq_to_irq(VIRQ_DEBUG, 0),
    13.8 -			  netif_be_dbg, SA_SHIRQ, 
    13.9 -			  "net-be-dbg", &netif_be_dbg);
   13.10 +	(void)bind_virq_to_irqhandler(
   13.11 +		VIRQ_DEBUG,
   13.12 +		0,
   13.13 +		netif_be_dbg,
   13.14 +		SA_SHIRQ, 
   13.15 +		"net-be-dbg",
   13.16 +		&netif_be_dbg);
   13.17  
   13.18  	return 0;
   13.19  }
    14.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Nov 08 15:15:02 2005 +0100
    14.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Nov 08 15:58:31 2005 +0100
    14.3 @@ -1049,7 +1049,7 @@ static void netif_free(struct netfront_i
    14.4  	info->rx = NULL;
    14.5  
    14.6  	if (info->irq)
    14.7 -		unbind_evtchn_from_irqhandler(info->irq, info->netdev);
    14.8 +		unbind_from_irqhandler(info->irq, info->netdev);
    14.9  	info->evtchn = info->irq = 0;
   14.10  }
   14.11  
    15.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Tue Nov 08 15:15:02 2005 +0100
    15.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c	Tue Nov 08 15:58:31 2005 +0100
    15.3 @@ -162,7 +162,7 @@ static void
    15.4  	tpmif_t *tpmif = (tpmif_t *) arg;
    15.5  
    15.6  	if (tpmif->irq)
    15.7 -		unbind_evtchn_from_irqhandler(tpmif->irq, tpmif);
    15.8 +		unbind_from_irqhandler(tpmif->irq, tpmif);
    15.9  
   15.10  	if (tpmif->tx) {
   15.11  		unmap_frontend_page(tpmif);
    16.1 --- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Tue Nov 08 15:15:02 2005 +0100
    16.2 +++ b/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c	Tue Nov 08 15:58:31 2005 +0100
    16.3 @@ -300,7 +300,7 @@ static void destroy_tpmring(struct tpmfr
    16.4  	}
    16.5  
    16.6  	if (tp->irq)
    16.7 -		unbind_evtchn_from_irqhandler(tp->irq, NULL);
    16.8 +		unbind_from_irqhandler(tp->irq, NULL);
    16.9  	tp->evtchn = tp->irq = 0;
   16.10  }
   16.11  
    17.1 --- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Tue Nov 08 15:15:02 2005 +0100
    17.2 +++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_comms.c	Tue Nov 08 15:58:31 2005 +0100
    17.3 @@ -177,7 +177,7 @@ int xb_init_comms(void)
    17.4  	int err;
    17.5  
    17.6  	if (xenbus_irq)
    17.7 -		unbind_evtchn_from_irqhandler(xenbus_irq, &xb_waitq);
    17.8 +		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
    17.9  
   17.10  	err = bind_evtchn_to_irqhandler(
   17.11  		xen_start_info->store_evtchn, wake_waiting,
    18.1 --- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Tue Nov 08 15:15:02 2005 +0100
    18.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h	Tue Nov 08 15:58:31 2005 +0100
    18.3 @@ -43,29 +43,41 @@
    18.4   * LOW-LEVEL DEFINITIONS
    18.5   */
    18.6  
    18.7 -/* Dynamically bind a VIRQ source to Linux IRQ space. */
    18.8 -extern int  bind_virq_to_irq(int virq, int cpu);
    18.9 -extern void unbind_virq_from_irq(int virq, int cpu);
   18.10 -
   18.11 -/* Dynamically bind an IPI source to Linux IRQ space. */
   18.12 -extern int  bind_ipi_to_irq(int ipi, int cpu);
   18.13 -extern void unbind_ipi_from_irq(int ipi, int cpu);
   18.14 -
   18.15  /*
   18.16 - * Dynamically bind an event-channel port to an IRQ-like callback handler.
   18.17 + * Dynamically bind an event source to an IRQ-like callback handler.
   18.18   * On some platforms this may not be implemented via the Linux IRQ subsystem.
   18.19   * The IRQ argument passed to the callback handler is the same as returned
   18.20   * from the bind call. It may not correspond to a Linux IRQ number.
   18.21 - * BIND:   Returns IRQ or error.
   18.22 + * Returns IRQ or negative errno.
   18.23   * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
   18.24   */
   18.25 -extern int  bind_evtchn_to_irqhandler(
   18.26 +extern int bind_evtchn_to_irqhandler(
   18.27  	unsigned int evtchn,
   18.28  	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   18.29  	unsigned long irqflags,
   18.30  	const char *devname,
   18.31  	void *dev_id);
   18.32 -extern void unbind_evtchn_from_irqhandler(unsigned int irq, void *dev_id);
   18.33 +extern int bind_virq_to_irqhandler(
   18.34 +	unsigned int virq,
   18.35 +	unsigned int cpu,
   18.36 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   18.37 +	unsigned long irqflags,
   18.38 +	const char *devname,
   18.39 +	void *dev_id);
   18.40 +extern int bind_ipi_to_irqhandler(
   18.41 +	unsigned int ipi,
   18.42 +	unsigned int cpu,
   18.43 +	irqreturn_t (*handler)(int, void *, struct pt_regs *),
   18.44 +	unsigned long irqflags,
   18.45 +	const char *devname,
   18.46 +	void *dev_id);
   18.47 +
   18.48 +/*
   18.49 + * Common unbind function for all event sources. Takes IRQ to unbind from.
   18.50 + * Automatically closes the underlying event channel (even for bindings
   18.51 + * made with bind_evtchn_to_irqhandler()).
   18.52 + */
   18.53 +extern void unbind_from_irqhandler(unsigned int irq, void *dev_id);
   18.54  
   18.55  /*
   18.56   * Unlike notify_remote_via_evtchn(), this is safe to use across
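
Finally, a hedged sketch of the inter-domain case declared above, in
the style of the frontend drivers converted by this patch. The
my_front structure, my_intr() handler and the "myfront" name are
hypothetical; only bind_evtchn_to_irqhandler(), notify_remote_via_irq()
and unbind_from_irqhandler() are part of the interface, and the
include paths are approximate for the 2.6 Xen tree.

    #include <linux/interrupt.h>
    #include <asm-xen/evtchn.h>

    /* Hypothetical frontend state; evtchn is a port already set up via
     * the xenbus/grant handshake with the backend. */
    struct my_front {
            unsigned int evtchn;
            int irq;
    };

    static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int my_connect(struct my_front *f)
    {
            f->irq = bind_evtchn_to_irqhandler(f->evtchn, my_intr, 0,
                                               "myfront", f);
            if (f->irq < 0)
                    return f->irq;          /* negative errno */

            notify_remote_via_irq(f->irq);  /* kick the remote end */
            return 0;
    }

    static void my_disconnect(struct my_front *f)
    {
            /* Removes the handler and closes the event channel, even
             * for bindings made with bind_evtchn_to_irqhandler(). */
            unbind_from_irqhandler(f->irq, f);
            f->evtchn = f->irq = 0;
    }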