ia64/xen-unstable

changeset 10355:be05097d5d69

[LINUX] Fix IRQ SMP affinity logic for event channels.
The logic now mimics native x86 behaviour: a request to change
affinity via /proc is held until the next interrupt on that
event channel. So /proc/irq/n/smp_affinity may not change
immediately!
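
To picture the deferral, here is a stand-alone C sketch; it is not the code in
this changeset, and the names pending_target_cpu and visible_affinity are
hypothetical, but the flow matches the description above: a write to
/proc/irq/n/smp_affinity only records the requested CPU, and the affinity that
readers see is published when the next interrupt performs the actual rebind.

/*
 * Illustrative sketch only -- not the evtchn.c code in this changeset.
 */
#include <stdio.h>

#define NO_PENDING (-1)

static int visible_affinity   = 0;           /* what a read of smp_affinity reports */
static int pending_target_cpu = NO_PENDING;  /* what a write of smp_affinity requested */

/* Models the /proc write path: record the request, do not apply it yet. */
static void request_affinity(int cpu)
{
	pending_target_cpu = cpu;
}

/* Models the interrupt path: apply any pending request, then service the IRQ. */
static void handle_interrupt(void)
{
	if (pending_target_cpu != NO_PENDING) {
		visible_affinity   = pending_target_cpu;  /* rebind, then publish */
		pending_target_cpu = NO_PENDING;
	}
	/* ... service the interrupt ... */
}

int main(void)
{
	request_affinity(3);
	printf("after write:    cpu %d\n", visible_affinity);  /* still 0 */
	handle_interrupt();
	printf("after next irq: cpu %d\n", visible_affinity);  /* now 3 */
	return 0;
}
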
Other notes:
1. CPU-specific interrupts silently ignore requests to change
affinity. For example, resched0, timer0, callfunc0, ...
2. Reading smp_affinity always returns a cpumask containing
a single cpu. An event channel can only be bound to a single
cpu at a time. Neither Xen nor XenLinux implements IRQ
balancing; that requires a user-space balancing daemon (see
the user-space example below).
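
A minimal user-space sketch of both notes, assuming a placeholder IRQ number 16
under /proc/irq/ that is backed by an event channel: the mask read back always
names a single CPU, and immediately after a write it may still be the old one.

#include <stdio.h>

/* Print the current mask; on an event-channel IRQ it always names one CPU. */
static void show_affinity(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f != NULL) {
		if (fgets(buf, sizeof(buf), f) != NULL)
			printf("%s: %s", path, buf);
		fclose(f);
	}
}

int main(void)
{
	const char *path = "/proc/irq/16/smp_affinity";  /* placeholder IRQ */
	FILE *f;

	show_affinity(path);             /* e.g. 00000001 (CPU0) */

	f = fopen(path, "w");
	if (f != NULL) {
		fputs("00000002\n", f);  /* request CPU1 */
		fclose(f);
	}

	/* May still show the old mask: the rebind happens on the next
	 * interrupt delivered through that event channel. */
	show_affinity(path);
	return 0;
}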

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Jun 11 09:54:35 2006 +0100 (2006-06-11)
parents ddc25d4ebf60
children d7543cff88ae
files linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
line diff
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Sat Jun 10 11:07:11 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Sun Jun 11 09:54:35 2006 +0100
@@ -54,7 +54,8 @@
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
 /* IRQ <-> event-channel mappings. */
-static int evtchn_to_irq[NR_EVENT_CHANNELS] = {[0 ...  NR_EVENT_CHANNELS-1] = -1};
+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
+	[0 ...  NR_EVENT_CHANNELS-1] = -1 };
 
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 static u32 irq_info[NR_IRQS];
@@ -120,6 +121,11 @@ static inline unsigned long active_evtch
 
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
+	int irq = evtchn_to_irq[chn];
+
+	BUG_ON(irq == -1);
+	set_native_irq_info(irq, cpumask_of_cpu(cpu));
+
 	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
 	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
 	cpu_evtchn[chn] = cpu;
@@ -127,7 +133,12 @@ static void bind_evtchn_to_cpu(unsigned 
 
 static void init_evtchn_cpu_bindings(void)
 {
+	int i;
+
 	/* By default all event channels notify CPU#0. */
+	for (i = 0; i < NR_IRQS; i++)
+		set_native_irq_info(i, cpumask_of_cpu(0));
+
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
 	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
 }
@@ -430,25 +441,14 @@ void unbind_from_irqhandler(unsigned int
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
-#ifdef CONFIG_SMP
-static void do_nothing_function(void *ign)
-{
-}
-#endif
-
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
 	struct evtchn_bind_vcpu bind_vcpu;
-	int evtchn;
-
-	spin_lock(&irq_mapping_update_lock);
+	int evtchn = evtchn_from_irq(irq);
 
-	evtchn = evtchn_from_irq(irq);
-	if (!VALID_EVTCHN(evtchn)) {
-		spin_unlock(&irq_mapping_update_lock);
+	if (!VALID_EVTCHN(evtchn))
 		return;
-	}
 
 	/* Send future instances of this interrupt to other vcpu. */
 	bind_vcpu.port = evtchn;
@@ -461,21 +461,6 @@ static void rebind_irq_to_cpu(unsigned i
 	 */
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
 		bind_evtchn_to_cpu(evtchn, tcpu);
-
-	spin_unlock(&irq_mapping_update_lock);
-
-	/*
-	 * Now send the new target processor a NOP IPI. When this returns, it
-	 * will check for any pending interrupts, and so service any that got 
-	 * delivered to the wrong processor by mistake.
-	 * 
-	 * XXX: The only time this is called with interrupts disabled is from
-	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
-	 * interrupts disabled, and the missed interrupts will be picked up
-	 * when they start again. This is kind of a hack.
-	 */
-	if (!irqs_disabled())
-		smp_call_function(do_nothing_function, NULL, 0, 0);
 }
 
 
@@ -597,8 +582,8 @@ static unsigned int startup_pirq(unsigne
 
 	pirq_query_unmask(irq_to_pirq(irq));
 
+	evtchn_to_irq[evtchn] = irq;
 	bind_evtchn_to_cpu(evtchn, 0);
-	evtchn_to_irq[evtchn] = irq;
 	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
 
  out:
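
One detail the last hunk relies on: bind_evtchn_to_cpu() now looks up
evtchn_to_irq[chn] and BUG()s on -1, so startup_pirq() must establish the
mapping before binding. The mock-up below is a simplified stand-in, not the
kernel code: the array sizes, reported_cpu and the assert are illustrative
analogues of the real definitions in evtchn.c.

#include <assert.h>
#include <stdio.h>

#define NR_EVENT_CHANNELS 1024
#define NR_IRQS           256

static int evtchn_to_irq[NR_EVENT_CHANNELS];
static int reported_cpu[NR_IRQS];  /* per-IRQ CPU, analogue of set_native_irq_info() */

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	assert(irq != -1);           /* stands in for the new BUG_ON(irq == -1) */
	reported_cpu[irq] = cpu;     /* publish the affinity for this IRQ */
}

int main(void)
{
	unsigned int evtchn = 5, irq = 16;
	int i;

	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	/* Order after the patch: map the channel to its IRQ first, then bind.
	 * Swapping the two lines would trip the assertion above. */
	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);

	printf("irq %u now reports cpu %d\n", irq, reported_cpu[irq]);
	return 0;
}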