ia64/xen-unstable

changeset 13134:3a28be71b667

[LINUX] Disallow nested event delivery.

This eliminates the risk of overflowing the kernel stack and is a
reasonable policy given that we have no concept of priorities among
event sources.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Dec 20 11:09:56 2006 +0000 (2006-12-20)
parents 516e4faac066
children 301bcae16928
files linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Dec 20 10:41:33 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Wed Dec 20 11:09:56 2006 +0000
@@ -208,38 +208,51 @@ void force_evtchn_callback(void)
 /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
 EXPORT_SYMBOL(force_evtchn_callback);
 
+static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
+
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
 	unsigned long  l1, l2;
-	unsigned int   l1i, l2i, port;
+	unsigned int   l1i, l2i, port, count;
 	int            irq, cpu = smp_processor_id();
 	shared_info_t *s = HYPERVISOR_shared_info;
 	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
 
-	vcpu_info->evtchn_upcall_pending = 0;
+	do {
+		/* Avoid a callback storm when we reenable delivery. */
+		vcpu_info->evtchn_upcall_pending = 0;
+
+		/* Nested invocations bail immediately. */
+		if (unlikely(per_cpu(upcall_count, cpu)++))
+			return;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-	/* Clear master pending flag /before/ clearing selector flag. */
-	rmb();
+		/* Clear master flag /before/ clearing selector flag. */
+		rmb();
 #endif
-	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-	while (l1 != 0) {
-		l1i = __ffs(l1);
-		l1 &= ~(1UL << l1i);
+		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+		while (l1 != 0) {
+			l1i = __ffs(l1);
+			l1 &= ~(1UL << l1i);
 
-		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-			l2i = __ffs(l2);
+			while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+				l2i = __ffs(l2);
 
-			port = (l1i * BITS_PER_LONG) + l2i;
-			if ((irq = evtchn_to_irq[port]) != -1)
-				do_IRQ(irq, regs);
-			else {
-				exit_idle();
-				evtchn_device_upcall(port);
+				port = (l1i * BITS_PER_LONG) + l2i;
+				if ((irq = evtchn_to_irq[port]) != -1)
+					do_IRQ(irq, regs);
+				else {
+					exit_idle();
+					evtchn_device_upcall(port);
+				}
 			}
 		}
-	}
+
+		/* If there were nested callbacks then we have more to do. */
+		count = per_cpu(upcall_count, cpu);
+		per_cpu(upcall_count, cpu) = 0;
+	} while (unlikely(count != 1));
 }
 
 static int find_unbound_irq(void)
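
The mechanism in this patch is a reentrancy counter: every invocation of the
upcall increments a per-CPU counter and, if the counter was already nonzero,
returns at once; the outermost invocation loops until no nested deliveries
were suppressed while it was dispatching. Below is a minimal, standalone C
sketch of that pattern, not the kernel code itself: the names (do_upcall,
handle_one_event, upcall_count, pending) are illustrative, the per-CPU
counter is collapsed to a single variable, and a nested upcall is simulated
by a direct recursive call from inside a handler.

#include <stdio.h>

static void do_upcall(void);

/* The kernel keeps one counter per CPU; a single variable stands in here. */
static unsigned int upcall_count;
static int pending = 3;	/* simulated outstanding events */

static void handle_one_event(void)
{
	printf("handling event, %d left\n", --pending);
	/* Simulate an event firing mid-handler: the nested upcall must
	 * not recurse into the dispatch loop. */
	if (pending == 1)
		do_upcall();
}

static void do_upcall(void)
{
	unsigned int count;

	do {
		/* Nested invocations bump the counter and bail at once,
		 * so stack depth never exceeds one dispatch frame. */
		if (upcall_count++)
			return;

		while (pending > 0)
			handle_one_event();

		/* A count above 1 means nested upcalls were suppressed
		 * while we worked: rescan before leaving. */
		count = upcall_count;
		upcall_count = 0;
	} while (count != 1);
}

int main(void)
{
	do_upcall();
	return 0;
}

Running the sketch, the nested call inside handle_one_event returns
immediately after bumping the counter; the outer loop then observes
count == 2, resets the counter, and makes one more pass to pick up any work
the suppressed invocation would have handled, exactly the rescan the
"If there were nested callbacks then we have more to do" comment in the
patch describes. The exit condition count != 1 works because the outermost
pass itself contributes exactly one increment.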