Process event channel notifications in round-robin order.
author    Ian Campbell <ian.campbell@citrix.com>
          Fri, 30 Nov 2007 14:35:31 +0000 (14:35 +0000)
committer Ian Campbell <ian.campbell@citrix.com>
          Fri, 30 Nov 2007 14:35:31 +0000 (14:35 +0000)
Avoids a fairness issue resulting from domain 0 always processing the
lowest-numbered event channel first.

Bugzilla #1115 "Event channel port scanning unfair".
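The patch makes each upcall resume its scan of the two-level pending bitmap
just after the last port it serviced, instead of always restarting at bit 0.
A minimal standalone sketch of that masked-scan-with-wraparound idea on a
single word is shown below; next_pending(), the sample bitmap and the printf
driver are illustrative only and are not part of the patch.

/*
 * Sketch of the round-robin scan: instead of always taking __ffs() of the
 * whole pending word, mask off the bits at or below the last serviced bit,
 * and wrap around to bit 0 when the mask leaves nothing set.
 */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Next set bit at or after 'start', wrapping around; -1 if none set. */
static int next_pending(unsigned long pending, unsigned int start)
{
	unsigned long masked = pending & (~0UL << start);

	if (masked == 0)
		masked = pending;		/* wrap around to bit 0 */
	if (masked == 0)
		return -1;			/* nothing pending at all */
	return __builtin_ctzl(masked);		/* lowest set bit of the mask */
}

int main(void)
{
	unsigned long pending = (1UL << 2) | (1UL << 5) | (1UL << 40);
	unsigned int last = BITS_PER_LONG - 1;	/* initial value used by the patch */
	int i, bit;

	/* Service each pending bit once, in round-robin order: 2, 5, 40. */
	for (i = 0; i < 3; i++) {
		bit = next_pending(pending, (last + 1) % BITS_PER_LONG);
		printf("servicing bit %d\n", bit);
		last = bit;
	}
	return 0;
}

The kernel change applies the same masking at both levels, once for the
selector word (l1) and once for each word of pending event channels (l2),
keeping the resume point across upcalls in the static last_processed_l1i /
last_processed_l2i pair.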

From: Scott Rixner <rixner@rice.edu>
From: Diego Ongaro <diego.ongaro@rice.edu>
From: Alan L. Cox <alc@rice.edu>
Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
drivers/xen/core/evtchn.c

index 1b777c5676015446c60bda4c499c039ea7a44c5b..546c7b70aabf1142208aedf5533afbfbfb595522 100644
@@ -225,11 +225,14 @@ static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
-       unsigned long  l1, l2;
-       unsigned int   l1i, l2i, port, count;
-       int            irq, cpu = smp_processor_id();
-       shared_info_t *s = HYPERVISOR_shared_info;
-       vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
+       unsigned long       l1, l2;
+       unsigned long       masked_l1, masked_l2;
+       unsigned int        l1i, l2i, port, count;
+       static unsigned int last_processed_l1i = BITS_PER_LONG - 1, last_processed_l2i = BITS_PER_LONG - 1;
+       int                 irq, cpu = smp_processor_id();
+       shared_info_t      *s = HYPERVISOR_shared_info;
+       vcpu_info_t        *vcpu_info = &s->vcpu_info[cpu];
+
 
        do {
                /* Avoid a callback storm when we reenable delivery. */
@@ -244,13 +247,36 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
                rmb();
 #endif
                l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+               l1i = last_processed_l1i;
+               l2i = last_processed_l2i;
+
                while (l1 != 0) {
-                       l1i = __ffs(l1);
-                       l1 &= ~(1UL << l1i);
 
-                       while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-                               l2i = __ffs(l2);
+                       l1i = (l1i + 1) % BITS_PER_LONG;
+                       masked_l1 = l1 & ((~0UL) << l1i);
+
+                       if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
+                               l1i = BITS_PER_LONG - 1;
+                               l2i = BITS_PER_LONG - 1;
+                               continue;
+                       }
+                       l1i = __ffs(masked_l1);
+
+                       do {
+                               l2 = active_evtchns(cpu, s, l1i);
+
+                               l2i = (l2i + 1) % BITS_PER_LONG;
+                               masked_l2 = l2 & ((~0UL) << l2i);
 
+                               if (masked_l2 == 0) { /* if we masked out all events, move on */
+                                       l2i = BITS_PER_LONG - 1;
+                                       break;
+                               }
+
+                               l2i = __ffs(masked_l2);
+
+                               /* process port */
                                port = (l1i * BITS_PER_LONG) + l2i;
                                if ((irq = evtchn_to_irq[port]) != -1)
                                        do_IRQ(irq, regs);
@@ -258,7 +284,17 @@ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
                                        exit_idle();
                                        evtchn_device_upcall(port);
                                }
-                       }
+
+                               /* if this is the final port processed, we'll pick up here+1 next time */
+                               last_processed_l1i = l1i;
+                               last_processed_l2i = l2i;
+
+                       } while (l2i != BITS_PER_LONG - 1);
+
+                       l2 = active_evtchns(cpu, s, l1i);
+                       if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
+                               l1 &= ~(1UL << l1i);
+
                }
 
                /* If there were nested callbacks then we have more to do. */