/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
- unsigned long l1, l2;
- unsigned int l1i, l2i, port, count;
- int irq, cpu = smp_processor_id();
- shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
+ unsigned long l1, l2;
+ unsigned long masked_l1, masked_l2;
+ unsigned int l1i, l2i, port, count;
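+ /*
+ * Remember where the previous upcall stopped scanning, so the next
+ * invocation resumes just past that point; this round-robin order
+ * keeps low-numbered ports from starving high-numbered ones.
+ */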
+ static unsigned int last_processed_l1i = BITS_PER_LONG - 1;
+ static unsigned int last_processed_l2i = BITS_PER_LONG - 1;
+ int irq, cpu = smp_processor_id();
+ shared_info_t *s = HYPERVISOR_shared_info;
+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
+
do {
/* Avoid a callback storm when we reenable delivery. */
vcpu_info->evtchn_upcall_pending = 0;
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
/* Clear master flag /before/ clearing selector flag. */
rmb();
#endif
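+ /* Atomically grab the pending selector word and reset it to zero. */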
l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+ l1i = last_processed_l1i;
+ l2i = last_processed_l2i;
+
while (l1 != 0) {
- l1i = __ffs(l1);
- l1 &= ~(1UL << l1i);
- while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
- l2i = __ffs(l2);
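+ /* Resume the level-1 scan one bit past the last group serviced,
+ * wrapping back to bit 0 once the high-order bits are exhausted. */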
+ l1i = (l1i + 1) % BITS_PER_LONG;
+ masked_l1 = l1 & ((~0UL) << l1i);
+
+ if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
+ l1i = BITS_PER_LONG - 1;
+ l2i = BITS_PER_LONG - 1;
+ continue;
+ }
+ l1i = __ffs(masked_l1);
+
+ do {
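+ /* Re-read the group on every pass; the set of pending, unmasked
+ * events for this CPU may grow while we service ports. */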
+ l2 = active_evtchns(cpu, s, l1i);
+
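+ /* Likewise resume the level-2 scan one bit past the last port. */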
+ l2i = (l2i + 1) % BITS_PER_LONG;
+ masked_l2 = l2 & ((~0UL) << l2i);
+ if (masked_l2 == 0) { /* if we masked out all events, move on */
+ l2i = BITS_PER_LONG - 1;
+ break;
+ }
+
+ l2i = __ffs(masked_l2);
+
+ /* process port: (l1i, l2i) flattens to a global port number */
port = (l1i * BITS_PER_LONG) + l2i;
if ((irq = evtchn_to_irq[port]) != -1)
do_IRQ(irq, regs);
else {
exit_idle();
evtchn_device_upcall(port);
}
- }
+
+ /* if this is the final port processed, we'll pick up here+1 next time */
+ last_processed_l1i = l1i;
+ last_processed_l2i = l2i;
+
+ } while (l2i != BITS_PER_LONG - 1);
+
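+ /* The scan above ends when no unserviced port remains at or above
+ * the resume point, or when the top bit itself was serviced; any
+ * ports still pending below that point keep the selector bit set
+ * via the recheck below, so the outer loop revisits this group. */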
+ l2 = active_evtchns(cpu, s, l1i);
+ /* we handled all ports in this group, so we can clear the selector bit */
+ if (l2 == 0)
+ l1 &= ~(1UL << l1i);
+
}
/* If there were nested callbacks then we have more to do. */