gic.vbase);
}
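+/* Deactivate a physical irq on the local GIC cpu interface by writing
+ * its number to GICC_DIR. The irq number is passed through the void
+ * pointer so this can also run as an on_selected_cpus callback. */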
+static void gic_irq_eoi(void *info)
+{
+    int virq = (uintptr_t) info;
+    GICC[GICC_DIR] = virq;
+}
+
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    int i = 0, virq;
    uint32_t lr;
    struct vcpu *v = current;
    /* one bit per LR register whose irq the guest has EOIed */
    uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
    while ((i = find_next_bit((const long unsigned int *) &eisr,
                              64, i)) < 64) {
        struct pending_irq *p;
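+        /* cpu: pcpu that has to deactivate the physical irq backing this
+         * virq, if any; eoi: whether such a physical irq exists at all */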
+        int cpu, eoi;
+
+        cpu = -1;
+        eoi = 0;
        spin_lock_irq(&gic.lock);
        lr = GICH[GICH_LR + i];
        virq = lr & GICH_LR_VIRTUAL_MASK;
        spin_unlock_irq(&gic.lock);
        spin_lock_irq(&v->arch.vgic.lock);
        p = irq_to_pending(v, virq);
        if ( p->desc != NULL ) {
            p->desc->status &= ~IRQ_INPROGRESS;
-            GICC[GICC_DIR] = virq;
+            /* Assume only one pcpu needs to EOI the irq */
+            cpu = p->desc->arch.eoi_cpu;
+            eoi = 1;
        }
        list_del_init(&p->inflight);
        spin_unlock_irq(&v->arch.vgic.lock);
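+        /* A GICC_DIR write is only valid from the cpu interface that
+         * acknowledged the irq, hence the IPI when that was another pcpu. */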
+        if ( eoi ) {
+            /* This is not racy because we can't receive another irq of
+             * the same type until we EOI it. */
+            if ( cpu == smp_processor_id() )
+                gic_irq_eoi((void *)(uintptr_t)virq);
+            else
+                on_selected_cpus(cpumask_of(cpu), gic_irq_eoi,
+                                 (void *)(uintptr_t)virq, 0);
+        }
+
        i++;
    }
}

[...]

        desc->handler->end(desc);
        desc->status |= IRQ_INPROGRESS;
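+        /* Record the pcpu that took this irq so the maintenance interrupt
+         * handler can deactivate it from the same cpu interface. */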
+        desc->arch.eoi_cpu = smp_processor_id();
        /* XXX: inject irq into all guest vcpus */
        vgic_vcpu_inject_irq(d->vcpu[0], irq, 0);