}
}
-static void end_level_ioapic_irq (unsigned int irq)
+static void end_level_ioapic_irq (unsigned int irq, u8 vector)
{
unsigned long v;
int i;
*/
i = IO_APIC_VECTOR(irq);
+ /* Manually EOI the old vector if we are moving to the new one. */
+ if ( vector && i != vector )
+ {
+ int ioapic;
+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
+ io_apic_eoi(ioapic, i);
+ }
+
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
ack_APIC_irq();
{
}
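
The hunk above is the core of the change: the end handler now receives the vector the interrupt was actually delivered on, and when that differs from the vector currently recorded for the IRQ it issues a directed EOI for the recorded vector on every IO-APIC before acknowledging the local APIC. Below is a standalone sketch of that pattern, not part of the patch; io_apic_eoi(), ack_APIC_irq(), nr_ioapics and IO_APIC_VECTOR() are stubbed stand-ins for the real accessors, and the handler name end_level_irq() is illustrative.

/* Standalone sketch of the two-argument ->end() callback. */
#include <stdio.h>

typedef unsigned char u8;

#define NR_IRQS 16
static int nr_ioapics = 2;          /* assumed number of IO-APICs */
static u8  irq_vector[NR_IRQS];     /* vector currently programmed per IRQ */

#define IO_APIC_VECTOR(irq) (irq_vector[irq])

/* Stub: a real implementation would write the IO-APIC's EOI register. */
static void io_apic_eoi(int ioapic, u8 vector)
{
    printf("IO-APIC %d: directed EOI for vector 0x%02x\n", ioapic, vector);
}

/* Stub for the local APIC EOI. */
static void ack_APIC_irq(void)
{
    printf("local APIC: EOI\n");
}

/*
 * New-style end handler: 'vector' is the vector the interrupt actually
 * arrived on, or 0 when the caller has no delivery vector to report.
 */
static void end_level_irq(unsigned int irq, u8 vector)
{
    u8 i = IO_APIC_VECTOR(irq);

    /* Manually EOI the recorded vector if it no longer matches. */
    if ( vector && i != vector )
    {
        int ioapic;
        for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
            io_apic_eoi(ioapic, i);
    }

    ack_APIC_irq();
}

int main(void)
{
    irq_vector[3] = 0x31;     /* IRQ 3 now programmed to vector 0x31 ... */
    end_level_irq(3, 0x21);   /* ... but this instance arrived on 0x21.  */
    end_level_irq(3, 0x31);   /* Vectors match: only the local APIC EOI. */
    return 0;
}

Passing 0 for the vector, as several call sites further down do, skips the manual EOI entirely, preserving the old behaviour on paths where no migration can be pending.
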
-static void end_edge_ioapic_irq(unsigned int irq)
- {
- }
+static void end_edge_ioapic_irq(unsigned int irq, u8 vector)
+{
+}
/*
* Level and edge triggered IO-APIC interrupts need different handling,
ack_APIC_irq(); /* ACKTYPE_NONE */
}
-static void end_msi_irq(unsigned int irq)
+static void end_msi_irq(unsigned int irq, u8 vector)
{
if ( !msi_maskable_irq(irq_desc[irq].msi_desc) )
ack_APIC_irq(); /* ACKTYPE_EOI */
ack_APIC_irq();
}
-static void end_lapic_irq(unsigned int irq) { /* nothing */ }
+#define end_lapic_irq end_edge_ioapic_irq
static hw_irq_controller lapic_irq_type = {
.typename = "local-APIC-edge",
void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
static void enable_none(unsigned int vector) { }
+static void end_none(unsigned int irq, u8 vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int irq)
}
#define shutdown_none disable_none
-#define end_none enable_none
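
Because ->end() now takes two arguments, the no-op for no_irq_type can no longer alias the one-argument enable_none: its function-pointer type would not match the end member, so a dedicated two-argument end_none is introduced above. The sketch below shows why the alias stops working; struct irq_controller here is a reduced illustration, not the real hw_irq_controller layout.

/* Reduced sketch: why ->end needs its own no-op after the signature change. */
#include <stdio.h>

typedef unsigned char u8;

struct irq_controller {
    const char *typename;
    void (*enable)(unsigned int irq);          /* unchanged: one argument */
    void (*end)(unsigned int irq, u8 vector);  /* changed: takes a vector */
};

static void enable_none(unsigned int irq) { }
static void end_none(unsigned int irq, u8 vector) { }

/*
 * "#define end_none enable_none" would now put a void (*)(unsigned int)
 * where a void (*)(unsigned int, u8) is expected, which the compiler
 * flags as an incompatible function-pointer type.
 */
static struct irq_controller no_irq_type = {
    .typename = "none",
    .enable   = enable_none,
    .end      = end_none,
};

int main(void)
{
    no_irq_type.end(0, 0);   /* still a no-op, but with the right type */
    printf("controller '%s' wired up\n", no_irq_type.typename);
    return 0;
}
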
hw_irq_controller no_irq_type = {
"none",
static int current_vector = FIRST_DYNAMIC_VECTOR, current_offset = 0;
unsigned int old_vector;
int cpu, err;
+ unsigned long flags;
cpumask_t tmp_mask;
if ((cfg->move_in_progress) || cfg->move_cleanup_count)
/* Found one! */
current_vector = vector;
current_offset = offset;
+ local_irq_save(flags);
if (old_vector) {
cfg->move_in_progress = 1;
cpus_copy(cfg->old_domain, cfg->domain);
if (IO_APIC_IRQ(irq))
irq_vector[irq] = vector;
err = 0;
+ local_irq_restore(flags);
break;
}
return err;
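
This hunk also wraps the publication of the new vector in a local_irq_save()/local_irq_restore() pair, so the per-IRQ bookkeeping (move_in_progress, the old domain copy and irq_vector[]) is updated with interrupts off on the local CPU. A minimal sketch of that pattern follows, not part of the patch: local_irq_save()/restore() are stubbed with prints, assign_vector() is a hypothetical wrapper, and irq_cfg keeps only the fields needed for the illustration.

/* Sketch of the irq-save pattern used above (all names are stand-ins). */
#include <stdio.h>

#define local_irq_save(flags)    do { (flags) = 1; puts("irqs off (stub)"); } while (0)
#define local_irq_restore(flags) do { (void)(flags); puts("irqs on (stub)"); } while (0)

#define NR_IRQS 16
static int irq_vector[NR_IRQS];          /* IRQ -> vector mapping */

struct irq_cfg {
    int vector;
    int move_in_progress;
};

static struct irq_cfg cfg_table[NR_IRQS];

/* Publish a new vector for 'irq' with interrupts disabled locally. */
static int assign_vector(unsigned int irq, int new_vector)
{
    struct irq_cfg *cfg = &cfg_table[irq];
    unsigned long flags;

    local_irq_save(flags);
    if (cfg->vector)                     /* an old vector is still live */
        cfg->move_in_progress = 1;
    cfg->vector = new_vector;
    irq_vector[irq] = new_vector;
    local_irq_restore(flags);

    return 0;
}

int main(void)
{
    assign_vector(3, 0x21);
    assign_vector(3, 0x31);              /* second call marks a move */
    printf("irq 3 -> vector 0x%02x, move_in_progress=%d\n",
           irq_vector[3], cfg_table[3].move_in_progress);
    return 0;
}
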
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->handler->end(irq);
+ desc->handler->end(irq, regs->entry_vector);
out_no_end:
spin_unlock(&desc->lock);
irq_exit();
switch ( action->ack_type )
{
case ACKTYPE_UNMASK:
- desc->handler->end(irq);
+ desc->handler->end(irq, 0);
break;
case ACKTYPE_EOI:
cpu_eoi_map = action->cpu_eoi_map;
/* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
ASSERT(action->ack_type == ACKTYPE_EOI);
ASSERT(desc->status & IRQ_DISABLED);
- desc->handler->end(irq);
+ desc->handler->end(irq, vector);
return;
}
ASSERT(irq > 0);
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
- desc->handler->end(irq);
+ desc->handler->end(irq, peoi[sp].vector);
spin_unlock(&desc->lock);
}
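
The remaining hunks propagate the new argument to the call sites, and they follow one convention: the direct interrupt path passes regs->entry_vector, the pending-EOI flush passes the vector it saved earlier (peoi[sp].vector), and paths with no delivery vector at hand (the ACKTYPE_UNMASK cases) pass 0, which turns the manual-EOI logic off. A small sketch of that convention, with hypothetical wrapper and struct names, is below.

/* Sketch of the call-site convention (names are illustrative only). */
#include <stdio.h>

typedef unsigned char u8;

static void end_irq(unsigned int irq, u8 vector)
{
    if (vector)
        printf("end irq %u (delivered on vector 0x%02x)\n", irq, vector);
    else
        printf("end irq %u (no vector: skip stale-vector EOI)\n", irq);
}

struct cpu_user_regs { u8 entry_vector; };   /* only the field used here */
struct pending_eoi   { unsigned int irq; u8 vector; };

int main(void)
{
    struct cpu_user_regs regs = { .entry_vector = 0x21 };
    struct pending_eoi peoi = { .irq = 3, .vector = 0x31 };

    end_irq(3, regs.entry_vector);   /* direct interrupt path           */
    end_irq(peoi.irq, peoi.vector);  /* flushing the pending-EOI stack  */
    end_irq(3, 0);                   /* ACKTYPE_UNMASK-style call sites */
    return 0;
}
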
if ( action->ack_type == ACKTYPE_UNMASK )
{
ASSERT(cpus_empty(action->cpu_eoi_map));
- desc->handler->end(irq);
+ desc->handler->end(irq, 0);
spin_unlock_irq(&desc->lock);
return;
}
case ACKTYPE_UNMASK:
if ( test_and_clear_bit(pirq, d->pirq_mask) &&
(--action->in_flight == 0) )
- desc->handler->end(irq);
+ desc->handler->end(irq, 0);
break;
case ACKTYPE_EOI:
/* NB. If #guests == 0 then we clear the eoi_map later on. */