}
static void
-nop (unsigned int vector)
+nop (struct irq_desc *desc)
{
/* do nothing... */
}
}
static void
-mask_irq (unsigned int irq)
+mask_irq (struct irq_desc *desc)
{
unsigned long flags;
char __iomem *addr;
u32 low32;
int rte_index;
- ia64_vector vec = irq_to_vector(irq);
+ ia64_vector vec = irq_to_vector(desc->irq);
struct iosapic_rte_info *rte;
if (list_empty(&iosapic_intr_info[vec].rtes))
}
static void
-unmask_irq (unsigned int irq)
+unmask_irq (struct irq_desc *desc)
{
unsigned long flags;
char __iomem *addr;
u32 low32;
int rte_index;
- ia64_vector vec = irq_to_vector(irq);
+ ia64_vector vec = irq_to_vector(desc->irq);
struct iosapic_rte_info *rte;
if (list_empty(&iosapic_intr_info[vec].rtes))
*/
static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_desc *desc)
{
- unmask_irq(irq);
+ unmask_irq(desc);
return 0;
}
static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_end_level_irq (struct irq_desc *desc)
{
- ia64_vector vec = irq_to_vector(irq);
+ ia64_vector vec = irq_to_vector(desc->irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_irq(desc->irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);
-static unsigned int *__read_mostly irq_channel;
-#define irq_to_channel(irq) irq_channel[irq]
-
unsigned long __read_mostly hpet_address;
/*
ch->event_handler(ch);
}
-static void hpet_msi_unmask(unsigned int irq)
+static void hpet_msi_unmask(struct irq_desc *desc)
{
u32 cfg;
- unsigned int ch_idx = irq_to_channel(irq);
- struct hpet_event_channel *ch = hpet_events + ch_idx;
-
- BUG_ON(ch_idx >= num_hpets_used);
+ struct hpet_event_channel *ch = desc->action->dev_id;
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg |= HPET_TN_FSB;
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}
-static void hpet_msi_mask(unsigned int irq)
+static void hpet_msi_mask(struct irq_desc *desc)
{
u32 cfg;
- unsigned int ch_idx = irq_to_channel(irq);
- struct hpet_event_channel *ch = hpet_events + ch_idx;
-
- BUG_ON(ch_idx >= num_hpets_used);
+ struct hpet_event_channel *ch = desc->action->dev_id;
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg &= ~HPET_TN_FSB;
msg->address_hi = 0;
}
-static unsigned int hpet_msi_startup(unsigned int irq)
+static unsigned int hpet_msi_startup(struct irq_desc *desc)
{
- hpet_msi_unmask(irq);
+ hpet_msi_unmask(desc);
return 0;
}
-static void hpet_msi_shutdown(unsigned int irq)
-{
- hpet_msi_mask(irq);
-}
+#define hpet_msi_shutdown hpet_msi_mask
-static void hpet_msi_ack(unsigned int irq)
+static void hpet_msi_ack(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
irq_complete_move(desc);
- move_native_irq(irq);
+ move_native_irq(desc);
ack_APIC_irq();
}
-static void hpet_msi_end(unsigned int irq, u8 vector)
-{
-}
-
static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct msi_msg msg;
.enable = hpet_msi_unmask,
.disable = hpet_msi_mask,
.ack = hpet_msi_ack,
- .end = hpet_msi_end,
.set_affinity = hpet_msi_set_affinity,
};
hpet_msi_write(desc->action->dev_id, &msg);
}
-static int __init hpet_setup_msi_irq(unsigned int irq)
+static int __init hpet_setup_msi_irq(unsigned int irq, struct hpet_event_channel *ch)
{
int ret;
irq_desc_t *desc = irq_to_desc(irq);
desc->handler = &hpet_msi_type;
- ret = request_irq(irq, hpet_interrupt_handler,
- 0, "HPET", hpet_events + irq_channel[irq]);
+ ret = request_irq(irq, hpet_interrupt_handler, 0, "HPET", ch);
if ( ret < 0 )
return ret;
if ( (irq = create_irq()) < 0 )
return irq;
- irq_channel[irq] = idx;
-
- if ( hpet_setup_msi_irq(irq) )
+ if ( hpet_setup_msi_irq(irq, hpet_events + idx) )
{
destroy_irq(irq);
- irq_channel[irq] = -1;
return -EINVAL;
}
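
The irq_channel[] side table can go away because request_irq() already carries a per-action back-pointer: its dev_id argument is stored in desc->action->dev_id, so any desc-based handler recovers its channel directly, as hpet_msi_unmask()/hpet_msi_mask() above now do. A minimal sketch of the pattern; the example_* names are hypothetical and not part of the patch:

/* Sketch only: example_* names are hypothetical. request_irq()
 * records its dev_id argument in desc->action->dev_id, which
 * desc-based handlers read back, removing any irq -> device table. */
struct example_dev {
    unsigned int idx;
};

static void example_handler(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    struct example_dev *dev = dev_id;   /* same pointer passed below */
    /* ... service the device identified by dev->idx ... */
}

static void example_mask(struct irq_desc *desc)
{
    struct example_dev *dev = desc->action->dev_id;
    /* ... program the mask bit for dev->idx ... */
}

static int example_setup(unsigned int irq, struct example_dev *dev)
{
    /* The final argument becomes desc->action->dev_id. */
    return request_irq(irq, example_handler, 0, "example", dev);
}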
if ( hpet_rate == 0 )
return;
- irq_channel = xmalloc_array(unsigned int, nr_irqs);
- BUG_ON(irq_channel == NULL);
- for ( i = 0; i < nr_irqs; i++ )
- irq_channel[i] = -1;
-
cfg = hpet_read32(HPET_CFG);
hpet_fsb_cap_lookup();
}
else
{
- xfree(irq_channel);
- irq_channel = NULL;
-
hpet_id = hpet_read32(HPET_ID);
if ( !(hpet_id & HPET_ID_LEGSUP) )
return;
static DEFINE_SPINLOCK(i8259A_lock);
-static void mask_and_ack_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A_irq(struct irq_desc *);
-static unsigned int startup_8259A_irq(unsigned int irq)
+static unsigned int startup_8259A_irq(struct irq_desc *desc)
{
- enable_8259A_irq(irq);
+ enable_8259A_irq(desc);
return 0; /* never anything pending */
}
-static void end_8259A_irq(unsigned int irq, u8 vector)
+static void end_8259A_irq(struct irq_desc *desc, u8 vector)
{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_8259A_irq(irq);
+ if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ enable_8259A_irq(desc);
}
static struct hw_interrupt_type __read_mostly i8259A_irq_type = {
*/
unsigned int __read_mostly io_apic_irqs;
-void disable_8259A_irq(unsigned int irq)
+void disable_8259A_irq(struct irq_desc *desc)
{
- unsigned int mask = 1 << irq;
+ unsigned int mask = 1 << desc->irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
- if (irq & 8)
+ if (desc->irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
spin_unlock_irqrestore(&i8259A_lock, flags);
}
-void enable_8259A_irq(unsigned int irq)
+void enable_8259A_irq(struct irq_desc *desc)
{
- unsigned int mask = ~(1 << irq);
+ unsigned int mask = ~(1 << desc->irq);
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
- if (irq & 8)
+ if (desc->irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-static void mask_and_ack_8259A_irq(unsigned int irq)
+static void mask_and_ack_8259A_irq(struct irq_desc *desc)
{
- unsigned int irqmask = 1 << irq;
+ unsigned int irqmask = 1 << desc->irq;
unsigned long flags;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= irqmask;
handle_real_irq:
- if (irq & 8) {
+ if (desc->irq & 8) {
inb(0xA1); /* DUMMY - (do we need this?) */
outb(cached_A1,0xA1);
- outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
+ outb(0x60 + (desc->irq & 7), 0xA0);/* 'Specific EOI' to slave */
outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
} else {
inb(0x21); /* DUMMY - (do we need this?) */
outb(cached_21,0x21);
- outb(0x60+irq,0x20); /* 'Specific EOI' to master */
+ outb(0x60 + desc->irq, 0x20);/* 'Specific EOI' to master */
}
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
/*
* this is the slow path - should happen rarely.
*/
- if (i8259A_irq_real(irq))
+ if (i8259A_irq_real(desc->irq))
/*
* oops, the IRQ _is_ in service according to the
* 8259A - not spurious, go handle it.
* lets ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
- printk("spurious 8259A interrupt: IRQ%d.\n", irq);
+ printk("spurious 8259A interrupt: IRQ%d.\n", desc->irq);
spurious_irq_mask |= irqmask;
}
/*
__modify_IO_APIC_irq(irq, 0x00008000, 0);
}
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq(struct irq_desc *desc)
{
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
- __mask_IO_APIC_irq(irq);
+ __mask_IO_APIC_irq(desc->irq);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq(struct irq_desc *desc)
{
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
- __unmask_IO_APIC_irq(irq);
+ __unmask_IO_APIC_irq(desc->irq);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
ioapic_register_intr(irq, IOAPIC_AUTO);
if (!apic && platform_legacy_irq(irq))
- disable_8259A_irq(irq);
+ disable_8259A_irq(irq_to_desc(irq));
}
cfg = irq_cfg(irq);
SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
memset(&entry,0,sizeof(entry));
- disable_8259A_irq(0);
+ disable_8259A_irq(irq_to_desc(0));
/* mask LVT0 */
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
*/
ioapic_write_entry(apic, pin, 0, entry);
- enable_8259A_irq(0);
+ enable_8259A_irq(irq_to_desc(0));
}
static inline void UNEXPECTED_IO_APIC(void)
* This is not complete - we should be able to fake
* an edge even if it isn't on the 8259A...
*/
-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
+static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc)
{
int was_pending = 0;
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
- if (platform_legacy_irq(irq)) {
- disable_8259A_irq(irq);
- if (i8259A_irq_pending(irq))
+ if (platform_legacy_irq(desc->irq)) {
+ disable_8259A_irq(desc);
+ if (i8259A_irq_pending(desc->irq))
was_pending = 1;
}
- __unmask_IO_APIC_irq(irq);
+ __unmask_IO_APIC_irq(desc->irq);
spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
* interrupt for real. This prevents IRQ storms from unhandled
* devices.
*/
-static void ack_edge_ioapic_irq(unsigned int irq)
+static void ack_edge_ioapic_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
irq_complete_move(desc);
- move_native_irq(irq);
+ move_native_irq(desc);
if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
== (IRQ_PENDING | IRQ_DISABLED))
- mask_IO_APIC_irq(irq);
+ mask_IO_APIC_irq(desc);
ack_APIC_irq();
}
* generic IRQ layer and by the fact that an unacked local
* APIC does not accept IRQs.
*/
-static unsigned int startup_level_ioapic_irq (unsigned int irq)
+static unsigned int startup_level_ioapic_irq(struct irq_desc *desc)
{
- unmask_IO_APIC_irq(irq);
+ unmask_IO_APIC_irq(desc);
return 0; /* don't check for pending */
}
return 0;
}
-static void mask_and_ack_level_ioapic_irq (unsigned int irq)
+static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc)
{
unsigned long v;
int i;
- struct irq_desc *desc = irq_to_desc(irq);
irq_complete_move(desc);
return;
if ( !directed_eoi_enabled )
- mask_IO_APIC_irq(irq);
+ mask_IO_APIC_irq(desc);
/*
* It appears there is an erratum which affects at least version 0x11
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = IO_APIC_VECTOR(irq);
+ i = IO_APIC_VECTOR(desc->irq);
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
if ( directed_eoi_enabled )
return;
- if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
- !io_apic_level_ack_pending(irq))
+ if ((desc->status & IRQ_MOVE_PENDING) &&
+ !io_apic_level_ack_pending(desc->irq))
move_masked_irq(desc);
if ( !(v & (1 << (i & 0x1f))) ) {
spin_lock(&ioapic_lock);
- __edge_IO_APIC_irq(irq);
- __level_IO_APIC_irq(irq);
+ __edge_IO_APIC_irq(desc->irq);
+ __level_IO_APIC_irq(desc->irq);
spin_unlock(&ioapic_lock);
}
}
-static void end_level_ioapic_irq (unsigned int irq, u8 vector)
+static void end_level_ioapic_irq(struct irq_desc *desc, u8 vector)
{
unsigned long v;
int i;
{
if ( directed_eoi_enabled )
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
{
- eoi_IO_APIC_irq(irq);
+ eoi_IO_APIC_irq(desc->irq);
return;
}
- mask_IO_APIC_irq(irq);
- eoi_IO_APIC_irq(irq);
+ mask_IO_APIC_irq(desc);
+ eoi_IO_APIC_irq(desc->irq);
if ( (desc->status & IRQ_MOVE_PENDING) &&
- !io_apic_level_ack_pending(irq) )
+ !io_apic_level_ack_pending(desc->irq) )
move_masked_irq(desc);
}
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- unmask_IO_APIC_irq(irq);
+ if ( !(desc->status & IRQ_DISABLED) )
+ unmask_IO_APIC_irq(desc);
return;
}
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = IO_APIC_VECTOR(irq);
+ i = IO_APIC_VECTOR(desc->irq);
/* Manually EOI the old vector if we are moving to the new */
if ( vector && i != vector )
ack_APIC_irq();
- if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
- !io_apic_level_ack_pending(irq))
- move_native_irq(irq);
+ if ( (desc->status & IRQ_MOVE_PENDING) &&
+ !io_apic_level_ack_pending(desc->irq) )
+ move_native_irq(desc);
if (!(v & (1 << (i & 0x1f)))) {
spin_lock(&ioapic_lock);
- __mask_IO_APIC_irq(irq);
- __edge_IO_APIC_irq(irq);
- __level_IO_APIC_irq(irq);
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- __unmask_IO_APIC_irq(irq);
+ __mask_IO_APIC_irq(desc->irq);
+ __edge_IO_APIC_irq(desc->irq);
+ __level_IO_APIC_irq(desc->irq);
+ if ( !(desc->status & IRQ_DISABLED) )
+ __unmask_IO_APIC_irq(desc->irq);
spin_unlock(&ioapic_lock);
}
}
-static void disable_edge_ioapic_irq(unsigned int irq)
-{
-}
-
-static void end_edge_ioapic_irq(unsigned int irq, u8 vector)
-{
-}
-
-
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
static hw_irq_controller ioapic_edge_type = {
.typename = "IO-APIC-edge",
.startup = startup_edge_ioapic_irq,
- .shutdown = disable_edge_ioapic_irq,
+ .shutdown = irq_shutdown_none,
.enable = unmask_IO_APIC_irq,
- .disable = disable_edge_ioapic_irq,
+ .disable = irq_disable_none,
.ack = ack_edge_ioapic_irq,
- .end = end_edge_ioapic_irq,
.set_affinity = set_ioapic_affinity_irq,
};
.set_affinity = set_ioapic_affinity_irq,
};
-static unsigned int startup_msi_irq(unsigned int irq)
+static unsigned int startup_msi_irq(struct irq_desc *desc)
{
- unmask_msi_irq(irq);
+ unmask_msi_irq(desc);
return 0;
}
-static void ack_msi_irq(unsigned int irq)
+static void ack_msi_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
irq_complete_move(desc);
- move_native_irq(irq);
+ move_native_irq(desc);
if ( msi_maskable_irq(desc->msi_desc) )
ack_APIC_irq(); /* ACKTYPE_NONE */
}
-static void end_msi_irq(unsigned int irq, u8 vector)
+static void end_msi_irq(struct irq_desc *desc, u8 vector)
{
- if ( !msi_maskable_irq(irq_desc[irq].msi_desc) )
+ if ( !msi_maskable_irq(desc->msi_desc) )
ack_APIC_irq(); /* ACKTYPE_EOI */
}
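
With mask_msi_irq()/unmask_msi_irq() sharing the desc-based signature, the generic MSI helpers slot straight into a controller's hooks with no per-IRQ wrappers. A sketch of how the hooks above would be assembled; the name example_msi_type is hypothetical:

/* Sketch only (hypothetical name): uniform desc-based signatures let
 * the shared MSI helpers be wired directly into the controller. */
static hw_irq_controller example_msi_type = {
    .typename     = "example-MSI",
    .startup      = startup_msi_irq,
    .shutdown     = mask_msi_irq,
    .enable       = unmask_msi_irq,
    .disable      = mask_msi_irq,
    .ack          = ack_msi_irq,
    .end          = end_msi_irq,
    .set_affinity = set_msi_affinity,
};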
make_8259A_irq(irq);
}
-static void enable_lapic_irq(unsigned int irq)
+static void enable_lapic_irq(struct irq_desc *desc)
{
unsigned long v;
apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
-static void disable_lapic_irq(unsigned int irq)
+static void disable_lapic_irq(struct irq_desc *desc)
{
unsigned long v;
apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
}
-static void ack_lapic_irq(unsigned int irq)
+static void ack_lapic_irq(struct irq_desc *desc)
{
ack_APIC_irq();
}
-#define end_lapic_irq end_edge_ioapic_irq
-
static hw_irq_controller lapic_irq_type = {
.typename = "local-APIC-edge",
.startup = NULL, /* startup_irq() not used for IRQ0 */
.enable = enable_lapic_irq,
.disable = disable_lapic_irq,
.ack = ack_lapic_irq,
- .end = end_lapic_irq,
};
/*
/*
* get/set the timer IRQ vector:
*/
- disable_8259A_irq(0);
+ disable_8259A_irq(irq_to_desc(0));
vector = FIRST_HIPRIORITY_VECTOR;
clear_irq_vector(0);
init_8259A(1);
/* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
/*timer_ack = 1;*/
- /*enable_8259A_irq(0);*/
+ /*enable_8259A_irq(irq_to_desc(0));*/
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
/*
* Ok, does IRQ0 through the IOAPIC work?
*/
- unmask_IO_APIC_irq(0);
+ unmask_IO_APIC_irq(irq_to_desc(0));
if (timer_irq_works()) {
local_irq_restore(flags);
return;
printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
- disable_8259A_irq(0);
+ disable_8259A_irq(irq_to_desc(0));
irq_desc[0].handler = &lapic_irq_type;
apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
- enable_8259A_irq(0);
+ enable_8259A_irq(irq_to_desc(0));
if (timer_irq_works()) {
local_irq_restore(flags);
ioapic_register_intr(irq, edge_level);
if (!ioapic && platform_legacy_irq(irq))
- disable_8259A_irq(irq);
+ disable_8259A_irq(desc);
spin_lock_irqsave(&ioapic_lock, flags);
__ioapic_write_entry(ioapic, pin, 0, entry);
spin_lock(&desc->lock);
if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST)))
- desc->handler->startup(irq);
+ desc->handler->startup(desc);
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_DISABLED;
desc->status &= ~IRQ_GUEST;
- desc->handler->shutdown(irq);
+ desc->handler->shutdown(desc);
action = desc->action;
desc->action = NULL;
desc->msi_desc = NULL;
void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
-static void enable_none(unsigned int vector) { }
-static void end_none(unsigned int irq, u8 vector) { }
-static unsigned int startup_none(unsigned int vector) { return 0; }
-static void disable_none(unsigned int vector) { }
-static void ack_none(unsigned int irq)
+void irq_actor_none(struct irq_desc *desc) { }
+unsigned int irq_startup_none(struct irq_desc *desc) { return 0; }
+static void ack_none(struct irq_desc *desc)
{
- ack_bad_irq(irq);
+ ack_bad_irq(desc->irq);
}
-#define shutdown_none disable_none
-
hw_irq_controller no_irq_type = {
"none",
- startup_none,
- shutdown_none,
- enable_none,
- disable_none,
+ irq_startup_none,
+ irq_shutdown_none,
+ irq_enable_none,
+ irq_disable_none,
ack_none,
- end_none
};
static vmask_t *irq_get_used_vector_mask(int irq)
cpus_clear(desc->pending_mask);
}
-void move_native_irq(int irq)
+void move_native_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
if (unlikely(desc->status & IRQ_DISABLED))
return;
- desc->handler->disable(irq);
+ desc->handler->disable(desc);
move_masked_irq(desc);
- desc->handler->enable(irq);
+ desc->handler->enable(desc);
}
/* For re-setting irq interrupt affinity for specific irq */
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
- desc->handler->ack(irq);
+ desc->handler->ack(desc);
if ( likely(desc->status & IRQ_GUEST) )
{
s_time_t now = NOW();
if ( now < (desc->rl_quantum_start + MILLISECS(10)) )
{
- desc->handler->disable(irq);
+ desc->handler->disable(desc);
/*
* If handler->disable doesn't actually mask the interrupt, a
* disabled irq still can fire. This check also avoids possible
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->handler->end(irq, regs->entry_vector);
+ if ( desc->handler->end )
+ desc->handler->end(desc, regs->entry_vector);
out_no_end:
spin_unlock(&desc->lock);
irq_exit();
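
Because several controllers now omit .end entirely, every remaining invocation is guarded as above. The patch open-codes the NULL check at each call site; if desired, the guard could be folded into a helper along these lines (hypothetical, not in the patch):

/* Hypothetical helper, not part of the patch: centralizes the NULL
 * check for the now-optional .end hook. */
static inline void irq_end(struct irq_desc *desc, u8 vector)
{
    if ( desc->handler->end )
        desc->handler->end(desc, vector);
}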
list_for_each_entry_safe ( desc, tmp, &irq_ratelimit_list, rl_link )
{
spin_lock(&desc->lock);
- desc->handler->enable(desc->irq);
+ desc->handler->enable(desc);
list_del(&desc->rl_link);
INIT_LIST_HEAD(&desc->rl_link);
spin_unlock(&desc->lock);
action = desc->action;
desc->action = NULL;
desc->status |= IRQ_DISABLED;
- desc->handler->shutdown(irq);
+ desc->handler->shutdown(desc);
spin_unlock_irqrestore(&desc->lock,flags);
/* Wait to make sure it's not being used on another CPU */
desc->action = new;
desc->status &= ~IRQ_DISABLED;
- desc->handler->startup(irq);
+ desc->handler->startup(desc);
spin_unlock_irqrestore(&desc->lock,flags);
switch ( action->ack_type )
{
case ACKTYPE_UNMASK:
- desc->handler->end(irq, 0);
+ if ( desc->handler->end )
+ desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
cpu_eoi_map = action->cpu_eoi_map;
/* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
ASSERT(action->ack_type == ACKTYPE_EOI);
ASSERT(desc->status & IRQ_DISABLED);
- desc->handler->end(irq, vector);
+ if ( desc->handler->end )
+ desc->handler->end(desc, vector);
return;
}
ASSERT(irq > 0);
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
- desc->handler->end(irq, peoi[sp].vector);
+ if ( desc->handler->end )
+ desc->handler->end(desc, peoi[sp].vector);
spin_unlock(&desc->lock);
}
if ( action->ack_type == ACKTYPE_UNMASK )
{
ASSERT(cpus_empty(action->cpu_eoi_map));
- desc->handler->end(irq, 0);
+ if ( desc->handler->end )
+ desc->handler->end(desc, 0);
spin_unlock_irq(&desc->lock);
return;
}
desc->status |= IRQ_GUEST;
desc->status &= ~IRQ_DISABLED;
- desc->handler->startup(irq);
+ desc->handler->startup(desc);
/* Attempt to bind the interrupt target to the correct CPU. */
cpu_set(v->processor, cpumask);
{
case ACKTYPE_UNMASK:
if ( test_and_clear_bool(pirq->masked) &&
- (--action->in_flight == 0) )
- desc->handler->end(irq, 0);
+ (--action->in_flight == 0) &&
+ desc->handler->end )
+ desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
/* NB. If #guests == 0 then we clear the eoi_map later on. */
/* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
+ desc->handler->disable(desc);
/*
* Mark any remaining pending EOIs as ready to flush.
desc->action = NULL;
desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS);
- desc->handler->shutdown(irq);
+ desc->handler->shutdown(desc);
/* Caller frees the old guest descriptor block. */
return action;
}
if ( desc->handler->disable )
- desc->handler->disable(irq);
+ desc->handler->disable(desc);
if ( desc->handler->set_affinity )
desc->handler->set_affinity(desc, &affinity);
set_affinity = 0;
if ( desc->handler->enable )
- desc->handler->enable(irq);
+ desc->handler->enable(desc);
spin_unlock(&desc->lock);
|| entry->msi_attrib.maskbit;
}
-static void msi_set_mask_bit(unsigned int irq, int flag)
+static void msi_set_mask_bit(struct irq_desc *desc, int flag)
{
- struct msi_desc *entry = irq_desc[irq].msi_desc;
+ struct msi_desc *entry = desc->msi_desc;
- ASSERT(spin_is_locked(&irq_desc[irq].lock));
+ ASSERT(spin_is_locked(&desc->lock));
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
return -1;
}
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(irq, 1);
+ msi_set_mask_bit(desc, 1);
}
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(irq, 0);
+ msi_set_mask_bit(desc, 0);
}
static struct msi_desc* alloc_msi_entry(void)
write_msi_msg(entry, &entry->msg);
- msi_set_mask_bit(irq, entry->msi_attrib.masked);
+ msi_set_mask_bit(desc, entry->msi_attrib.masked);
if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
msi_set_enable(pdev, 1);
#include <asm-x86/fixmap.h>
#include <mach_apic.h>
-static struct amd_iommu **__read_mostly irq_to_iommu;
static int __initdata nr_amd_iommus;
unsigned short ivrs_bdf_entries;
iommu->msi_cap + PCI_MSI_FLAGS, control);
}
-static void iommu_msi_unmask(unsigned int irq)
+static void iommu_msi_unmask(struct irq_desc *desc)
{
unsigned long flags;
- struct amd_iommu *iommu = irq_to_iommu[irq];
+ struct amd_iommu *iommu = desc->action->dev_id;
/* FIXME: do not support mask bits at the moment */
if ( iommu->maskbit )
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_msi_mask(unsigned int irq)
+static void iommu_msi_mask(struct irq_desc *desc)
{
unsigned long flags;
- struct amd_iommu *iommu = irq_to_iommu[irq];
- struct irq_desc *desc = irq_to_desc(irq);
+ struct amd_iommu *iommu = desc->action->dev_id;
irq_complete_move(desc);
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static unsigned int iommu_msi_startup(unsigned int irq)
+static unsigned int iommu_msi_startup(struct irq_desc *desc)
{
- iommu_msi_unmask(irq);
+ iommu_msi_unmask(desc);
return 0;
}
-static void iommu_msi_end(unsigned int irq, u8 vector)
+static void iommu_msi_end(struct irq_desc *desc, u8 vector)
{
- iommu_msi_unmask(irq);
+ iommu_msi_unmask(desc);
ack_APIC_irq();
}
}
irq_desc[irq].handler = &iommu_msi_type;
- irq_to_iommu[irq] = iommu;
ret = request_irq(irq, amd_iommu_page_fault, 0,
"amd_iommu", iommu);
if ( ret )
{
irq_desc[irq].handler = &no_irq_type;
- irq_to_iommu[irq] = NULL;
destroy_irq(irq);
AMD_IOMMU_DEBUG("can't request irq\n");
return 0;
ivrs_mappings = NULL;
}
- /* free irq_to_iommu[] */
- if ( irq_to_iommu )
- {
- xfree(irq_to_iommu);
- irq_to_iommu = NULL;
- }
-
iommu_enabled = 0;
iommu_passthrough = 0;
iommu_intremap = 0;
BUG_ON( !iommu_found() );
- irq_to_iommu = xmalloc_array(struct amd_iommu *, nr_irqs);
- if ( irq_to_iommu == NULL )
- goto error_out;
- memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
if ( !ivrs_bdf_entries )
}
}
-static struct iommu **irq_to_iommu;
static int iommu_page_fault_do_one(struct iommu *iommu, int type,
u8 fault_reason, u16 source_id, u64 addr)
{
}
}
-static void dma_msi_unmask(unsigned int irq)
+static void dma_msi_unmask(struct irq_desc *desc)
{
- struct iommu *iommu = irq_to_iommu[irq];
+ struct iommu *iommu = desc->action->dev_id;
unsigned long flags;
/* unmask it */
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static void dma_msi_mask(unsigned int irq)
+static void dma_msi_mask(struct irq_desc *desc)
{
unsigned long flags;
- struct iommu *iommu = irq_to_iommu[irq];
- struct irq_desc *desc = irq_to_desc(irq);
+ struct iommu *iommu = desc->action->dev_id;
irq_complete_move(desc);
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-static unsigned int dma_msi_startup(unsigned int irq)
+static unsigned int dma_msi_startup(struct irq_desc *desc)
{
- dma_msi_unmask(irq);
+ dma_msi_unmask(desc);
return 0;
}
-static void dma_msi_end(unsigned int irq, u8 vector)
+static void dma_msi_end(struct irq_desc *desc, u8 vector)
{
- dma_msi_unmask(irq);
+ dma_msi_unmask(desc);
ack_APIC_irq();
}
}
irq_desc[irq].handler = &dma_msi_type;
- irq_to_iommu[irq] = iommu;
#ifdef CONFIG_X86
ret = request_irq(irq, iommu_page_fault, 0, "dmar", iommu);
#else
if ( ret )
{
irq_desc[irq].handler = &no_irq_type;
- irq_to_iommu[irq] = NULL;
destroy_irq(irq);
dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
return ret;
platform_quirks_init();
- irq_to_iommu = xmalloc_array(struct iommu*, nr_irqs);
- BUG_ON(!irq_to_iommu);
- memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
- if(!irq_to_iommu)
- return -ENOMEM;
-
/* We enable the following features only if they are supported by all VT-d
* engines: Snoop Control, DMA passthrough, Queued Invalidation and
* Interrupt Remapping.
DECLARE_BITMAP(_bits,NR_VECTORS);
} vmask_t;
+struct irq_desc;
+
struct irq_cfg {
s16 vector; /* vector itself is only 8 bits, */
s16 old_vector; /* but we use -1 for unassigned */
asmlinkage void do_IRQ(struct cpu_user_regs *regs);
-void disable_8259A_irq(unsigned int irq);
-void enable_8259A_irq(unsigned int irq);
+void disable_8259A_irq(struct irq_desc *);
+void enable_8259A_irq(struct irq_desc *);
int i8259A_irq_pending(unsigned int irq);
void mask_8259A(void);
void unmask_8259A(void);
int create_irq(void);
void destroy_irq(unsigned int irq);
-struct irq_desc;
extern void irq_complete_move(struct irq_desc *);
extern struct irq_desc *irq_desc;
void __setup_vector_irq(int cpu);
-void move_native_irq(int irq);
+void move_native_irq(struct irq_desc *);
void move_masked_irq(struct irq_desc *);
int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
struct msi_desc;
/* Helper functions */
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
+extern void mask_msi_irq(struct irq_desc *);
+extern void unmask_msi_irq(struct irq_desc *);
extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
extern void pci_disable_msi(struct msi_desc *desc);
*/
struct hw_interrupt_type {
const char *typename;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
- void (*ack)(unsigned int irq);
- void (*end)(unsigned int irq, u8 vector);
+ unsigned int (*startup)(struct irq_desc *);
+ void (*shutdown)(struct irq_desc *);
+ void (*enable)(struct irq_desc *);
+ void (*disable)(struct irq_desc *);
+ void (*ack)(struct irq_desc *);
+ void (*end)(struct irq_desc *, u8 vector);
void (*set_affinity)(struct irq_desc *, const cpumask_t *);
};
extern hw_irq_controller no_irq_type;
extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
+extern unsigned int irq_startup_none(struct irq_desc *);
+extern void irq_actor_none(struct irq_desc *);
+#define irq_shutdown_none irq_actor_none
+#define irq_disable_none irq_actor_none
+#define irq_enable_none irq_actor_none
struct domain;
struct vcpu;
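
Taken together, the new declarations let a minimal controller be assembled from the shared stubs, with .end simply left NULL. A sketch under those assumptions; the example_* names are hypothetical:

/* Sketch only: example_* names are hypothetical. Every hook takes the
 * descriptor, and .end may be omitted because call sites check it. */
static void example_ack(struct irq_desc *desc)
{
    ack_APIC_irq();
}

static hw_irq_controller example_edge_type = {
    .typename     = "example-edge",
    .startup      = irq_startup_none,
    .shutdown     = irq_shutdown_none,
    .enable       = irq_enable_none,
    .disable      = irq_disable_none,
    .ack          = example_ack,
    /* .end left NULL: every caller now guards the invocation. */
    .set_affinity = set_ioapic_affinity_irq,
};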