/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];
-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
-
#ifdef CONFIG_SMP
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
.retrigger = resend_irq_on_evtchn,
};
-static inline void pirq_unmask_notify(int irq)
+/* True once Xen has accepted the page below; PHYSDEVOP_eoi then also unmasks. */
+static bool pirq_eoi_does_unmask;
+
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static DECLARE_BITMAP(pirq_needs_eoi, ALIGN(NR_PIRQS, PAGE_SIZE * 8))
+ __attribute__ ((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)));
+
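+/*
+ * Unmask the event channel behind a PIRQ and, when the PIRQ requires it,
+ * notify Xen (PHYSDEVOP_eoi) so the interrupt line can be re-raised.
+ */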
+static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
- if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
- VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+
+ if (pirq_eoi_does_unmask) {
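+ /* PHYSDEVOP_eoi also unmasks the event channel in this mode. */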
+ if (test_bit(eoi.irq, pirq_needs_eoi))
+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+ else
+ unmask_evtchn(evtchn);
+ } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
+ if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
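+ /* Bound to a remote CPU: fold the unmask and the EOI into one multicall. */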
+ struct evtchn_unmask unmask = { .port = evtchn };
+ struct multicall_entry mcl[2];
+
+ mcl[0].op = __HYPERVISOR_event_channel_op;
+ mcl[0].args[0] = EVTCHNOP_unmask;
+ mcl[0].args[1] = (unsigned long)&unmask;
+ mcl[1].op = __HYPERVISOR_physdev_op;
+ mcl[1].args[0] = PHYSDEVOP_eoi;
+ mcl[1].args[1] = (unsigned long)&eoi;
+
+ if (HYPERVISOR_multicall(mcl, 2))
+ BUG();
+ } else {
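+ /* Bound to this CPU: unmask locally, then notify Xen. */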
+ unmask_evtchn(evtchn);
+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+ }
+ } else
+ unmask_evtchn(evtchn);
}
static inline void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;
+
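+ /* Xen maintains the shared pirq_needs_eoi bitmap itself in this mode. */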
+ if (pirq_eoi_does_unmask)
+ return;
irq_status.irq = evtchn_get_xen_pirq(irq);
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
irq_status.flags = 0;
irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
out:
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
+ pirq_unmask_and_notify(evtchn, irq);
return 0;
}
if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
(IRQ_DISABLED|IRQ_PENDING)) {
shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
+ } else if (VALID_EVTCHN(evtchn))
+ pirq_unmask_and_notify(evtchn, irq);
}
static struct hw_interrupt_type pirq_type = {
init_evtchn_cpu_bindings();
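+ /* Re-register the pirq_needs_eoi page so unmask-on-EOI stays in effect. */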
+ if (pirq_eoi_does_unmask) {
+ struct physdev_pirq_eoi_mfn eoi_mfn;
+
+ eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn))
+ BUG();
+ }
+
/* New event-channel space is not 'live' yet. */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
mask_evtchn(evtchn);
void __init xen_init_IRQ(void)
{
unsigned int i;
+ struct physdev_pirq_eoi_mfn eoi_mfn;
init_evtchn_cpu_bindings();
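+ /* Probe for PHYSDEVOP_pirq_eoi_mfn: if Xen accepts the page, EOI also unmasks. */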
+ BUG_ON(!bitmap_empty(pirq_needs_eoi, PAGE_SIZE * 8));
+ eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0)
+ pirq_eoi_does_unmask = true;
+
/* No event channels are 'live' right now. */
for (i = 0; i < NR_EVENT_CHANNELS; i++)
mask_evtchn(i);
typedef struct physdev_eoi physdev_eoi_t;
DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function: the associated event channel is
+ * then unmasked automatically. The registered page is used as a bit array
+ * indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_mfn 17
+struct physdev_pirq_eoi_mfn {
+ /* IN */
+ xen_pfn_t mfn;
+};
+typedef struct physdev_pirq_eoi_mfn physdev_pirq_eoi_mfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_mfn_t);
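+/*
+ * Illustrative guest-side registration (see xen_init_IRQ() in this patch;
+ * the identifiers below are the ones introduced there):
+ *
+ *   struct physdev_pirq_eoi_mfn eoi_mfn;
+ *
+ *   eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+ *   if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0)
+ *       pirq_eoi_does_unmask = true;
+ */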
+
/*
* Query the status of an IRQ line.
* @arg == pointer to physdev_irq_status_query structure.