xenbits.xensource.com Git - legacy/linux-2.6.18-xen.git/commitdiff
linux/x86: use shared page indicating the need for an EOI notification
author     Keir Fraser <keir.fraser@citrix.com>
           Fri, 28 Nov 2008 13:30:27 +0000 (13:30 +0000)
committer  Keir Fraser <keir.fraser@citrix.com>
           Fri, 28 Nov 2008 13:30:27 +0000 (13:30 +0000)
Signed-off-by: Jan Beulich <jbeulich@novell.com>
drivers/xen/core/evtchn.c
include/xen/interface/physdev.h
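
At its core the change replaces per-IRQ PHYSDEVOP_irq_status_query polling with a page-sized bitmap shared between guest and hypervisor: bit N of the page tells the guest whether Xen still wants an explicit EOI for PIRQ N, and once the page is registered a PHYSDEVOP_eoi also unmasks the associated event channel. Below is a minimal sketch of that fast path, reusing the names introduced by the patch (pirq_needs_eoi, pirq_eoi_does_unmask) and assuming the usual linux-2.6.18-xen hypercall wrappers; the helper name pirq_end_of_interrupt is illustrative only, not part of the patch.

#include <xen/interface/physdev.h>

/* Sketch of the registered-page fast path; mirrors pirq_unmask_and_notify()
 * in the hunk below for the case where pirq_eoi_does_unmask is true. */
static void pirq_end_of_interrupt(unsigned int evtchn, int xen_pirq)
{
	struct physdev_eoi eoi = { .irq = xen_pirq };

	if (test_bit(xen_pirq, pirq_needs_eoi))
		/* Xen clears the bit and unmasks the event channel itself. */
		VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
	else
		/* No EOI wanted: just unmask the event channel locally. */
		unmask_evtchn(evtchn);
}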

diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index a0057a3cdd457fb1b1575bf1a89d6204718b0802..765a7c049b8ae4415bd22d318123c7e44bf9b500 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -123,9 +123,6 @@ DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
 
-/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
-
 #ifdef CONFIG_SMP
 
 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
@@ -756,16 +753,48 @@ static struct hw_interrupt_type dynirq_type = {
        .retrigger = resend_irq_on_evtchn,
 };
 
-static inline void pirq_unmask_notify(int irq)
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static bool pirq_eoi_does_unmask;
+static DECLARE_BITMAP(pirq_needs_eoi, ALIGN(NR_PIRQS, PAGE_SIZE * 8))
+       __attribute__ ((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)));
+
+static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
 {
        struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
-       if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
-               VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+
+       if (pirq_eoi_does_unmask) {
+               if (test_bit(eoi.irq, pirq_needs_eoi))
+                       VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+               else
+                       unmask_evtchn(evtchn);
+       } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
+               if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
+                       struct evtchn_unmask unmask = { .port = evtchn };
+                       struct multicall_entry mcl[2];
+
+                       mcl[0].op = __HYPERVISOR_event_channel_op;
+                       mcl[0].args[0] = EVTCHNOP_unmask;
+                       mcl[0].args[1] = (unsigned long)&unmask;
+                       mcl[1].op = __HYPERVISOR_physdev_op;
+                       mcl[1].args[0] = PHYSDEVOP_eoi;
+                       mcl[1].args[1] = (unsigned long)&eoi;
+
+                       if (HYPERVISOR_multicall(mcl, 2))
+                               BUG();
+               } else {
+                       unmask_evtchn(evtchn);
+                       VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+               }
+       } else
+               unmask_evtchn(evtchn);
 }
 
 static inline void pirq_query_unmask(int irq)
 {
        struct physdev_irq_status_query irq_status;
+
+       if (pirq_eoi_does_unmask)
+               return;
        irq_status.irq = evtchn_get_xen_pirq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;
@@ -806,8 +835,7 @@ static unsigned int startup_pirq(unsigned int irq)
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
 
  out:
-       unmask_evtchn(evtchn);
-       pirq_unmask_notify(irq);
+       pirq_unmask_and_notify(evtchn, irq);
 
        return 0;
 }
@@ -859,10 +887,8 @@ static void end_pirq(unsigned int irq)
        if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
-       } else if (VALID_EVTCHN(evtchn)) {
-               unmask_evtchn(evtchn);
-               pirq_unmask_notify(irq);
-       }
+       } else if (VALID_EVTCHN(evtchn))
+               pirq_unmask_and_notify(evtchn, irq);
 }
 
 static struct hw_interrupt_type pirq_type = {
@@ -1012,6 +1038,14 @@ void irq_resume(void)
 
        init_evtchn_cpu_bindings();
 
+       if (pirq_eoi_does_unmask) {
+               struct physdev_pirq_eoi_mfn eoi_mfn;
+
+               eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+               if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn))
+                       BUG();
+       }
+
        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);
@@ -1098,9 +1132,15 @@ int evtchn_get_xen_pirq(int irq)
 void __init xen_init_IRQ(void)
 {
        unsigned int i;
+       struct physdev_pirq_eoi_mfn eoi_mfn;
 
        init_evtchn_cpu_bindings();
 
+       BUG_ON(!bitmap_empty(pirq_needs_eoi, PAGE_SIZE * 8));
+       eoi_mfn.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT;
+       if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0)
+               pirq_eoi_does_unmask = true;
+
        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 8057277baab52e2f7fc87ed85e44e3b563e0f962..80a10329907df5e11638715cf81fd59955ae3dde 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -40,6 +40,21 @@ struct physdev_eoi {
 typedef struct physdev_eoi physdev_eoi_t;
 DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
 
+/*
+ * Register a shared page for the hypervisor to indicate whether the guest
+ * must issue PHYSDEVOP_eoi. The semantics of PHYSDEVOP_eoi change slightly
+ * once the guest has used this function, in that the associated event channel
+ * will automatically get unmasked. The page registered is used as a bit
+ * array indexed by Xen's PIRQ value.
+ */
+#define PHYSDEVOP_pirq_eoi_mfn          17
+struct physdev_pirq_eoi_mfn {
+    /* IN */
+    xen_pfn_t mfn;
+};
+typedef struct physdev_pirq_eoi_mfn physdev_pirq_eoi_mfn_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_pirq_eoi_mfn_t);
+
 /*
  * Query the status of an IRQ line.
  * @arg == pointer to physdev_irq_status_query structure.
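
Registering the shared page from the guest side is a single hypercall taking the machine frame number of a page-aligned bitmap, as xen_init_IRQ() and irq_resume() do in the evtchn.c hunks above. A hedged sketch follows, assuming the kernel's DECLARE_BITMAP and virt_to_bus helpers as used in the patch; the function name register_pirq_eoi_page is illustrative only.

#include <xen/interface/physdev.h>

/* Page-aligned bitmap indexed by Xen's PIRQ number; Xen updates it, the
 * guest only reads it. */
static DECLARE_BITMAP(pirq_needs_eoi, PAGE_SIZE * 8)
	__attribute__((__aligned__(PAGE_SIZE)));

/* Returns true if the hypervisor accepted the page. Older hypervisors
 * fail PHYSDEVOP_pirq_eoi_mfn, in which case the guest keeps using the
 * per-IRQ PHYSDEVOP_irq_status_query path. */
static bool register_pirq_eoi_page(void)
{
	struct physdev_pirq_eoi_mfn eoi_mfn = {
		.mfn = virt_to_bus(pirq_needs_eoi) >> PAGE_SHIFT,
	};

	return HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_mfn, &eoi_mfn) == 0;
}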