#include <linux/irqnr.h>
#include <linux/pci.h>
+#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
-#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
+#endif
+#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/sched.h>
/*
* This lock protects updates to the following mapping and reference-count
int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
+ struct irq_desc *desc;
mutex_lock(&irq_mapping_update_lock);
irq = evtchn_to_irq[evtchn];
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_edge_irq, "event");
+ desc = irq_to_desc(irq);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
xen_irq_info_evtchn_init(irq, evtchn);
}
{
struct pt_regs *old_regs = set_irq_regs(regs);
+#ifdef CONFIG_X86
exit_idle();
+#endif
irq_enter();
__xen_evtchn_do_upcall();
for (i = 0; i < NR_EVENT_CHANNELS; i++)
mask_evtchn(i);
+#ifdef CONFIG_X86
if (xen_hvm_domain()) {
xen_callback_vector();
native_init_IRQ();
if (xen_initial_domain())
pci_xen_initial_domain();
}
+#endif
}
+#ifdef CONFIG_ARM
+#define IRQ_EVTCHN_CALLBACK 63
+/*
+ * Interrupt handler for the single event-channel upcall IRQ on ARM.
+ *
+ * All Xen event channels are demultiplexed from this one hardware IRQ,
+ * so the handler simply runs the generic upcall dispatcher.
+ *
+ * @irq: the IRQ number (unused; always IRQ_EVTCHN_CALLBACK)
+ * @arg: dev_id cookie passed to request_irq() (unused)
+ *
+ * Returns IRQ_HANDLED: returning 0 (IRQ_NONE) would make the core treat
+ * every event as spurious and eventually disable the interrupt line.
+ */
+irqreturn_t xen_arm_callback(int irq, void *arg)
+{
+	__xen_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
+/*
+ * ARM initialization: set up the common event-channel infrastructure,
+ * then claim the fixed callback IRQ through which Xen delivers all events.
+ *
+ * Returns 0 on success, or the negative error code from request_irq().
+ * Run at core_initcall time so event channels are usable before
+ * device initcalls that may depend on them.
+ */
+int __init xen_init_IRQ_arm(void)
+{
+	int rc;
+
+	xen_init_IRQ();
+	/*
+	 * dev_id is NULL: the IRQ is not shared and is never freed, and
+	 * xen_arm_callback ignores its argument, so no cookie is needed.
+	 */
+	rc = request_irq(IRQ_EVTCHN_CALLBACK, xen_arm_callback,
+			IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			"events", NULL);
+	if (rc)
+		printk(KERN_ERR "Error %d requesting IRQ %d\n",
+				rc, IRQ_EVTCHN_CALLBACK);
+	return rc;
+}
+core_initcall(xen_init_IRQ_arm);
+#endif