ia64/linux-2.6.18-xen.hg
drivers/xen/core/evtchn.c @ changeset 661:7886619f623e

linux/pci-msi: translate Xen-provided PIRQs (take 2)

Previously, the kernel depended on Xen's NR_IRQS being no larger than
the kernel's NR_PIRQS; Xen-provided PIRQs are now translated to kernel
IRQ numbers instead.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Mon Sep 08 13:13:35 2008 +0100

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};
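
/*
 * How the binding types are used below: IRQT_PIRQ is a physical IRQ proxied
 * through Xen, IRQT_VIRQ a per-VCPU virtual IRQ (e.g. the timer), IRQT_IPI an
 * inter-VCPU interrupt, IRQT_LOCAL_PORT an event channel allocated locally
 * (listening and interdomain bindings), and IRQT_CALLER_PORT an existing
 * event channel whose port number the caller supplies.
 */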

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));

        BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
        BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
        BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
        BUG_ON(index >> _INDEX_BITS);

        BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
        return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq] >> (32 - _IRQT_BITS);
}
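
/*
 * Layout of irq_info[irq] (32 bits) as packed by mk_irq_info():
 *
 *      31      28 27               12 11              0
 *      +---------+-------------------+-----------------+
 *      |  type   |       index       |  event channel  |
 *      | 4 bits  |      16 bits      |     12 bits     |
 *      +---------+-------------------+-----------------+
 *
 * For example, VIRQ_TIMER (virq 0) bound to event channel 5 is stored as
 * mk_irq_info(IRQT_VIRQ, 0, 5); type_from_irq(), index_from_irq() and
 * evtchn_from_irq() extract the three fields again.
 */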

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
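
/*
 * cpu_evtchn[] records which CPU each event channel is currently bound to,
 * and cpu_evtchn_mask[cpu] is the per-CPU bitmap of channels routed to that
 * CPU. active_evtchns() below selects, for one word of the shared-info
 * bitmaps, the channels that are pending, routed to this CPU, and not
 * globally masked.
 */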

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        int irq = evtchn_to_irq[chn];

        BUG_ON(!test_bit(chn, s->evtchn_mask));

        if (irq != -1)
                set_native_irq_info(irq, cpumask_of_cpu(cpu));

        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;

        /* By default all event channels notify CPU#0. */
        for (i = 0; i < NR_IRQS; i++)
                set_native_irq_info(i, cpumask_of_cpu(0));

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
        irq_ctx_init(0);
        xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {                  \
        (regs)->IRQ_REG = ~(irq);               \
        do_IRQ((regs));                         \
} while (0)
#endif
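
/*
 * The do_IRQ() wrapper macro above follows the x86 convention used by this
 * kernel tree: the complemented IRQ number is stashed in orig_eax/orig_rax
 * before calling the architecture's do_IRQ(), which recovers the IRQ number
 * from that saved register.
 */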

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        VOID(HYPERVISOR_xen_version(0, NULL));
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
        unsigned long  l1, l2;
        unsigned long  masked_l1, masked_l2;
        unsigned int   l1i, l2i, port, count;
        int            irq;
        unsigned int   cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
        vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];

        do {
                /* Avoid a callback storm when we reenable delivery. */
                vcpu_info->evtchn_upcall_pending = 0;

                /* Nested invocations bail immediately. */
                if (unlikely(per_cpu(upcall_count, cpu)++))
                        return;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

                l1i = per_cpu(last_processed_l1i, cpu);
                l2i = per_cpu(last_processed_l2i, cpu);

                while (l1 != 0) {

                        l1i = (l1i + 1) % BITS_PER_LONG;
                        masked_l1 = l1 & ((~0UL) << l1i);

                        if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
                                l1i = BITS_PER_LONG - 1;
                                l2i = BITS_PER_LONG - 1;
                                continue;
                        }
                        l1i = __ffs(masked_l1);

                        do {
                                l2 = active_evtchns(cpu, s, l1i);

                                l2i = (l2i + 1) % BITS_PER_LONG;
                                masked_l2 = l2 & ((~0UL) << l2i);

                                if (masked_l2 == 0) { /* if we masked out all events, move on */
                                        l2i = BITS_PER_LONG - 1;
                                        break;
                                }

                                l2i = __ffs(masked_l2);

                                /* process port */
                                port = (l1i * BITS_PER_LONG) + l2i;
                                if ((irq = evtchn_to_irq[port]) != -1)
                                        do_IRQ(irq, regs);
                                else {
                                        exit_idle();
                                        evtchn_device_upcall(port);
                                }

                                /* if this is the final port processed, we'll pick up here+1 next time */
                                per_cpu(last_processed_l1i, cpu) = l1i;
                                per_cpu(last_processed_l2i, cpu) = l2i;

                        } while (l2i != BITS_PER_LONG - 1);

                        l2 = active_evtchns(cpu, s, l1i);
                        if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
                                l1 &= ~(1UL << l1i);

                }

                /* If there were nested callbacks then we have more to do. */
                count = per_cpu(upcall_count, cpu);
                per_cpu(upcall_count, cpu) = 0;
        } while (unlikely(count != 1));
}
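
/*
 * The two-level scan above mirrors Xen's pending-event structure: each set
 * bit i in evtchn_pending_sel says that word i of evtchn_pending[] may hold
 * pending ports, and the port number is reconstructed as
 * (l1i * BITS_PER_LONG) + l2i. On a 64-bit build, for instance, pending
 * port 130 appears as bit 2 of word 2 (130 = 2 * 64 + 2). Scanning resumes
 * just after last_processed_l1i/l2i so that low-numbered ports cannot
 * starve higher-numbered ones.
 */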

static int find_unbound_irq(void)
{
        static int warned;
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        return irq;
        }

        if (!warned) {
                warned = 1;
                printk(KERN_WARNING "No available IRQ to bind to: "
                       "increase NR_DYNIRQS.\n");
        }

        return -ENOSPC;
}

static int bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        BUG_ON(evtchn_to_irq[local_port] != -1);

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();
                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        unsigned int cpu;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
                    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                /* Zap stats across IRQ changes of use. */
                for_each_possible_cpu(cpu)
                        kstat_cpu(cpu).irqs[irq] = 0;
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_caller_port_to_irqhandler(
        unsigned int caller_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_caller_port_to_irq(caller_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);

int bind_listening_port_to_irqhandler(
        unsigned int remote_domain,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_listening_port_to_irq(remote_domain);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(
        unsigned int remote_domain,
        unsigned int remote_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
        unsigned int virq,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
        unsigned int ipi,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
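
/*
 * Typical use of the binding API above (illustrative sketch only; the
 * handler name and device cookie are placeholders, not part of this file):
 *
 *      static irqreturn_t my_handler(int irq, void *dev_id,
 *                                    struct pt_regs *regs)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, my_handler, 0,
 *                                    "my-virq", NULL);
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      unbind_from_irqhandler(irq, NULL);
 *
 * unbind_from_irqhandler() both frees the Linux handler and drops the
 * binding reference taken by the bind_* call.
 */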

#ifdef CONFIG_SMP
void rebind_evtchn_to_cpu(int port, unsigned int cpu)
{
        struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
        int masked;

        masked = test_and_set_evtchn_mask(port);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
                bind_evtchn_to_cpu(port, cpu);
        if (!masked)
                unmask_evtchn(port);
}

static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                rebind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned int irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        shared_info_t *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = test_and_set_evtchn_mask(evtchn);
        synch_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}
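
/*
 * resend_irq_on_evtchn() is wired up as the .retrigger hook of both IRQ
 * types below: it re-marks the event channel pending (while temporarily
 * masked) so that a lost or software-requested interrupt is redelivered
 * through the normal upcall path.
 */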

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
        return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
                unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
        .typename     = "Dynamic-irq",
        .startup      = startup_dynirq,
        .shutdown     = shutdown_dynirq,
        .enable       = enable_dynirq,
        .disable      = disable_dynirq,
        .ack          = ack_dynirq,
        .end          = end_dynirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger    = resend_irq_on_evtchn,
};

void evtchn_register_pirq(int irq)
{
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
}

#ifndef CONFIG_X86_IO_APIC
#undef IO_APIC_IRQ
#define IO_APIC_IRQ(irq) ((irq) >= pirq_to_irq(16))
#endif

int evtchn_map_pirq(int irq, int xen_pirq)
{
        if (irq < 0) {
                static DEFINE_SPINLOCK(irq_alloc_lock);

                irq = pirq_to_irq(NR_PIRQS - 1);
                spin_lock(&irq_alloc_lock);
                do {
                        if (!IO_APIC_IRQ(irq))
                                continue;
                        if (!index_from_irq(irq)) {
                                BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
                                irq_info[irq] = mk_irq_info(IRQT_PIRQ,
                                                            xen_pirq, 0);
                                break;
                        }
                } while (--irq);
                spin_unlock(&irq_alloc_lock);
                if (irq < pirq_to_irq(16))
                        return -ENOSPC;
        } else if (!xen_pirq) {
                if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
                        return -EINVAL;
                irq_info[irq] = IRQ_UNBOUND;
                return 0;
        } else if (type_from_irq(irq) != IRQT_PIRQ
                   || index_from_irq(irq) != xen_pirq) {
                printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
                       "cannot map to PIRQ#%u\n",
                       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
                return -EINVAL;
        }
        return index_from_irq(irq) ? irq : -EINVAL;
}
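
/*
 * evtchn_map_pirq() is the translation layer between Xen-provided PIRQ
 * numbers (e.g. for MSI interrupts) and the kernel's own IRQ numbers, so the
 * kernel no longer has to assume that Xen's interrupt numbers fit inside its
 * NR_PIRQS range. Called with irq < 0 it allocates a free IO-APIC-range IRQ
 * (scanning downwards from pirq_to_irq(NR_PIRQS - 1)) and records the Xen
 * PIRQ in irq_info[]; called with xen_pirq == 0 it tears such a mapping
 * down; otherwise it verifies that an existing mapping matches.
 */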

int evtchn_get_xen_pirq(int irq)
{
        if (!IO_APIC_IRQ(irq))
                return irq;
        if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
                return 0;
        return index_from_irq(irq);
}

static inline void pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };
        if (unlikely(test_bit(pirq, pirq_needs_eoi)))
                VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
}

static inline void pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status;
        irq_status.irq = pirq;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;
        clear_bit(pirq, pirq_needs_eoi);
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                set_bit(pirq, pirq_needs_eoi);
}
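
/*
 * Together these implement the PIRQ end-of-interrupt protocol: at startup,
 * pirq_query_unmask() asks Xen whether the PIRQ needs an explicit EOI and
 * caches the answer in pirq_needs_eoi[]; on every unmask, pirq_unmask_notify()
 * issues the PHYSDEVOP_eoi hypercall only for PIRQs that were flagged.
 */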

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = evtchn_get_xen_pirq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
}

static void enable_pirq(unsigned int irq)
{
        startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
        } else if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static struct hw_interrupt_type pirq_type = {
        .typename     = "Phys-irq",
        .startup      = startup_pirq,
        .shutdown     = shutdown_pirq,
        .enable       = enable_pirq,
        .disable      = disable_pirq,
        .ack          = ack_pirq,
        .end          = end_pirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger    = resend_irq_on_evtchn,
};

int irq_ignore_unhandled(unsigned int irq)
{
        struct physdev_irq_status_query irq_status = { .irq = irq };

        if (!is_running_on_xen())
                return 0;

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !!(irq_status.flags & XENIRQSTAT_shared);
}

void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

int irq_to_evtchn_port(int irq)
{
        return evtchn_from_irq(irq);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        synch_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
                return;
        }

        synch_clear_bit(port, s->evtchn_mask);

        /* Did we miss an interrupt 'edge'? Re-fire if so. */
        if (synch_test_bit(port, s->evtchn_pending) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel))
                vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
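
/*
 * The re-fire check above covers the race where an event became pending
 * while its channel was masked: once the mask bit is cleared, a pending bit
 * with no selector bit set would otherwise never be noticed. Setting the
 * selector word bit and evtchn_upcall_pending by hand makes the next upcall
 * (or force_evtchn_callback()) pick the port up through the normal scan in
 * evtchn_do_upcall().
 */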

void disable_all_local_evtchn(void)
{
        unsigned i, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;

        for (i = 0; i < NR_EVENT_CHANNELS; ++i)
                if (cpu_from_evtchn(i) == cpu)
                        synch_set_bit(i, &s->evtchn_mask[0]);
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

void irq_resume(void)
{
        unsigned int cpu, pirq, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

void __init xen_init_IRQ(void)
{
        unsigned int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < NR_DYNIRQS; i++) {
                irq_bindcount[dynirq_to_irq(i)] = 0;

                irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[dynirq_to_irq(i)].action = NULL;
                irq_desc[dynirq_to_irq(i)].depth = 1;
                irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
        }

        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        for (i = 0; i < NR_PIRQS; i++) {
                irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) && !is_initial_xendomain())
                        continue;
#endif

                irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[pirq_to_irq(i)].action = NULL;
                irq_desc[pirq_to_irq(i)].depth = 1;
                irq_desc[pirq_to_irq(i)].chip = &pirq_type;
        }
}