ia64/linux-2.6.18-xen.hg
drivers/xen/core/evtchn.c @ 671:8ca4d2b16eb3

ia64, evtchn: fix to 669:0ba49e9e74f9.

To keep the previous event-channel behavior for ia64,
identity_mapped_irq() is set to true.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Tue Sep 16 13:10:39 2008 +0100

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_LOCAL_PORT,
	IRQT_CALLER_PORT,
	_IRQT_COUNT
};

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
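
/*
 * irq_info[] packs three fields into 32 bits, most significant first:
 *   [ type : _IRQT_BITS (4) ][ index : _INDEX_BITS (16) ][ evtchn : _EVTCHN_BITS (12) ]
 * mk_irq_info() below builds this value; the *_from_irq() accessors
 * extract the individual fields.
 */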

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));

	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
	BUG_ON(index >> _INDEX_BITS);

	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));

	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
	return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq] >> (32 - _IRQT_BITS);
}

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
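
/*
 * On SMP, cpu_evtchn[] records which CPU each event channel is bound to and
 * cpu_evtchn_mask[] keeps a per-CPU bitmap of those channels, so that
 * active_evtchns() only reports events targeted at the local CPU. On UP
 * builds these structures are unnecessary and the helpers below collapse
 * to trivial stubs.
 */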

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	int irq = evtchn_to_irq[chn];

	BUG_ON(!test_bit(chn, s->evtchn_mask));

	if (irq != -1)
		set_native_irq_info(irq, cpumask_of_cpu(cpu));

	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		set_native_irq_info(i, cpumask_of_cpu(0));

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
	irq_ctx_init(0);
	xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {			\
	(regs)->IRQ_REG = ~(irq);		\
	do_IRQ((regs));				\
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	VOID(HYPERVISOR_xen_version(0, NULL));
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };
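
/*
 * The upcall handler below walks Xen's two-level pending bitmap: the
 * per-VCPU selector word (evtchn_pending_sel) indicates which words of
 * shared_info->evtchn_pending may contain work. Scanning resumes just
 * after the last (l1, l2) position serviced on this CPU, recorded in
 * last_processed_l1i/l2i, so that a busy low-numbered port cannot
 * starve higher-numbered ones.
 */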

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long  l1, l2;
	unsigned long  masked_l1, masked_l2;
	unsigned int   l1i, l2i, port, count;
	int            irq;
	unsigned int   cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];

	do {
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info->evtchn_upcall_pending = 0;

		/* Nested invocations bail immediately. */
		if (unlikely(per_cpu(upcall_count, cpu)++))
			return;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

		l1i = per_cpu(last_processed_l1i, cpu);
		l2i = per_cpu(last_processed_l2i, cpu);

		while (l1 != 0) {

			l1i = (l1i + 1) % BITS_PER_LONG;
			masked_l1 = l1 & ((~0UL) << l1i);

			if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
				l1i = BITS_PER_LONG - 1;
				l2i = BITS_PER_LONG - 1;
				continue;
			}
			l1i = __ffs(masked_l1);

			do {
				l2 = active_evtchns(cpu, s, l1i);

				l2i = (l2i + 1) % BITS_PER_LONG;
				masked_l2 = l2 & ((~0UL) << l2i);

				if (masked_l2 == 0) { /* if we masked out all events, move on */
					l2i = BITS_PER_LONG - 1;
					break;
				}

				l2i = __ffs(masked_l2);

				/* process port */
				port = (l1i * BITS_PER_LONG) + l2i;
				if ((irq = evtchn_to_irq[port]) != -1)
					do_IRQ(irq, regs);
				else {
					exit_idle();
					evtchn_device_upcall(port);
				}

				/* if this is the final port processed, we'll pick up here+1 next time */
				per_cpu(last_processed_l1i, cpu) = l1i;
				per_cpu(last_processed_l2i, cpu) = l2i;

			} while (l2i != BITS_PER_LONG - 1);

			l2 = active_evtchns(cpu, s, l1i);
			if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
				l1 &= ~(1UL << l1i);

		}

		/* If there were nested callbacks then we have more to do. */
		count = per_cpu(upcall_count, cpu);
		per_cpu(upcall_count, cpu) = 0;
	} while (unlikely(count != 1));
}

static int find_unbound_irq(void)
{
	static int warned;
	int irq;

	for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
		if (irq_bindcount[irq] == 0)
			return irq;

	if (!warned) {
		warned = 1;
		printk(KERN_WARNING "No available IRQ to bind to: "
		       "increase NR_DYNIRQS.\n");
	}

	return -ENOSPC;
}

static int bind_caller_port_to_irq(unsigned int caller_port)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[caller_port]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		evtchn_to_irq[caller_port] = irq;
		irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_local_port_to_irq(unsigned int local_port)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	BUG_ON(evtchn_to_irq[local_port] != -1);

	if ((irq = find_unbound_irq()) < 0) {
		struct evtchn_close close = { .port = local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();
		goto out;
	}

	evtchn_to_irq[local_port] = irq;
	irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_listening_port_to_irq(unsigned int remote_domain)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);

	return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		if ((irq = find_unbound_irq()) < 0)
			goto out;

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	unsigned int cpu;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
		    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		/* Zap stats across IRQ changes of use. */
		for_each_possible_cpu(cpu)
			kstat_cpu(cpu).irqs[irq] = 0;
	}

	spin_unlock(&irq_mapping_update_lock);
}
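
/*
 * The bind_*_to_irqhandler() helpers below pair the port-to-IRQ binding
 * routines above with request_irq(); if request_irq() fails, the freshly
 * acquired binding is dropped again via unbind_from_irq().
 */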

int bind_caller_port_to_irqhandler(
	unsigned int caller_port,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_caller_port_to_irq(caller_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);

int bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_listening_port_to_irq(remote_domain);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(
	unsigned int remote_domain,
	unsigned int remote_port,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

#ifdef CONFIG_SMP
void rebind_evtchn_to_cpu(int port, unsigned int cpu)
{
	struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
	int masked;

	masked = test_and_set_evtchn_mask(port);
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
		bind_evtchn_to_cpu(port, cpu);
	if (!masked)
		unmask_evtchn(port);
}

static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		rebind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned int irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}
#endif

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	shared_info_t *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = test_and_set_evtchn_mask(evtchn);
	synch_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
		unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
	.typename	= "Dynamic-irq",
	.startup	= startup_dynirq,
	.shutdown	= shutdown_dynirq,
	.enable		= enable_dynirq,
	.disable	= disable_dynirq,
	.ack		= ack_dynirq,
	.end		= end_dynirq,
#ifdef CONFIG_SMP
	.set_affinity	= set_affinity_irq,
#endif
	.retrigger	= resend_irq_on_evtchn,
};
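
/*
 * Physical IRQ (PIRQ) handling. identity_mapped_irq() decides whether a
 * Linux IRQ number is used directly as the Xen PIRQ number; on non-x86
 * architectures such as ia64 it is always true (see the changeset note
 * above), so evtchn_get_xen_pirq() simply returns the IRQ unchanged there.
 */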

void evtchn_register_pirq(int irq)
{
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
}

#if defined(CONFIG_X86_IO_APIC)
#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
#elif defined(CONFIG_X86)
#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < 16)
#else
#define identity_mapped_irq(irq) (1)
#endif

int evtchn_map_pirq(int irq, int xen_pirq)
{
	if (irq < 0) {
		static DEFINE_SPINLOCK(irq_alloc_lock);

		irq = PIRQ_BASE + NR_PIRQS - 1;
		spin_lock(&irq_alloc_lock);
		do {
			if (identity_mapped_irq(irq))
				continue;
			if (!index_from_irq(irq)) {
				BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
				irq_info[irq] = mk_irq_info(IRQT_PIRQ,
							    xen_pirq, 0);
				break;
			}
		} while (--irq >= PIRQ_BASE);
		spin_unlock(&irq_alloc_lock);
		if (irq < PIRQ_BASE)
			return -ENOSPC;
	} else if (!xen_pirq) {
		if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
			return -EINVAL;
		irq_info[irq] = IRQ_UNBOUND;
		return 0;
	} else if (type_from_irq(irq) != IRQT_PIRQ
		   || index_from_irq(irq) != xen_pirq) {
		printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
		       "cannot map to PIRQ#%u\n",
		       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
		return -EINVAL;
	}
	return index_from_irq(irq) ? irq : -EINVAL;
}

int evtchn_get_xen_pirq(int irq)
{
	if (identity_mapped_irq(irq))
		return irq;
	BUG_ON(type_from_irq(irq) != IRQT_PIRQ);
	return index_from_irq(irq);
}
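
/*
 * Some physical IRQs require Xen to be notified with an explicit EOI when
 * they are unmasked. pirq_query_unmask() asks the hypervisor via
 * PHYSDEVOP_irq_status_query and caches the answer in pirq_needs_eoi;
 * pirq_unmask_notify() issues the PHYSDEVOP_eoi when it is needed.
 */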

static inline void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
	if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
		VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
}

static inline void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	irq_status.irq = evtchn_get_xen_pirq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;
	clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = evtchn_get_xen_pirq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static struct hw_interrupt_type pirq_type = {
	.typename	= "Phys-irq",
	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,
	.enable		= enable_pirq,
	.disable	= disable_pirq,
	.ack		= ack_pirq,
	.end		= end_pirq,
#ifdef CONFIG_SMP
	.set_affinity	= set_affinity_irq,
#endif
	.retrigger	= resend_irq_on_evtchn,
};

int irq_ignore_unhandled(unsigned int irq)
{
	struct physdev_irq_status_query irq_status = { .irq = irq };

	if (!is_running_on_xen())
		return 0;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !!(irq_status.flags & XENIRQSTAT_shared);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

int irq_to_evtchn_port(int irq)
{
	return evtchn_from_irq(irq);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
		return;
	}

	synch_clear_bit(port, s->evtchn_mask);

	/* Did we miss an interrupt 'edge'? Re-fire if so. */
	if (synch_test_bit(port, s->evtchn_pending) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel))
		vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void disable_all_local_evtchn(void)
{
	unsigned i, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;

	for (i = 0; i < NR_EVENT_CHANNELS; ++i)
		if (cpu_from_evtchn(i) == cpu)
			synch_set_bit(i, &s->evtchn_mask[0]);
}
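
/*
 * After save/restore or migration all event-channel ports are invalid.
 * The two helpers below rebuild the per-CPU VIRQ and IPI bindings from the
 * recorded virq_to_irq/ipi_to_irq tables; they are invoked from
 * irq_resume().
 */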

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

void irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
		BUG_ON(irq_info[irq] != IRQ_UNBOUND);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

void __init xen_init_IRQ(void)
{
	unsigned int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
		irq_bindcount[i] = 0;

		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) {
		irq_bindcount[i] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if (identity_mapped_irq(i) && ((i - PIRQ_BASE) == RTC_IRQ)
		    && !is_initial_xendomain())
			continue;
#endif

		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &pirq_type;
	}
}