ia64/linux-2.6.18-xen.hg

view drivers/xen/core/evtchn.c @ 659:ad374a7a9f3e

Revert 654:8925ce7552528 (linux/pci-msi: translate Xen-provided PIRQs)

Breaks the -xenU configuration ("MAX_IO_APICS undefined")

Also implicated in a kernel crash during save/restore in our automated
tests.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Sep 05 12:39:29 2008 +0100 (2008-09-05)
parents 8925ce755252
children 7886619f623e
line source
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT
};

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 24);
}
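
/*
 * Layout of irq_info[irq], as built by mk_irq_info() and decoded by the
 * accessors above:
 *
 *   31      24 23      16 15                  0
 *  +----------+----------+---------------------+
 *  |   type   |  index   |    event channel    |
 *  +----------+----------+---------------------+
 *
 * e.g. an IRQ bound to VIRQ_TIMER on event channel 3 is encoded as
 * mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3).
 */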
/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        int irq = evtchn_to_irq[chn];

        BUG_ON(!test_bit(chn, s->evtchn_mask));

        if (irq != -1)
                set_native_irq_info(irq, cpumask_of_cpu(cpu));

        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;

        /* By default all event channels notify CPU#0. */
        for (i = 0; i < NR_IRQS; i++)
                set_native_irq_info(i, cpumask_of_cpu(0));

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
        irq_ctx_init(0);
        xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {                  \
        (regs)->IRQ_REG = ~(irq);               \
        do_IRQ((regs));                         \
} while (0)
#endif
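
/*
 * Note on the do_IRQ() wrapper above: the arch do_IRQ() expects the vector
 * number in pt_regs->orig_eax/orig_rax, stored one's-complemented, matching
 * what the native interrupt entry stubs would have left there. The wrapper
 * fakes that up so an event channel can be fed into the generic IRQ path.
 */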
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        VOID(HYPERVISOR_xen_version(0, NULL));
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
        unsigned long l1, l2;
        unsigned long masked_l1, masked_l2;
        unsigned int l1i, l2i, port, count;
        int irq;
        unsigned int cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        do {
                /* Avoid a callback storm when we reenable delivery. */
                vcpu_info->evtchn_upcall_pending = 0;

                /* Nested invocations bail immediately. */
                if (unlikely(per_cpu(upcall_count, cpu)++))
                        return;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);

                l1i = per_cpu(last_processed_l1i, cpu);
                l2i = per_cpu(last_processed_l2i, cpu);

                while (l1 != 0) {

                        l1i = (l1i + 1) % BITS_PER_LONG;
                        masked_l1 = l1 & ((~0UL) << l1i);

                        if (masked_l1 == 0) { /* if we masked out all events, wrap around to the beginning */
                                l1i = BITS_PER_LONG - 1;
                                l2i = BITS_PER_LONG - 1;
                                continue;
                        }
                        l1i = __ffs(masked_l1);

                        do {
                                l2 = active_evtchns(cpu, s, l1i);

                                l2i = (l2i + 1) % BITS_PER_LONG;
                                masked_l2 = l2 & ((~0UL) << l2i);

                                if (masked_l2 == 0) { /* if we masked out all events, move on */
                                        l2i = BITS_PER_LONG - 1;
                                        break;
                                }

                                l2i = __ffs(masked_l2);

                                /* process port */
                                port = (l1i * BITS_PER_LONG) + l2i;
                                if ((irq = evtchn_to_irq[port]) != -1)
                                        do_IRQ(irq, regs);
                                else {
                                        exit_idle();
                                        evtchn_device_upcall(port);
                                }

                                /* if this is the final port processed, we'll pick up here+1 next time */
                                per_cpu(last_processed_l1i, cpu) = l1i;
                                per_cpu(last_processed_l2i, cpu) = l2i;

                        } while (l2i != BITS_PER_LONG - 1);

                        l2 = active_evtchns(cpu, s, l1i);
                        if (l2 == 0) /* we handled all ports, so we can clear the selector bit */
                                l1 &= ~(1UL << l1i);

                }

                /* If there were nested callbacks then we have more to do. */
                count = per_cpu(upcall_count, cpu);
                per_cpu(upcall_count, cpu) = 0;
        } while (unlikely(count != 1));
}
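
/*
 * The loop above walks Xen's two-level pending bitmap: evtchn_pending_sel
 * selects which word of evtchn_pending[] has work, and active_evtchns()
 * filters that word against this CPU's binding mask and the per-port event
 * mask. The per-CPU last_processed_{l1i,l2i} cursors make each scan resume
 * just past the last port handled, so a busy low-numbered port cannot
 * starve higher-numbered ones.
 */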
static int find_unbound_irq(void)
{
        static int warned;
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        return irq;
        }

        if (!warned) {
                warned = 1;
                printk(KERN_WARNING "No available IRQ to bind to: "
                       "increase NR_DYNIRQS.\n");
        }

        return -ENOSPC;
}

static int bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        BUG_ON(evtchn_to_irq[local_port] != -1);

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();
                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        unsigned int cpu;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
                    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                /* Zap stats across IRQ changes of use. */
                for_each_possible_cpu(cpu)
                        kstat_cpu(cpu).irqs[irq] = 0;
        }

        spin_unlock(&irq_mapping_update_lock);
}
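
/*
 * Only the last unbind of an IRQ tears the mapping down, and a port bound
 * via bind_caller_port_to_irq() is never closed here at all: the caller
 * allocated that port, so the caller remains responsible for closing it.
 */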
int bind_caller_port_to_irqhandler(
        unsigned int caller_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_caller_port_to_irq(caller_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);

int bind_listening_port_to_irqhandler(
        unsigned int remote_domain,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_listening_port_to_irq(remote_domain);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(
        unsigned int remote_domain,
        unsigned int remote_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
        unsigned int virq,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
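
/*
 * Typical driver usage of the bind_*_to_irqhandler() helpers (a sketch
 * only -- the handler and device name below are made up for illustration):
 *
 *     static irqreturn_t debug_interrupt(int irq, void *dev_id,
 *                                        struct pt_regs *regs)
 *     {
 *             return IRQ_HANDLED;
 *     }
 *
 *     irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, debug_interrupt,
 *                                   0, "debug", NULL);
 *     ...
 *     unbind_from_irqhandler(irq, NULL);
 */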
int bind_ipi_to_irqhandler(
        unsigned int ipi,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

#ifdef CONFIG_SMP
void rebind_evtchn_to_cpu(int port, unsigned int cpu)
{
        struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
        int masked;

        masked = test_and_set_evtchn_mask(port);
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
                bind_evtchn_to_cpu(port, cpu);
        if (!masked)
                unmask_evtchn(port);
}
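
/*
 * The port is masked while EVTCHNOP_bind_vcpu is in flight so that an event
 * arriving mid-migration is not delivered to the old VCPU; the final unmask
 * (done only if we were the ones who masked it) re-fires anything that
 * became pending in the meantime.
 */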
static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                rebind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned int irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        shared_info_t *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = test_and_set_evtchn_mask(evtchn);
        synch_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}
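
/*
 * resend_irq_on_evtchn() is the ->retrigger hook for both interrupt types
 * below: it simply marks the port pending again (under a temporary mask)
 * and lets the normal unmask path redeliver it.
 */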
/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
        return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
                unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
        .typename = "Dynamic-irq",
        .startup = startup_dynirq,
        .shutdown = shutdown_dynirq,
        .enable = enable_dynirq,
        .disable = disable_dynirq,
        .ack = ack_dynirq,
        .end = end_dynirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger = resend_irq_on_evtchn,
};

static inline void pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };
        if (unlikely(test_bit(pirq, pirq_needs_eoi)))
                VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
}

static inline void pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status;
        irq_status.irq = pirq;
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;
        clear_bit(pirq, pirq_needs_eoi);
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                set_bit(pirq, pirq_needs_eoi);
}
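
/*
 * pirq_query_unmask() asks Xen whether this physical IRQ needs an explicit
 * PHYSDEVOP_eoi after unmasking (level-triggered lines typically do). The
 * answer is cached in the pirq_needs_eoi bitmap so that pirq_unmask_notify()
 * only makes the hypercall when it is actually required.
 */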
/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
        startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
        } else if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static struct hw_interrupt_type pirq_type = {
        .typename = "Phys-irq",
        .startup = startup_pirq,
        .shutdown = shutdown_pirq,
        .enable = enable_pirq,
        .disable = disable_pirq,
        .ack = ack_pirq,
        .end = end_pirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger = resend_irq_on_evtchn,
};
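
/*
 * The two hw_interrupt_types differ only in setup and completion: a
 * Dynamic-irq already has its event channel by the time it is started up,
 * whereas a Phys-irq must ask Xen for one via EVTCHNOP_bind_pirq, and on
 * ->end it may additionally owe Xen an EOI (see pirq_unmask_notify()).
 */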
int irq_ignore_unhandled(unsigned int irq)
{
        struct physdev_irq_status_query irq_status = { .irq = irq };

        if (!is_running_on_xen())
                return 0;

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !!(irq_status.flags & XENIRQSTAT_shared);
}

void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

int irq_to_evtchn_port(int irq)
{
        return evtchn_from_irq(irq);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        synch_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
                return;
        }

        synch_clear_bit(port, s->evtchn_mask);

        /* Did we miss an interrupt 'edge'? Re-fire if so. */
        if (synch_test_bit(port, s->evtchn_pending) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel))
                vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
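
/*
 * The fast path above only works for ports bound to the current VCPU,
 * because only then can the missed-edge check poke this VCPU's
 * evtchn_pending_sel/evtchn_upcall_pending directly; for any other port
 * the EVTCHNOP_unmask hypercall lets Xen perform the equivalent
 * re-injection.
 */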
void disable_all_local_evtchn(void)
{
        unsigned i, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;

        for (i = 0; i < NR_EVENT_CHANNELS; ++i)
                if (cpu_from_evtchn(i) == cpu)
                        synch_set_bit(i, &s->evtchn_mask[0]);
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);

        }
}
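
/*
 * After save/restore (or live migration) the domain comes back with a fresh
 * event-channel space, so irq_resume() below wipes the stale port numbers
 * and the two helpers above re-bind every VIRQ and IPI that was in use,
 * leaving the IRQ numbers seen by drivers unchanged.
 */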
void irq_resume(void)
{
        unsigned int cpu, pirq, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

void __init xen_init_IRQ(void)
{
        unsigned int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < NR_DYNIRQS; i++) {
                irq_bindcount[dynirq_to_irq(i)] = 0;

                irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[dynirq_to_irq(i)].action = NULL;
                irq_desc[dynirq_to_irq(i)].depth = 1;
                irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
        }

        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        for (i = 0; i < NR_PIRQS; i++) {
                irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) && !is_initial_xendomain())
                        continue;
#endif

                irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[pirq_to_irq(i)].action = NULL;
                irq_desc[pirq_to_irq(i)].depth = 1;
                irq_desc[pirq_to_irq(i)].chip = &pirq_type;