ia64/xen-unstable
linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 13860:ac465fc7c78f

linux: Zap /proc/interrupts count when a dynamic IRQ is unbound.
Signed-off-by: Keir Fraser <keir@xensource.com>

author   Keir Fraser <keir@xensource.com>
date     Wed Feb 07 00:39:48 2007 +0000
parents  373b09ddc905
children e47738923a05

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT
};

/* Constructor for packed IRQ information. */
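/*
 * Layout, as implied by the accessors below: bits 31-24 hold the binding
 * type, bits 23-16 the sub-type index (e.g. VIRQ or IPI number), and bits
 * 15-0 the event-channel port, so ports must fit in 16 bits here.
 */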
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 24);
}

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
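
/*
 * active_evtchns() below yields the events in word 'idx' of the shared
 * pending bitmap that are unmasked and bound to this CPU.
 */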
static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
        set_native_irq_info(irq, cpumask_of_cpu(cpu));

        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;

        /* By default all event channels notify CPU#0. */
        for (i = 0; i < NR_IRQS; i++)
                set_native_irq_info(i, cpumask_of_cpu(0));

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
        irq_ctx_init(0);
        xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
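/*
 * Route an event-channel IRQ into the arch do_IRQ(): the IRQ number is
 * stashed, one's-complemented, in the pt_regs slot named by IRQ_REG above,
 * which is presumably where the Xen-aware arch interrupt path recovers it.
 */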
#define do_IRQ(irq, regs) do {                  \
        (regs)->IRQ_REG = ~(irq);               \
        do_IRQ((regs));                         \
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };

/* NB. Interrupts are disabled on entry. */
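/*
 * Two-level scan: evtchn_pending_sel is a per-VCPU word in which each set
 * bit selects one word of the shared evtchn_pending[] bitmap. Every pending,
 * unmasked event bound to this CPU is then dispatched either to the IRQ
 * subsystem (if an IRQ is bound) or to the evtchn device driver.
 */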
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
        unsigned long l1, l2;
        unsigned int l1i, l2i, port, count;
        int irq, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        do {
                /* Avoid a callback storm when we reenable delivery. */
                vcpu_info->evtchn_upcall_pending = 0;

                /* Nested invocations bail immediately. */
                if (unlikely(per_cpu(upcall_count, cpu)++))
                        return;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                rmb();
#endif
                l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (l1 != 0) {
                        l1i = __ffs(l1);
                        l1 &= ~(1UL << l1i);

                        while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                                l2i = __ffs(l2);

                                port = (l1i * BITS_PER_LONG) + l2i;
                                if ((irq = evtchn_to_irq[port]) != -1)
                                        do_IRQ(irq, regs);
                                else {
                                        exit_idle();
                                        evtchn_device_upcall(port);
                                }
                        }
                }

                /* If there were nested callbacks then we have more to do. */
                count = per_cpu(upcall_count, cpu);
                per_cpu(upcall_count, cpu) = 0;
        } while (unlikely(count != 1));
}

static int find_unbound_irq(void)
{
        static int warned;
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        return irq;
        }

        if (!warned) {
                warned = 1;
                printk(KERN_WARNING "No available IRQ to bind to: "
                       "increase NR_DYNIRQS.\n");
        }

        return -ENOSPC;
}
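
/*
 * A "caller port" is an event channel the caller already owns; unlike the
 * bindings below it is not closed again on unbind (note the IRQT_CALLER_PORT
 * check in unbind_from_irq()).
 */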
static int bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        BUG_ON(evtchn_to_irq[local_port] != -1);

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();
                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int cpu, evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
                    HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                /* Zap stats across IRQ changes of use. */
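                /* (The kstat counts below are what /proc/interrupts reports.) */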
                for_each_possible_cpu(cpu)
                        kstat_cpu(cpu).irqs[irq] = 0;
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_caller_port_to_irqhandler(
        unsigned int caller_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_caller_port_to_irq(caller_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);

int bind_listening_port_to_irqhandler(
        unsigned int remote_domain,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_listening_port_to_irq(remote_domain);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(
        unsigned int remote_domain,
        unsigned int remote_port,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
        unsigned int virq,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
        unsigned int ipi,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

#ifdef CONFIG_SMP
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}
#endif
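
/*
 * Retrigger hook for the IRQ types below: mark the event pending while it is
 * masked, then unmask so the normal unmask path re-raises the upcall.
 */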
int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        shared_info_t *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = synch_test_and_set_bit(evtchn, s->evtchn_mask);
        synch_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
        return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
                unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
        .typename = "Dynamic-irq",
        .startup = startup_dynirq,
        .shutdown = shutdown_dynirq,
        .enable = enable_dynirq,
        .disable = disable_dynirq,
        .ack = ack_dynirq,
        .end = end_dynirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger = resend_irq_on_evtchn,
};

static inline void pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };
        if (unlikely(test_bit(pirq, pirq_needs_eoi)))
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}

static inline void pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status;
        irq_status.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
        clear_bit(pirq, pirq_needs_eoi);
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                set_bit(pirq, pirq_needs_eoi);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static void disable_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static struct hw_interrupt_type pirq_type = {
        .typename = "Phys-irq",
        .startup = startup_pirq,
        .shutdown = shutdown_pirq,
        .enable = enable_pirq,
        .disable = disable_pirq,
        .ack = ack_pirq,
        .end = end_pirq,
#ifdef CONFIG_SMP
        .set_affinity = set_affinity_irq,
#endif
        .retrigger = resend_irq_on_evtchn,
};

int irq_ignore_unhandled(unsigned int irq)
{
        struct physdev_irq_status_query irq_status = { .irq = irq };

        if (!is_running_on_xen())
                return 0;

        (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
        return !!(irq_status.flags & XENIRQSTAT_shared);
}

void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

int irq_to_evtchn_port(int irq)
{
        return evtchn_from_irq(irq);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        synch_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, s->evtchn_mask);

        /* Did we miss an interrupt 'edge'? Re-fire if so. */
        if (synch_test_bit(port, s->evtchn_pending) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel))
                vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void irq_resume(void)
{
        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;
        int cpu, pirq, virq, ipi, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for_each_possible_cpu(cpu) {
                if (cpu == 0)
                        continue;
                for (virq = 0; virq < NR_VIRQS; virq++)
                        BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
                for (ipi = 0; ipi < NR_IPIS; ipi++)
                        BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = 0;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

void __init xen_init_IRQ(void)
{
        int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < NR_DYNIRQS; i++) {
                irq_bindcount[dynirq_to_irq(i)] = 0;

                irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[dynirq_to_irq(i)].action = NULL;
                irq_desc[dynirq_to_irq(i)].depth = 1;
                irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
        }

        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        for (i = 0; i < NR_PIRQS; i++) {
                irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) && !is_initial_xendomain())
                        continue;
#endif

                irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[pirq_to_irq(i)].action = NULL;
                irq_desc[pirq_to_irq(i)].depth = 1;
                irq_desc[pirq_to_irq(i)].chip = &pirq_type;