
view linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 10472:fc1c6dfd1807

[LINUX] Transparent virtualization fixes.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author kaf24@firebug.cl.cam.ac.uk
date Wed Jun 21 16:54:09 2006 +0100 (2006-06-21)
parents be05097d5d69
children c9696012fe05

/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
	return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 24);
}
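
/*
 * Illustrative note (not part of the original file): with the layout used by
 * mk_irq_info() above, a VIRQ binding such as mk_irq_info(IRQT_VIRQ, 0, 5)
 * packs to 0x02000005 (IRQT_VIRQ == 2 in the enum above), so
 * type_from_irq() recovers IRQT_VIRQ, index_from_irq() recovers sub-type
 * index 0, and evtchn_from_irq() recovers event-channel port 5.
 */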

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_eoi[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
	set_native_irq_info(irq, cpumask_of_cpu(cpu));

	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		set_native_irq_info(i, cpumask_of_cpu(0));

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
	irq_ctx_init(0);
	xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {		\
	(regs)->IRQ_REG = ~(irq);	\
	do_IRQ((regs));			\
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);
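
/*
 * Illustrative sketch (not in the original file): the "ubiquitous macros"
 * referred to above are the Xen replacements for local_irq_enable()/__sti(),
 * which clear the per-VCPU callback mask and then force delivery of anything
 * that became pending while events were masked, roughly:
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	barrier();
 *	if (vcpu_info->evtchn_upcall_pending)
 *		force_evtchn_callback();
 */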

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	int irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);
			else {
				exit_idle();
				evtchn_device_upcall(port);
			}
		}
	}
}
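
/*
 * Worked example (added for clarity, not in the original file): with
 * BITS_PER_LONG == 64, selector bit l1i == 1 points at word 1 of
 * evtchn_pending[], and l2i == 4 within that word corresponds to
 * port = 1 * 64 + 4 = 68, which is then routed either to its bound IRQ via
 * do_IRQ() or to the event-channel device driver via evtchn_device_upcall().
 */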

static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

static int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[evtchn]) == -1) {
		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(
	unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
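
/*
 * Usage sketch (added for illustration; the handler and device names below
 * are hypothetical). A frontend driver typically binds an inter-domain event
 * channel it learned about via xenstore, and tears the binding down on
 * disconnect:
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id,
 *					struct pt_regs *regs)
 *	{
 *		return IRQ_HANDLED;	// process the shared ring here
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;		// request_irq() failed
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */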

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
		unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
	"Dynamic-irq",
	startup_dynirq,
	shutdown_dynirq,
	enable_dynirq,
	disable_dynirq,
	ack_dynirq,
	end_dynirq,
	set_affinity_irq
};

static inline void pirq_unmask_notify(int pirq)
{
	struct physdev_eoi eoi = { .irq = pirq };
	if (unlikely(test_bit(pirq, &pirq_needs_eoi[0])))
		(void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}

static inline void pirq_query_unmask(int pirq)
{
	struct physdev_irq_status_query irq_status;
	irq_status.irq = pirq;
	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
	clear_bit(pirq, &pirq_needs_eoi[0]);
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		set_bit(pirq, &pirq_needs_eoi[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static void disable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static struct hw_interrupt_type pirq_type = {
	"Phys-irq",
	startup_pirq,
	shutdown_pirq,
	enable_pirq,
	disable_pirq,
	ack_pirq,
	end_pirq,
	set_affinity_irq
};

int irq_ignore_unhandled(unsigned int irq)
{
	struct physdev_irq_status_query irq_status = { .irq = irq };

	if (!is_running_on_xen())
		return 0;

	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
	return !!(irq_status.flags & XENIRQSTAT_shared);
}

void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i)
{
	int evtchn = evtchn_from_irq(i);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return;
	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel))
		vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
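
/*
 * Note added for clarity (not in the original file): the fast path above only
 * works for the local VCPU because evtchn_pending_sel and
 * evtchn_upcall_pending live in this VCPU's vcpu_info, and setting them for a
 * remote VCPU would not make that VCPU run its upcall. For non-local ports
 * the EVTCHNOP_unmask hypercall lets Xen re-deliver the event to the VCPU the
 * channel is actually bound to.
 */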

void irq_resume(void)
{
	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi bind_ipi;
	int cpu, pirq, virq, ipi, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;
		for (virq = 0; virq < NR_VIRQS; virq++)
			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action = NULL;
		irq_desc[dynirq_to_irq(i)].depth = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action = NULL;
		irq_desc[pirq_to_irq(i)].depth = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}