direct-io.hg

view linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 10558:c9696012fe05

evtchn_do_upcall() has a micro-optimization that depends on xchg being a full memory barrier.
However, xchg on IA64 has only acquire semantics, so an event
channel notification can occasionally be lost. This patch fixes it.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jun 30 10:07:38 2006 +0100 (2006-06-30)
parents fc1c6dfd1807
children d8338b28bcd6
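
The code in question is at the top of evtchn_do_upcall(), further down in this
file; the relevant lines are excerpted here. The master pending flag has to be
cleared before the selector word is fetched and cleared. On x86 the xchg() is
itself a barrier, but on an architecture such as IA64, where xchg() has only
acquire semantics, the explicit barrier taken in the #ifndef CONFIG_X86 branch
is what enforces that ordering and keeps notifications from being lost:

        vcpu_info->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
        /* Clear master pending flag /before/ clearing selector flag. */
        rmb();
#endif
        l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
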
line source
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
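/* Layout, as encoded by mk_irq_info() below: bits 31..24 hold the binding
 * type, bits 23..16 the sub-type index, and bits 15..0 the event channel. */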
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
        return (u8)(irq_info[irq] >> 24);
}

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_eoi[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask[cpu][idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
        set_native_irq_info(irq, cpumask_of_cpu(cpu));

        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;

        /* By default all event channels notify CPU#0. */
        for (i = 0; i < NR_IRQS; i++)
                set_native_irq_info(i, cpumask_of_cpu(0));

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
        irq_ctx_init(0);
        xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
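
/*
 * Re-enter the architecture's do_IRQ() with the one's-complement irq number
 * stashed in the saved IRQ_REG slot of the register frame, where the real
 * handler picks it up.
 */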
#define do_IRQ(irq, regs) do { \
        (regs)->IRQ_REG = ~(irq); \
        do_IRQ((regs)); \
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
        unsigned long l1, l2;
        unsigned int l1i, l2i, port;
        int irq, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
        /* Clear master pending flag /before/ clearing selector flag. */
        rmb();
#endif
        l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * BITS_PER_LONG) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1)
                                do_IRQ(irq, regs);
                        else {
                                exit_idle();
                                evtchn_device_upcall(port);
                        }
                }
        }
}

static int find_unbound_irq(void)
{
        int irq;

        /* Only allocate from dynirq range */
        for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
                if (irq_bindcount[irq] == 0)
                        break;

        if (irq == NR_IRQS)
                panic("No available IRQ to bind to: increase NR_IRQS!\n");

        return irq;
}

static int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[evtchn]) == -1) {
                irq = find_unbound_irq();
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                irq = find_unbound_irq();
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(
        unsigned int evtchn,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
        unsigned int virq,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
        unsigned int ipi,
        unsigned int cpu,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
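
/*
 * Usage sketch for the binders above. The event channel, handler and device
 * cookie are placeholders for whatever a driver actually owns:
 *
 *        static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *        {
 *                return IRQ_HANDLED;
 *        }
 *
 *        irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "my-dev", my_dev);
 *        if (irq < 0)
 *                return irq;        (a request_irq() failure is passed back)
 *        ...
 *        unbind_from_irqhandler(irq, my_dev);
 *
 * unbind_from_irqhandler() frees the handler and drops the binding; the port
 * itself is closed once the last reference to the irq goes away.
 */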

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = first_cpu(dest);
        rebind_irq_to_cpu(irq, tcpu);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
        return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
                unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
        "Dynamic-irq",
        startup_dynirq,
        shutdown_dynirq,
        enable_dynirq,
        disable_dynirq,
        ack_dynirq,
        end_dynirq,
        set_affinity_irq
};

static inline void pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };
        if (unlikely(test_bit(pirq, &pirq_needs_eoi[0])))
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
}

static inline void pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status;
        irq_status.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
        clear_bit(pirq, &pirq_needs_eoi[0]);
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                set_bit(pirq, &pirq_needs_eoi[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static void disable_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq_to_pirq(irq));
        }
}

static struct hw_interrupt_type pirq_type = {
        "Phys-irq",
        startup_pirq,
        shutdown_pirq,
        enable_pirq,
        disable_pirq,
        ack_pirq,
        end_pirq,
        set_affinity_irq
};

int irq_ignore_unhandled(unsigned int irq)
{
        struct physdev_irq_status_query irq_status = { .irq = irq };

        if (!is_running_on_xen())
                return 0;

        (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
        return !!(irq_status.flags & XENIRQSTAT_shared);
}

void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i)
{
        int evtchn = evtchn_from_irq(i);
        shared_info_t *s = HYPERVISOR_shared_info;
        if (!VALID_EVTCHN(evtchn))
                return;
        BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
        synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

void mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = smp_processor_id();
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel))
                vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void irq_resume(void)
{
        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;
        int cpu, pirq, virq, ipi, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++)
                BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for_each_possible_cpu(cpu) {
                if (cpu == 0)
                        continue;
                for (virq = 0; virq < NR_VIRQS; virq++)
                        BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
                for (ipi = 0; ipi < NR_IPIS; ipi++)
                        BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
                irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = 0;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

void __init xen_init_IRQ(void)
{
        int i;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < NR_DYNIRQS; i++) {
                irq_bindcount[dynirq_to_irq(i)] = 0;

                irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[dynirq_to_irq(i)].action = NULL;
                irq_desc[dynirq_to_irq(i)].depth = 1;
                irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
        }

        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
        for (i = 0; i < NR_PIRQS; i++) {
                irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif

                irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
                irq_desc[pirq_to_irq(i)].action = NULL;
                irq_desc[pirq_to_irq(i)].depth = 1;
                irq_desc[pirq_to_irq(i)].handler = &pirq_type;
        }
}
853 }