ia64/xen-unstable

linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 9869:d265c79031af

Allocate unbound IRQs only from the dynirq range, since the
pirq range is reserved for physical devices. This avoids
unnecessary checks on the pirq range.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Thu Apr 27 09:43:49 2006 +0100 (2006-04-27)
parents:  1d69cff40b8c
children: 42a8e3101c6c
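The change is visible in find_unbound_irq() below: the search for a free IRQ is confined to [DYNIRQ_BASE, NR_IRQS), so pirq slots (which are statically bound 1:1 in init_IRQ() below) are never handed out for dynamic bindings. The relevant lines are:

	/* Only allocate from dynirq range */
	for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;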
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return ((type << 24) | (index << 16) | evtchn);
}
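
/*
 * Layout implied by the constructor above and the accessors below:
 *   bits 31-24  binding type (IRQT_*)
 *   bits 23-16  sub-type index (e.g. VIRQ or IPI number)
 *   bits 15-0   event-channel port
 */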
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
	return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 24);
}

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	/* By default all event channels notify CPU#0. */
	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {			\
	(regs)->IRQ_REG = ~(irq);		\
	do_IRQ((regs));				\
} while (0)
#endif
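
/*
 * NB. The inner do_IRQ((regs)) call still refers to the extern function
 * declared above: a function-like macro is not re-expanded inside its own
 * replacement text. The macro simply stashes the (one's-complemented) IRQ
 * number in orig_eax/orig_rax before handing control to the real do_IRQ().
 */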
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	int irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
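	/*
	 * Two-level scan: each set bit in the selector word 'l1' names a word
	 * of the shared evtchn_pending[] array; active_evtchns() then filters
	 * that word against the event-channel mask and this CPU's binding to
	 * find the individual ports that need servicing.
	 */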
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);
			else {
				exit_idle();
				evtchn_device_upcall(port);
			}
		}
	}
}

static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

static int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[evtchn]) == -1) {
		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = cpu;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_virq.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		op.u.bind_ipi.vcpu = cpu;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_ipi.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_close };
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		op.u.close.port = evtchn;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(
	unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
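
/*
 * Typical driver usage (hypothetical sketch; names are illustrative only):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-device", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */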
int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

#ifdef CONFIG_SMP
static void do_nothing_function(void *ign)
{
}
#endif

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
	int evtchn;

	spin_lock(&irq_mapping_update_lock);

	evtchn = evtchn_from_irq(irq);
	if (!VALID_EVTCHN(evtchn)) {
		spin_unlock(&irq_mapping_update_lock);
		return;
	}

	/* Send future instances of this interrupt to other vcpu. */
	op.u.bind_vcpu.port = evtchn;
	op.u.bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(&op) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	spin_unlock(&irq_mapping_update_lock);

	/*
	 * Now send the new target processor a NOP IPI. When this returns, it
	 * will check for any pending interrupts, and so service any that got
	 * delivered to the wrong processor by mistake.
	 *
	 * XXX: The only time this is called with interrupts disabled is from
	 * the hotplug/hotunplug path. In that case, all cpus are stopped with
	 * interrupts disabled, and the missed interrupts will be picked up
	 * when they start again. This is kind of a hack.
	 */
	if (!irqs_disabled())
		smp_call_function(do_nothing_function, NULL, 0, 0);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
		unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
	"Dynamic-irq",
	startup_dynirq,
	shutdown_dynirq,
	enable_dynirq,
	disable_dynirq,
	ack_dynirq,
	end_dynirq,
	set_affinity_irq
};
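
/* Installed as irq_desc[irq].handler for the dynamic-IRQ range by init_IRQ() below. */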
static inline void pirq_unmask_notify(int pirq)
{
	physdev_op_t op;
	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
		(void)HYPERVISOR_physdev_op(&op);
	}
}

static inline void pirq_query_unmask(int pirq)
{
	physdev_op_t op;
	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
	op.u.irq_status_query.irq = pirq;
	(void)HYPERVISOR_physdev_op(&op);
	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
		set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	op.u.bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(&op) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = op.u.bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_close };
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	op.u.close.port = evtchn;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static void disable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static struct hw_interrupt_type pirq_type = {
	"Phys-irq",
	startup_pirq,
	shutdown_pirq,
	enable_pirq,
	disable_pirq,
	ack_pirq,
	end_pirq,
	set_affinity_irq
};
void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
	int evtchn = evtchn_from_irq(i);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return;
	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
				   .u.unmask.port = port };
		(void)HYPERVISOR_event_channel_op(&op);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void irq_resume(void)
{
	evtchn_op_t op;
	int cpu, pirq, virq, ipi, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++)
			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		memset(&op, 0, sizeof(op));
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = 0;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		memset(&op, 0, sizeof(op));
		op.cmd = EVTCHNOP_bind_ipi;
		op.u.bind_ipi.vcpu = 0;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
void __init init_IRQ(void)
{
	int i;
	int cpu;

	irq_ctx_init(0);

	spin_lock_init(&irq_mapping_update_lock);

	init_evtchn_cpu_bindings();

	/* No VIRQ or IPI bindings. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		for (i = 0; i < NR_VIRQS; i++)
			per_cpu(virq_to_irq, cpu)[i] = -1;
		for (i = 0; i < NR_IPIS; i++)
			per_cpu(ipi_to_irq, cpu)[i] = -1;
	}

	/* No event-channel -> IRQ mappings. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_irq[i] = -1;
		mask_evtchn(i); /* No event channels are 'live' right now. */
	}

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action = NULL;
		irq_desc[dynirq_to_irq(i)].depth = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action = NULL;
		irq_desc[pirq_to_irq(i)].depth = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */