ia64/xen-unstable
linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 9555:d76a7a40f3a9

Fix do_IRQ high bit masking.
Instead of setting the highest bit (which isn't easily done on native x86_64),
negate the interrupt vector stored in orig_{e,r}ax.
Also add a patch for the native build.

Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author    cl349@firebug.cl.cam.ac.uk
date      Fri Mar 31 17:44:26 2006 +0100
parents   4109c4e7804a
children  2ccaa3879417
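
A quick illustration of the convention described above (a hypothetical, self-contained
sketch, not part of the changeset itself): storing the one's complement of the IRQ
number in orig_{e,r}ax makes the saved value negative, and the handler can recover
the IRQ simply by complementing it again.

#include <assert.h>

int main(void)
{
	long orig_ax;
	int irq = 9;

	orig_ax = ~irq;               /* what the do_IRQ() wrapper in this file stores */
	assert(orig_ax < 0);          /* complement of a non-negative vector is negative */
	assert((int)~orig_ax == irq); /* complementing again recovers the IRQ */
	return 0;
}
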
line source
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/synch_bitops.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
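/* Layout: type in bits 31-24, sub-type index in bits 23-16, and event
 * channel in bits 15-0 (see the accessors below). */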
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
	return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 24);
}

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];

#ifdef CONFIG_SMP
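
/* cpu_evtchn[] maps each event channel to the CPU it is currently bound to;
 * cpu_evtchn_mask[cpu] is the bitmap of channels bound to that CPU. */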
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	/* By default all event channels notify CPU#0. */
	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
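/* As the changeset comment above explains, the IRQ is stored as its bitwise
 * complement (rather than with the high bit set, which isn't easily done on
 * native x86_64) before handing the frame to the generic do_IRQ(). */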
#define do_IRQ(irq, regs) do {				\
	(regs)->IRQ_REG = ~(irq);			\
	do_IRQ((regs));					\
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL_GPL(force_evtchn_callback);

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	int irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
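
	/* Each set bit in l1 selects a word of the shared evtchn_pending[]
	 * array that may hold pending ports for this VCPU. */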
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);
			else {
				exit_idle();
				evtchn_device_upcall(port);
			}
		}
	}
}

static int find_unbound_irq(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

static int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[evtchn]) == -1) {
		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = cpu;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_virq.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		op.u.bind_ipi.vcpu = cpu;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_ipi.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_close };
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		op.u.close.port = evtchn;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(
	unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

#ifdef CONFIG_SMP
static void do_nothing_function(void *ign)
{
}
#endif

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
	int evtchn;

	spin_lock(&irq_mapping_update_lock);

	evtchn = evtchn_from_irq(irq);
	if (!VALID_EVTCHN(evtchn)) {
		spin_unlock(&irq_mapping_update_lock);
		return;
	}

	/* Send future instances of this interrupt to the other vcpu. */
	op.u.bind_vcpu.port = evtchn;
	op.u.bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(&op) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	spin_unlock(&irq_mapping_update_lock);

	/*
	 * Now send the new target processor a NOP IPI. When this returns, it
	 * will check for any pending interrupts, and so service any that got
	 * delivered to the wrong processor by mistake.
	 *
	 * XXX: The only time this is called with interrupts disabled is from
	 * the hotplug/hotunplug path. In that case, all cpus are stopped with
	 * interrupts disabled, and the missed interrupts will be picked up
	 * when they start again. This is kind of a hack.
	 */
	if (!irqs_disabled())
		smp_call_function(do_nothing_function, NULL, 0, 0);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

/*
 * Interface to generic handling in irq.c
 */

static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}

static void shutdown_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
		unmask_evtchn(evtchn);
}

static struct hw_interrupt_type dynirq_type = {
	"Dynamic-irq",
	startup_dynirq,
	shutdown_dynirq,
	enable_dynirq,
	disable_dynirq,
	ack_dynirq,
	end_dynirq,
	set_affinity_irq
};
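
/* PIRQs flagged in pirq_needs_unmask_notify[] require a
 * PHYSDEVOP_IRQ_UNMASK_NOTIFY hypercall when they are unmasked; the flag is
 * discovered via PHYSDEVOP_IRQ_STATUS_QUERY in pirq_query_unmask() below. */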
static inline void pirq_unmask_notify(int pirq)
{
	physdev_op_t op;
	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
		(void)HYPERVISOR_physdev_op(&op);
	}
}

static inline void pirq_query_unmask(int pirq)
{
	physdev_op_t op;
	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
	op.u.irq_status_query.irq = pirq;
	(void)HYPERVISOR_physdev_op(&op);
	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
		set_bit(pirq, &pirq_needs_unmask_notify[0]);
}

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	op.u.bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(&op) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = op.u.bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	evtchn_op_t op = { .cmd = EVTCHNOP_close };
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	op.u.close.port = evtchn;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static void disable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static struct hw_interrupt_type pirq_type = {
	"Phys-irq",
	startup_pirq,
	shutdown_pirq,
	enable_pirq,
	disable_pirq,
	ack_pirq,
	end_pirq,
	set_affinity_irq
};

void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
	int evtchn = evtchn_from_irq(i);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return;
	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
				   .u.unmask.port = port };
		(void)HYPERVISOR_event_channel_op(&op);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void irq_resume(void)
{
	evtchn_op_t op;
	int cpu, pirq, virq, ipi, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		for (virq = 0; virq < NR_VIRQS; virq++)
			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		memset(&op, 0, sizeof(op));
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = 0;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		memset(&op, 0, sizeof(op));
		op.cmd = EVTCHNOP_bind_ipi;
		op.u.bind_ipi.vcpu = 0;
		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
		evtchn = op.u.bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

void __init init_IRQ(void)
{
	int i;
	int cpu;

	irq_ctx_init(0);

	spin_lock_init(&irq_mapping_update_lock);

	init_evtchn_cpu_bindings();

	/* No VIRQ or IPI bindings. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		for (i = 0; i < NR_VIRQS; i++)
			per_cpu(virq_to_irq, cpu)[i] = -1;
		for (i = 0; i < NR_IPIS; i++)
			per_cpu(ipi_to_irq, cpu)[i] = -1;
	}

	/* No event-channel -> IRQ mappings. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtchn_to_irq[i] = -1;
		mask_evtchn(i); /* No event channels are 'live' right now. */
	}

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action = NULL;
		irq_desc[dynirq_to_irq(i)].depth = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action = NULL;
		irq_desc[pirq_to_irq(i)].depth = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}

/*
 * Local variables:
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * c-indent-level: 8
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */