ia64/xen-unstable

view xenolinux-2.4.26-sparse/arch/xen/kernel/evtchn.c @ 1314:33e86963d2a4

bitkeeper revision 1.874 (408808d0T9IfojQOKayZ-Kw-GYP94A)

Fix event-channel suspend/resume.
author kaf24@scramble.cl.cam.ac.uk
date Thu Apr 22 18:02:56 2004 +0000 (2004-04-22)
parents 6532f3f768df
children 5a6113c65ead acc04d188782
line source
1 /******************************************************************************
2 * evtchn.c
3 *
4 * Communication via Xen event channels.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 */
9 #include <linux/config.h>
10 #include <linux/irq.h>
11 #include <linux/interrupt.h>
12 #include <linux/sched.h>
13 #include <linux/kernel_stat.h>
14 #include <asm/atomic.h>
15 #include <asm/system.h>
16 #include <asm/ptrace.h>
17 #include <asm/synch_bitops.h>
18 #include <asm/ctrl_if.h>
19 #include <asm/hypervisor.h>
20 #include <asm/hypervisor-ifs/event_channel.h>
21 #include <asm/hypervisor-ifs/physdev.h>
23 /*
24 * This lock protects updates to the following mapping and reference-count
25 * arrays. The lock does not need to be acquired to read the mapping tables.
26 */
27 static spinlock_t irq_mapping_update_lock;
29 /* IRQ <-> event-channel mappings. A value of -1 means 'unbound'. */
30 static int evtchn_to_irq[NR_EVENT_CHANNELS];
31 static int irq_to_evtchn[NR_IRQS];
33 /* IRQ <-> VIRQ mapping. A value of -1 means 'unbound'. */
34 static int virq_to_irq[NR_VIRQS];
36 /* Reference counts for bindings to IRQs. Zero means the IRQ is free. */
37 static int irq_bindcount[NR_IRQS];
39 /* Upcall to generic IRQ layer. */
40 extern asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs);
/* An event-channel number is valid iff it is not the -1 sentinel above. */
42 #define VALID_EVTCHN(_chn) ((_chn) != -1)
44 void evtchn_do_upcall(struct pt_regs *regs)
45 {
46 unsigned long l1, l2;
47 unsigned int l1i, l2i, port;
48 int irq;
49 unsigned long flags;
50 shared_info_t *s = HYPERVISOR_shared_info;
52 local_irq_save(flags);
54 while ( s->vcpu_data[0].evtchn_upcall_pending )
55 {
56 s->vcpu_data[0].evtchn_upcall_pending = 0;
57 /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
58 l1 = xchg(&s->evtchn_pending_sel, 0);
59 while ( (l1i = ffs(l1)) != 0 )
60 {
61 l1i--;
62 l1 &= ~(1 << l1i);
64 l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
65 while ( (l2i = ffs(l2)) != 0 )
66 {
67 l2i--;
68 l2 &= ~(1 << l2i);
70 port = (l1i << 5) + l2i;
71 if ( (irq = evtchn_to_irq[port]) != -1 )
72 do_IRQ(irq, regs);
73 else
74 evtchn_device_upcall(port);
75 }
76 }
77 }
79 local_irq_restore(flags);
80 }
83 static int find_unbound_irq(void)
84 {
85 int irq;
87 for ( irq = 0; irq < NR_IRQS; irq++ )
88 if ( irq_bindcount[irq] == 0 )
89 break;
91 if ( irq == NR_IRQS )
92 panic("No available IRQ to bind to: increase NR_IRQS!\n");
94 return irq;
95 }
97 int bind_virq_to_irq(int virq)
98 {
99 evtchn_op_t op;
100 int evtchn, irq;
102 spin_lock(&irq_mapping_update_lock);
104 if ( (irq = virq_to_irq[virq]) == -1 )
105 {
106 op.cmd = EVTCHNOP_bind_virq;
107 op.u.bind_virq.virq = virq;
108 if ( HYPERVISOR_event_channel_op(&op) != 0 )
109 panic("Failed to bind virtual IRQ %d\n", virq);
110 evtchn = op.u.bind_virq.port;
112 irq = find_unbound_irq();
113 evtchn_to_irq[evtchn] = irq;
114 irq_to_evtchn[irq] = evtchn;
116 virq_to_irq[virq] = irq;
117 }
119 irq_bindcount[irq]++;
121 spin_unlock(&irq_mapping_update_lock);
123 return irq;
124 }
126 void unbind_virq_from_irq(int virq)
127 {
128 evtchn_op_t op;
129 int irq = virq_to_irq[virq];
130 int evtchn = irq_to_evtchn[irq];
132 spin_lock(&irq_mapping_update_lock);
134 if ( --irq_bindcount[irq] == 0 )
135 {
136 op.cmd = EVTCHNOP_close;
137 op.u.close.dom = DOMID_SELF;
138 op.u.close.port = evtchn;
139 if ( HYPERVISOR_event_channel_op(&op) != 0 )
140 panic("Failed to unbind virtual IRQ %d\n", virq);
142 evtchn_to_irq[evtchn] = -1;
143 irq_to_evtchn[irq] = -1;
144 virq_to_irq[virq] = -1;
145 }
147 spin_unlock(&irq_mapping_update_lock);
148 }
150 int bind_evtchn_to_irq(int evtchn)
151 {
152 int irq;
154 spin_lock(&irq_mapping_update_lock);
156 if ( (irq = evtchn_to_irq[evtchn]) == -1 )
157 {
158 irq = find_unbound_irq();
159 evtchn_to_irq[evtchn] = irq;
160 irq_to_evtchn[irq] = evtchn;
161 }
163 irq_bindcount[irq]++;
165 spin_unlock(&irq_mapping_update_lock);
167 return irq;
168 }
170 void unbind_evtchn_from_irq(int evtchn)
171 {
172 int irq = evtchn_to_irq[evtchn];
174 spin_lock(&irq_mapping_update_lock);
176 if ( --irq_bindcount[irq] == 0 )
177 {
178 evtchn_to_irq[evtchn] = -1;
179 irq_to_evtchn[irq] = -1;
180 }
182 spin_unlock(&irq_mapping_update_lock);
183 }
186 /*
187 * Interface to generic handling in irq.c
188 */
190 static unsigned int startup_dynirq(unsigned int irq)
191 {
192 unmask_evtchn(irq_to_evtchn[irq]);
193 return 0;
194 }
196 static void shutdown_dynirq(unsigned int irq)
197 {
198 mask_evtchn(irq_to_evtchn[irq]);
199 }
201 static void enable_dynirq(unsigned int irq)
202 {
203 unmask_evtchn(irq_to_evtchn[irq]);
204 }
206 static void disable_dynirq(unsigned int irq)
207 {
208 mask_evtchn(irq_to_evtchn[irq]);
209 }
211 static void ack_dynirq(unsigned int irq)
212 {
213 mask_evtchn(irq_to_evtchn[irq]);
214 clear_evtchn(irq_to_evtchn[irq]);
215 }
217 static void end_dynirq(unsigned int irq)
218 {
219 if ( !(irq_desc[irq].status & IRQ_DISABLED) )
220 unmask_evtchn(irq_to_evtchn[irq]);
221 }
/* hw_interrupt_type driver for dynamically-bound (event-channel) IRQs.
 * Positional initializer; slot meanings inferred from the callback names --
 * TODO confirm order against the 2.4 hw_interrupt_type definition. */
223 static struct hw_interrupt_type dynirq_type = {
224 "Dynamic-irq",   /* typename */
225 startup_dynirq,
226 shutdown_dynirq,
227 enable_dynirq,
228 disable_dynirq,
229 ack_dynirq,
230 end_dynirq,
231 NULL             /* final slot unused (no affinity support) */
232 };
/*
 * Notify Xen that a physical IRQ has been unmasked so that any pending
 * interrupt can be re-delivered.
 *
 * NOTE(review): the 'pirq' argument is never copied into 'op', and only
 * op.cmd is initialised -- presumably PHYSDEVOP_UNMASK_IRQ takes no
 * per-IRQ payload; confirm against asm/hypervisor-ifs/physdev.h.
 */
234 static inline void pirq_unmask_notify(int pirq)
235 {
236 physdev_op_t op;
237 op.cmd = PHYSDEVOP_UNMASK_IRQ;
238 (void)HYPERVISOR_physdev_op(&op);
239 }
241 /*
242 * On startup, if there is no action associated with the IRQ then we are
243 * probing. In this case we should not share with others as it will confuse us.
244 */
/* True iff no handler is installed for _irq, i.e. the IRQ is being probed. */
245 #define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
247 static unsigned int startup_pirq(unsigned int irq)
248 {
249 evtchn_op_t op;
250 int evtchn;
252 op.cmd = EVTCHNOP_bind_pirq;
253 op.u.bind_pirq.pirq = irq;
254 /* NB. We are happy to share unless we are probing. */
255 op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
256 if ( HYPERVISOR_event_channel_op(&op) != 0 )
257 {
258 if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
259 printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
260 return 0;
261 }
262 evtchn = op.u.bind_pirq.port;
264 evtchn_to_irq[evtchn] = irq;
265 irq_to_evtchn[irq] = evtchn;
267 unmask_evtchn(evtchn);
268 pirq_unmask_notify(irq_to_pirq(irq));
270 return 0;
271 }
273 static void shutdown_pirq(unsigned int irq)
274 {
275 evtchn_op_t op;
276 int evtchn = irq_to_evtchn[irq];
278 if ( !VALID_EVTCHN(evtchn) )
279 return;
281 mask_evtchn(evtchn);
283 op.cmd = EVTCHNOP_close;
284 op.u.close.dom = DOMID_SELF;
285 op.u.close.port = evtchn;
286 if ( HYPERVISOR_event_channel_op(&op) != 0 )
287 panic("Failed to unbind physical IRQ %d\n", irq);
289 evtchn_to_irq[evtchn] = -1;
290 irq_to_evtchn[irq] = -1;
291 }
293 static void enable_pirq(unsigned int irq)
294 {
295 int evtchn = irq_to_evtchn[irq];
296 if ( !VALID_EVTCHN(evtchn) )
297 return;
298 unmask_evtchn(evtchn);
299 pirq_unmask_notify(irq_to_pirq(irq));
300 }
302 static void disable_pirq(unsigned int irq)
303 {
304 int evtchn = irq_to_evtchn[irq];
305 if ( !VALID_EVTCHN(evtchn) )
306 return;
307 mask_evtchn(evtchn);
308 }
310 static void ack_pirq(unsigned int irq)
311 {
312 int evtchn = irq_to_evtchn[irq];
313 if ( !VALID_EVTCHN(evtchn) )
314 return;
315 mask_evtchn(evtchn);
316 clear_evtchn(evtchn);
317 }
319 static void end_pirq(unsigned int irq)
320 {
321 int evtchn = irq_to_evtchn[irq];
322 if ( !VALID_EVTCHN(evtchn) )
323 return;
324 if ( !(irq_desc[irq].status & IRQ_DISABLED) )
325 {
326 unmask_evtchn(evtchn);
327 pirq_unmask_notify(irq_to_pirq(irq));
328 }
329 }
/* hw_interrupt_type driver for physical (pass-through) IRQs.
 * Positional initializer; slot meanings inferred from the callback names --
 * TODO confirm order against the 2.4 hw_interrupt_type definition. */
331 static struct hw_interrupt_type pirq_type = {
332 "Phys-irq",     /* typename */
333 startup_pirq,
334 shutdown_pirq,
335 enable_pirq,
336 disable_pirq,
337 ack_pirq,
338 end_pirq,
339 NULL            /* final slot unused (no affinity support) */
340 };
/*
 * Handler bound to VIRQ_MISDIRECT in init_IRQ(). Deliberately empty:
 * presumably such events only need to be consumed, not acted upon.
 */
342 static void misdirect_interrupt(int irq, void *dev_id, struct pt_regs *regs)
343 {
344 /* nothing */
345 }
/* irqaction registering the no-op misdirect handler. Positional fields,
 * presumably { handler, flags, mask, name, dev_id, next } as in the 2.4
 * struct irqaction -- TODO confirm field order. */
347 static struct irqaction misdirect_action = {
348 misdirect_interrupt, /* handler */
349 SA_INTERRUPT,        /* flags */
350 0,                   /* mask */
351 "misdirect",         /* name */
352 NULL,                /* dev_id */
353 NULL                 /* next */
354 };
356 void irq_suspend(void)
357 {
358 int virq, irq, evtchn;
360 /* Unbind VIRQs from event channels. */
361 for ( virq = 0; virq < NR_VIRQS; virq++ )
362 {
363 if ( (irq = virq_to_irq[virq]) == -1 )
364 continue;
365 evtchn = irq_to_evtchn[irq];
367 /* Mark the event channel as unused in our table. */
368 evtchn_to_irq[evtchn] = -1;
369 irq_to_evtchn[irq] = -1;
370 }
372 /*
373 * We should now be unbound from all event channels. Stale bindings to
374 * PIRQs and/or inter-domain event channels will cause us to barf here.
375 */
376 for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
377 if ( evtchn_to_irq[evtchn] != -1 )
378 panic("Suspend attempted while bound to evtchn %d.\n", evtchn);
379 }
382 void irq_resume(void)
383 {
384 evtchn_op_t op;
385 int virq, irq, evtchn;
387 for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
388 mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
390 for ( virq = 0; virq < NR_VIRQS; virq++ )
391 {
392 if ( (irq = virq_to_irq[virq]) == -1 )
393 continue;
395 /* Get a new binding from Xen. */
396 op.cmd = EVTCHNOP_bind_virq;
397 op.u.bind_virq.virq = virq;
398 if ( HYPERVISOR_event_channel_op(&op) != 0 )
399 panic("Failed to bind virtual IRQ %d\n", virq);
400 evtchn = op.u.bind_virq.port;
402 /* Record the new mapping. */
403 evtchn_to_irq[evtchn] = irq;
404 irq_to_evtchn[irq] = evtchn;
406 /* Ready for use. */
407 unmask_evtchn(evtchn);
408 }
409 }
411 void __init init_IRQ(void)
412 {
413 int i;
415 spin_lock_init(&irq_mapping_update_lock);
417 /* No VIRQ -> IRQ mappings. */
418 for ( i = 0; i < NR_VIRQS; i++ )
419 virq_to_irq[i] = -1;
421 /* No event-channel -> IRQ mappings. */
422 for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
423 {
424 evtchn_to_irq[i] = -1;
425 mask_evtchn(i); /* No event channels are 'live' right now. */
426 }
428 /* No IRQ -> event-channel mappings. */
429 for ( i = 0; i < NR_IRQS; i++ )
430 irq_to_evtchn[i] = -1;
432 for ( i = 0; i < NR_DYNIRQS; i++ )
433 {
434 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
435 irq_bindcount[dynirq_to_irq(i)] = 0;
437 irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
438 irq_desc[dynirq_to_irq(i)].action = 0;
439 irq_desc[dynirq_to_irq(i)].depth = 1;
440 irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
441 }
443 for ( i = 0; i < NR_PIRQS; i++ )
444 {
445 /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
446 irq_bindcount[pirq_to_irq(i)] = 1;
448 irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
449 irq_desc[pirq_to_irq(i)].action = 0;
450 irq_desc[pirq_to_irq(i)].depth = 1;
451 irq_desc[pirq_to_irq(i)].handler = &pirq_type;
452 }
454 (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
456 /* This needs to be done early, but after the IRQ subsystem is alive. */
457 ctrl_if_init();
458 }