direct-io.hg

view xen/arch/x86/irq.c @ 9937:65a2cf84b335

Add missing spin_unlock_irq() at xen/arch/x86/irq.c

Changeset 9889:42a8e3101c6c reorganized the code in this file and
missed this spin_unlock_irq(). Without this patch, my machine hangs
completely during boot. With this, it works.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
author kaf24@firebug.cl.cam.ac.uk
date Fri May 05 13:41:35 2006 +0100 (2006-05-05)
parents 0ee104bd6557
children 1a500cc4fcd5
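
The changeset comment above describes a classic bug class: a lock taken on
entry but released on only some exit paths, so the next attempt to take it
deadlocks. The short, self-contained sketch below illustrates that pattern
only; it uses a hypothetical handler and a plain pthread mutex as a stand-in
for the IRQ descriptor lock, and it is not the actual patch hunk.

/* Illustrative sketch only -- not part of xen/arch/x86/irq.c. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_flight = 2;

/* Every return path must release desc_lock; omitting the unlock on the
 * early-exit path is the kind of omission the changeset above fixes. */
static void fake_guest_eoi(void)
{
    pthread_mutex_lock(&desc_lock);

    if ( --in_flight != 0 )
    {
        pthread_mutex_unlock(&desc_lock); /* the easily-forgotten unlock */
        return;
    }

    /* ... final end-of-interrupt work would go here ... */
    pthread_mutex_unlock(&desc_lock);
}

int main(void)
{
    fake_guest_eoi();
    fake_guest_eoi(); /* would deadlock if the early unlock were missing */
    printf("in_flight = %d\n", in_flight);
    return 0;
}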
line source
/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <asm/current.h>
#include <asm/smpboot.h>

/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);

irq_desc_t irq_desc[NR_IRQS];

static void __do_IRQ_guest(int vector);

void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }

static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int vector)
{
    ack_bad_irq(vector);
}

#define shutdown_none disable_none
#define end_none      enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;

asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
    unsigned int vector = regs->entry_vector;
    irq_desc_t *desc = &irq_desc[vector];
    struct irqaction *action;

    perfc_incrc(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(vector);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        __do_IRQ_guest(vector);
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter();
        spin_unlock_irq(&desc->lock);
        action->handler(vector_to_irq(vector), action->dev_id, regs);
        spin_lock_irq(&desc->lock);
        irq_exit();
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(vector);
    spin_unlock(&desc->lock);
}

void free_irq(unsigned int irq)
{
    unsigned int vector = irq_to_vector(irq);
    irq_desc_t *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);
    desc->action = NULL;
    desc->depth = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->shutdown(irq);
    spin_unlock_irqrestore(&desc->lock,flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
}

int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned int vector = irq_to_vector(irq);
    irq_desc_t *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock,flags);
        return -EBUSY;
    }

    desc->action = new;
    desc->depth = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(vector);

    spin_unlock_irqrestore(&desc->lock,flags);

    return 0;
}


/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    u8 ack_type;
#define ACKTYPE_NONE   0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU)   */
#define ACKTYPE_EOI    2 /* EOI on the CPU that was interrupted  */
    cpumask_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

/*
 * Stack of interrupts awaiting EOI on each CPU. These must be popped in
 * order, as only the current highest-priority pending irq can be EOIed.
 */
static struct {
    u8 vector; /* Vector awaiting EOI */
    u8 ready;  /* Ready for EOI now?  */
} pending_eoi[NR_CPUS][NR_VECTORS] __cacheline_aligned;
#define pending_eoi_sp(cpu) (pending_eoi[cpu][NR_VECTORS-1].vector)
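/*
 * NB. pending_eoi_sp() stashes the stack pointer in the 'vector' field of
 * the final pending_eoi[] slot, which is never used for a real entry (see
 * the ASSERT(sp < (NR_VECTORS-1)) in __do_IRQ_guest() below).
 */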

static void __do_IRQ_guest(int vector)
{
    unsigned int irq = vector_to_irq(vector);
    irq_desc_t *desc = &irq_desc[vector];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain *d;
    int i, sp, cpu = smp_processor_id();

    if ( unlikely(action->nr_guests == 0) )
    {
        /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        desc->handler->end(vector);
        return;
    }

    if ( action->ack_type == ACKTYPE_EOI )
    {
        sp = pending_eoi_sp(cpu);
        ASSERT((sp == 0) || (pending_eoi[cpu][sp-1].vector < vector));
        ASSERT(sp < (NR_VECTORS-1));
        pending_eoi[cpu][sp].vector = vector;
        pending_eoi[cpu][sp].ready = 0;
        pending_eoi_sp(cpu) = sp+1;
        cpu_set(cpu, action->cpu_eoi_map);
    }

    for ( i = 0; i < action->nr_guests; i++ )
    {
        d = action->guest[i];
        if ( (action->ack_type != ACKTYPE_NONE) &&
             !test_and_set_bit(irq, d->pirq_mask) )
            action->in_flight++;
        send_guest_pirq(d, irq);
    }
}

/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
    irq_desc_t *desc;
    int vector, sp, cpu = smp_processor_id();

    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(cpu);

    while ( (--sp >= 0) && pending_eoi[cpu][sp].ready )
    {
        vector = pending_eoi[cpu][sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        desc->handler->end(vector);
        spin_unlock(&desc->lock);
    }

    pending_eoi_sp(cpu) = sp+1;
}

static void __set_eoi_ready(irq_desc_t *desc)
{
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    int vector, sp, cpu = smp_processor_id();

    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) ||
         (action->in_flight != 0) ||
         !cpu_test_and_clear(cpu, action->cpu_eoi_map) )
        return;

    sp = pending_eoi_sp(cpu);
    do {
        ASSERT(sp > 0);
    } while ( pending_eoi[cpu][--sp].vector != vector );
    ASSERT(!pending_eoi[cpu][sp].ready);
    pending_eoi[cpu][sp].ready = 1;
}

/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
static void set_eoi_ready(void *data)
{
    irq_desc_t *desc = data;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&desc->lock);
    __set_eoi_ready(desc);
    spin_unlock(&desc->lock);

    flush_ready_eoi(NULL);
}

/*
 * Forcibly flush all pending EOIs on this CPU by emulating end-of-ISR
 * notifications from guests. The caller of this function must ensure that
 * all CPUs execute flush_ready_eoi().
 */
static void flush_all_pending_eoi(void *unused)
{
    irq_desc_t *desc;
    irq_guest_action_t *action;
    int i, vector, sp, cpu = smp_processor_id();

    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(cpu);
    while ( --sp >= 0 )
    {
        if ( pending_eoi[cpu][sp].ready )
            continue;
        vector = pending_eoi[cpu][sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        action = (irq_guest_action_t *)desc->action;
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_GUEST);
        for ( i = 0; i < action->nr_guests; i++ )
            clear_bit(vector_to_irq(vector), action->guest[i]->pirq_mask);
        action->in_flight = 0;
        spin_unlock(&desc->lock);
    }

    flush_ready_eoi(NULL);
}

static void __pirq_guest_eoi(struct domain *d, int irq)
{
    irq_desc_t *desc;
    irq_guest_action_t *action;
    cpumask_t cpu_eoi_map;

    desc = &irq_desc[irq_to_vector(irq)];
    action = (irq_guest_action_t *)desc->action;

    spin_lock_irq(&desc->lock);

    ASSERT(!test_bit(irq, d->pirq_mask) ||
           (action->ack_type != ACKTYPE_NONE));

    if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
         unlikely(--action->in_flight != 0) )
    {
        spin_unlock_irq(&desc->lock);
        return;
    }

    if ( action->ack_type == ACKTYPE_UNMASK )
    {
        ASSERT(cpus_empty(action->cpu_eoi_map));
        desc->handler->end(irq_to_vector(irq));
        spin_unlock_irq(&desc->lock);
        return;
    }

    ASSERT(action->ack_type == ACKTYPE_EOI);

    cpu_eoi_map = action->cpu_eoi_map;
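
    /*
     * If this CPU is in cpu_eoi_map it owes an EOI for this irq: mark it
     * ready and flush while irqs are still disabled (flush_ready_eoi()
     * asserts that). Otherwise just drop the lock and re-enable irqs.
     */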
    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
    {
        __set_eoi_ready(desc);
        spin_unlock(&desc->lock);
        flush_ready_eoi(NULL);
        local_irq_enable();
    }
    else
    {
        spin_unlock_irq(&desc->lock);
    }

    if ( !cpus_empty(cpu_eoi_map) )
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
}

int pirq_guest_eoi(struct domain *d, int irq)
{
    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    __pirq_guest_eoi(d, irq);

    return 0;
}

int pirq_guest_unmask(struct domain *d)
{
    unsigned int irq;
    shared_info_t *s = d->shared_info;

    for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
          irq < NR_IRQS;
          irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
    {
        if ( !test_bit(d->pirq_to_evtchn[irq], s->evtchn_mask) )
            __pirq_guest_eoi(d, irq);
    }

    return 0;
}

extern int ioapic_ack_new;
int pirq_acktype(int irq)
{
    irq_desc_t *desc;
    unsigned int vector;

    vector = irq_to_vector(irq);
    if ( vector == 0 )
        return ACKTYPE_NONE;

    desc = &irq_desc[vector];

    /*
     * Edge-triggered IO-APIC interrupts need no final acknowledgement:
     * we ACK early during interrupt processing.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-edge") )
        return ACKTYPE_NONE;

    /* Legacy PIC interrupts can be acknowledged from any CPU. */
    if ( !strcmp(desc->handler->typename, "XT-PIC") )
        return ACKTYPE_UNMASK;

    /*
     * Level-triggered IO-APIC interrupts need to be acknowledged on the CPU
     * on which they were received. This is because we tickle the LAPIC to EOI.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-level") )
        return ioapic_ack_new ? ACKTYPE_EOI : ACKTYPE_UNMASK;

    BUG();
    return 0;
}

int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    unsigned int vector;
    irq_desc_t *desc;
    irq_guest_action_t *action;
    unsigned long flags;
    int rc = 0;
    cpumask_t cpumask = CPU_MASK_NONE;

 retry:
    vector = irq_to_vector(irq);
    if ( vector == 0 )
        return -EINVAL;

    desc = &irq_desc[vector];

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
                    irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;
        action->ack_type = pirq_acktype(irq);
        action->cpu_eoi_map = CPU_MASK_NONE;

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(vector);

        /* Attempt to bind the interrupt target to the correct CPU. */
        cpu_set(v->processor, cpumask);
        if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
            desc->handler->set_affinity(vector, cpumask);
    }
    else if ( !will_share || !action->shareable )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
                irq);
        rc = -EBUSY;
        goto out;
    }
    else if ( action->nr_guests == 0 )
    {
        /*
         * Indicates that an ACKTYPE_EOI interrupt is being released.
         * Wait for that to happen before continuing.
         */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        spin_unlock_irqrestore(&desc->lock, flags);
        cpu_relax();
        goto retry;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}

int pirq_guest_unbind(struct domain *d, int irq)
{
    unsigned int vector = irq_to_vector(irq);
    irq_desc_t *desc = &irq_desc[vector];
    irq_guest_action_t *action;
    cpumask_t cpu_eoi_map;
    unsigned long flags;
    int i;

    BUG_ON(vector == 0);

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    i = 0;
    while ( action->guest[i] && (action->guest[i] != d) )
        i++;
    memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
    action->nr_guests--;

    switch ( action->ack_type )
    {
    case ACKTYPE_UNMASK:
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) )
            desc->handler->end(vector);
        break;
    case ACKTYPE_EOI:
        /* NB. If #guests == 0 then we clear the eoi_map later on. */
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) &&
             (action->nr_guests != 0) )
        {
            cpu_eoi_map = action->cpu_eoi_map;
            spin_unlock_irqrestore(&desc->lock, flags);
            on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
            spin_lock_irqsave(&desc->lock, flags);
        }
        break;
    }

    BUG_ON(test_bit(irq, d->pirq_mask));

    if ( action->nr_guests != 0 )
        goto out;

    BUG_ON(action->in_flight != 0);

    /* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
    desc->depth = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->disable(vector);

    /*
     * We may have an EOI languishing anywhere in one of the per-CPU
     * EOI stacks. Forcibly flush the stack on every CPU where this might
     * be the case.
     */
    cpu_eoi_map = action->cpu_eoi_map;
    if ( !cpus_empty(cpu_eoi_map) )
    {
        BUG_ON(action->ack_type != ACKTYPE_EOI);
        spin_unlock_irqrestore(&desc->lock, flags);
        on_selected_cpus(cpu_eoi_map, flush_all_pending_eoi, NULL, 1, 1);
        on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 1);
        spin_lock_irqsave(&desc->lock, flags);
    }

    BUG_ON(!cpus_empty(action->cpu_eoi_map));

    desc->action = NULL;
    xfree(action);
    desc->status &= ~IRQ_GUEST;
    desc->handler->shutdown(vector);

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return 0;
}

extern void dump_ioapic_irq_info(void);

static void dump_irqs(unsigned char key)
{
    int i, irq, vector;
    irq_desc_t *desc;
    irq_guest_action_t *action;
    struct domain *d;
    unsigned long flags;

    printk("Guest interrupt information:\n");

    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        vector = irq_to_vector(irq);
        if ( vector == 0 )
            continue;

        desc = &irq_desc[vector];

        spin_lock_irqsave(&desc->lock, flags);

        if ( desc->status & IRQ_GUEST )
        {
            action = (irq_guest_action_t *)desc->action;

            printk(" IRQ%3d Vec%3d: type=%-15s status=%08x "
                   "in-flight=%d domain-list=",
                   irq, vector, desc->handler->typename,
                   desc->status, action->in_flight);

            for ( i = 0; i < action->nr_guests; i++ )
            {
                d = action->guest[i];
                printk("%u(%c%c%c%c)",
                       d->domain_id,
                       (test_bit(d->pirq_to_evtchn[irq],
                                 d->shared_info->evtchn_pending) ?
                        'P' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_LONG,
                                 &d->shared_info->vcpu_info[0].
                                 evtchn_pending_sel) ?
                        'S' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq],
                                 d->shared_info->evtchn_mask) ?
                        'M' : '-'),
                       (test_bit(irq, d->pirq_mask) ?
                        'M' : '-'));
                if ( i != action->nr_guests )
                    printk(",");
            }

            printk("\n");
        }

        spin_unlock_irqrestore(&desc->lock, flags);
    }

    dump_ioapic_irq_info();
}

static int __init setup_dump_irqs(void)
{
    register_keyhandler('i', dump_irqs, "dump interrupt bindings");
    return 0;
}
__initcall(setup_dump_irqs);

static struct timer end_irq_timer[NR_CPUS];

/*
 * force_intack: Forcibly emit all pending EOIs on each CPU every second.
 * Mainly useful for debugging or poking lazy guest ISRs.
 */

static void end_irq_timeout(void *unused)
{
    int cpu = smp_processor_id();

    local_irq_disable();
    flush_all_pending_eoi(NULL);
    local_irq_enable();

    on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 0);

    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
}

static void __init __setup_irq_timeout(void *unused)
{
    int cpu = smp_processor_id();
    init_timer(&end_irq_timer[cpu], end_irq_timeout, NULL, cpu);
    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
}

static int force_intack;
boolean_param("force_intack", force_intack);

static int __init setup_irq_timeout(void)
{
    if ( force_intack )
        on_each_cpu(__setup_irq_timeout, NULL, 1, 1);
    return 0;
}
__initcall(setup_irq_timeout);