ia64/xen-unstable: xen/arch/x86/irq.c @ 18591:ed398097c03e

x86: Move pirq logic to irq.c.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Wed Oct 08 10:48:48 2008 +0100
Parents:  e1507b441be4
Children: 51a05fb4c601

/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <xen/compat.h>
#include <xen/iocap.h>
#include <xen/iommu.h>
#include <asm/msi.h>
#include <asm/current.h>
#include <public/physdev.h>

/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);

irq_desc_t irq_desc[NR_IRQS];

static void __do_IRQ_guest(int vector);

void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }

static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int vector)
{
    ack_bad_irq(vector);
}

#define shutdown_none disable_none
#define end_none      enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;

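/*
 * Entry point for external interrupts: acknowledge the vector, then either
 * hand it to the guest-IRQ path (IRQ_GUEST) or run the registered Xen action
 * handler, coalescing concurrent instances via IRQ_PENDING/IRQ_INPROGRESS.
 */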
asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
    unsigned int      vector = regs->entry_vector;
    irq_desc_t       *desc = &irq_desc[vector];
    struct irqaction *action;

    perfc_incr(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(vector);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        irq_enter();
        __do_IRQ_guest(vector);
        irq_exit();
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter();
        spin_unlock_irq(&desc->lock);
        action->handler(vector_to_irq(vector), action->dev_id, regs);
        spin_lock_irq(&desc->lock);
        irq_exit();
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(vector);
    spin_unlock(&desc->lock);
}

int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct cpu_user_regs *),
                unsigned long irqflags, const char *devname, void *dev_id)
{
    struct irqaction *action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->name = devname;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        xfree(action);

    return retval;
}

void free_irq(unsigned int irq)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);
    desc->action  = NULL;
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->shutdown(irq);
    spin_unlock_irqrestore(&desc->lock,flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
}

int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock,flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock,flags);
        return -EBUSY;
    }

    desc->action  = new;
    desc->depth   = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(vector);

    spin_unlock_irqrestore(&desc->lock,flags);

    return 0;
}

/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    u8 ack_type;
#define ACKTYPE_NONE   0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU)   */
#define ACKTYPE_EOI    2 /* EOI on the CPU that was interrupted  */
    cpumask_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

/*
 * Stack of interrupts awaiting EOI on each CPU. These must be popped in
 * order, as only the current highest-priority pending irq can be EOIed.
 */
struct pending_eoi {
    u8 vector; /* Vector awaiting EOI */
    u8 ready;  /* Ready for EOI now?  */
};
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

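/*
 * Per-IRQ deferred-enable timer: when every bound guest already has this
 * pirq pending, __do_IRQ_guest() masks the line and arms this timer;
 * irq_guest_eoi_timer_fn() re-enables the line shortly afterwards.
 */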
static struct timer irq_guest_eoi_timer[NR_IRQS];
static void irq_guest_eoi_timer_fn(void *data)
{
    irq_desc_t *desc = data;
    unsigned vector = desc - irq_desc;
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->status &= ~IRQ_INPROGRESS;
    desc->handler->enable(vector);
    spin_unlock_irqrestore(&desc->lock, flags);
}

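/*
 * Deliver a guest-bound interrupt: push an entry onto the pending-EOI stack
 * if a final EOI is required, mark the pirq in flight for each bound domain,
 * and notify the domains via the HVM dpci path or their event channels.
 */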
static void __do_IRQ_guest(int vector)
{
    irq_desc_t         *desc = &irq_desc[vector];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain      *d;
    int                 i, sp, already_pending = 0;
    struct pending_eoi *peoi = this_cpu(pending_eoi);

    if ( unlikely(action->nr_guests == 0) )
    {
        /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        desc->handler->end(vector);
        return;
    }

    if ( action->ack_type == ACKTYPE_EOI )
    {
        sp = pending_eoi_sp(peoi);
        ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
        ASSERT(sp < (NR_VECTORS-1));
        peoi[sp].vector = vector;
        peoi[sp].ready = 0;
        pending_eoi_sp(peoi) = sp+1;
        cpu_set(smp_processor_id(), action->cpu_eoi_map);
    }

    for ( i = 0; i < action->nr_guests; i++ )
    {
        unsigned int irq;
        d = action->guest[i];
        irq = domain_vector_to_irq(d, vector);
        if ( (action->ack_type != ACKTYPE_NONE) &&
             !test_and_set_bit(irq, d->pirq_mask) )
            action->in_flight++;
        if ( hvm_do_IRQ_dpci(d, irq) )
        {
            if ( action->ack_type == ACKTYPE_NONE )
            {
                already_pending += !!(desc->status & IRQ_INPROGRESS);
                desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
            }
        }
        else if ( send_guest_pirq(d, irq) &&
                  (action->ack_type == ACKTYPE_NONE) )
        {
            already_pending++;
        }
    }

    if ( already_pending == action->nr_guests )
    {
        desc->handler->disable(vector);
        stop_timer(&irq_guest_eoi_timer[vector]);
        init_timer(&irq_guest_eoi_timer[vector],
                   irq_guest_eoi_timer_fn, desc, smp_processor_id());
        set_timer(&irq_guest_eoi_timer[vector], NOW() + MILLISECS(1));
    }
}

/*
 * Retrieve Xen irq-descriptor corresponding to a domain-specific irq.
 * The descriptor is returned locked. This function is safe against changes
 * to the per-domain irq-to-vector mapping.
 */
static irq_desc_t *domain_spin_lock_irq_desc(
    struct domain *d, int irq, unsigned long *pflags)
{
    unsigned int vector;
    unsigned long flags;
    irq_desc_t *desc;

    for ( ; ; )
    {
        vector = domain_irq_to_vector(d, irq);
        if ( vector <= 0 )
            return NULL;
        desc = &irq_desc[vector];
        spin_lock_irqsave(&desc->lock, flags);
        if ( vector == domain_irq_to_vector(d, irq) )
            break;
        spin_unlock_irqrestore(&desc->lock, flags);
    }

    if ( pflags != NULL )
        *pflags = flags;
    return desc;
}

/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    irq_desc_t         *desc;
    int                 vector, sp;

    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(peoi);

    while ( (--sp >= 0) && peoi[sp].ready )
    {
        vector = peoi[sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        desc->handler->end(vector);
        spin_unlock(&desc->lock);
    }

    pending_eoi_sp(peoi) = sp+1;
}

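/*
 * Mark this CPU's pending-EOI stack entry for 'desc' as ready, but only if
 * no guest notification is still in flight and this CPU still owes the EOI.
 * Caller must hold desc->lock with interrupts disabled; the actual EOI is
 * performed by flush_ready_eoi().
 */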
static void __set_eoi_ready(irq_desc_t *desc)
{
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    int                 vector, sp;

    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) ||
         (action->in_flight != 0) ||
         !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
        return;

    sp = pending_eoi_sp(peoi);
    do {
        ASSERT(sp > 0);
    } while ( peoi[--sp].vector != vector );
    ASSERT(!peoi[sp].ready);
    peoi[sp].ready = 1;
}

/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
static void set_eoi_ready(void *data)
{
    irq_desc_t *desc = data;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&desc->lock);
    __set_eoi_ready(desc);
    spin_unlock(&desc->lock);

    flush_ready_eoi(NULL);
}

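/*
 * A guest has EOIed pirq 'irq': clear its in-flight bit and, once the last
 * guest has done so, perform the final hardware acknowledgement (unmask the
 * line, or EOI on every CPU recorded in cpu_eoi_map).
 */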
static void __pirq_guest_eoi(struct domain *d, int irq)
{
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;
    int                 vector;

    ASSERT(local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);
    if ( desc == NULL )
        return;

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    ASSERT(!test_bit(irq, d->pirq_mask) ||
           (action->ack_type != ACKTYPE_NONE));

    if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
         unlikely(--action->in_flight != 0) )
    {
        spin_unlock_irq(&desc->lock);
        return;
    }

    if ( action->ack_type == ACKTYPE_UNMASK )
    {
        ASSERT(cpus_empty(action->cpu_eoi_map));
        desc->handler->end(vector);
        spin_unlock_irq(&desc->lock);
        return;
    }

    ASSERT(action->ack_type == ACKTYPE_EOI);

    cpu_eoi_map = action->cpu_eoi_map;

    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
    {
        __set_eoi_ready(desc);
        spin_unlock(&desc->lock);
        flush_ready_eoi(NULL);
        local_irq_enable();
    }
    else
    {
        spin_unlock_irq(&desc->lock);
    }

    if ( !cpus_empty(cpu_eoi_map) )
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
}

int pirq_guest_eoi(struct domain *d, int irq)
{
    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    __pirq_guest_eoi(d, irq);

    return 0;
}

int pirq_guest_unmask(struct domain *d)
{
    unsigned int irq;

    for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
          irq < NR_IRQS;
          irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
    {
        if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
            __pirq_guest_eoi(d, irq);
    }

    return 0;
}

extern int ioapic_ack_new;
int pirq_acktype(struct domain *d, int irq)
{
    irq_desc_t  *desc;
    unsigned int vector;

    vector = domain_irq_to_vector(d, irq);
    if ( vector <= 0 )
        return ACKTYPE_NONE;

    desc = &irq_desc[vector];

    if ( desc->handler == &no_irq_type )
        return ACKTYPE_NONE;

    /*
     * Edge-triggered IO-APIC and LAPIC interrupts need no final
     * acknowledgement: we ACK early during interrupt processing.
     * MSIs are treated as edge-triggered interrupts.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-edge") ||
         !strcmp(desc->handler->typename, "local-APIC-edge") ||
         !strcmp(desc->handler->typename, "PCI-MSI") )
        return ACKTYPE_NONE;

    /*
     * Level-triggered IO-APIC interrupts need to be acknowledged on the CPU
     * on which they were received. This is because we tickle the LAPIC to EOI.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-level") )
        return ioapic_ack_new ? ACKTYPE_EOI : ACKTYPE_UNMASK;

    /* Legacy PIC interrupts can be acknowledged from any CPU. */
    if ( !strcmp(desc->handler->typename, "XT-PIC") )
        return ACKTYPE_UNMASK;

    printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq);
    BUG();

    return 0;
}

int pirq_shared(struct domain *d, int irq)
{
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    unsigned long       flags;
    int                 shared;

    desc = domain_spin_lock_irq_desc(d, irq, &flags);
    if ( desc == NULL )
        return 0;

    action = (irq_guest_action_t *)desc->action;
    shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1));

    spin_unlock_irqrestore(&desc->lock, flags);

    return shared;
}

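/*
 * Bind a physical IRQ to vcpu 'v's domain: take over the descriptor the
 * first time a guest binds (allocating the irq_guest_action_t and starting
 * up the line), or join an existing binding if both parties allow sharing.
 */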
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    unsigned int        vector;
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    int                 rc = 0;
    cpumask_t           cpumask = CPU_MASK_NONE;

    WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
    BUG_ON(!local_irq_is_enabled());

 retry:
    desc = domain_spin_lock_irq_desc(v->domain, irq, NULL);
    if ( desc == NULL )
        return -EINVAL;

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. In use by '%s'.\n",
                     irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. Out of memory.\n",
                     irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;
        action->ack_type  = pirq_acktype(v->domain, irq);
        cpus_clear(action->cpu_eoi_map);

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(vector);

        /* Attempt to bind the interrupt target to the correct CPU. */
        cpu_set(v->processor, cpumask);
        if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
            desc->handler->set_affinity(vector, cpumask);
    }
    else if ( !will_share || !action->shareable )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Will not share with others.\n",
                 irq);
        rc = -EBUSY;
        goto out;
    }
    else if ( action->nr_guests == 0 )
    {
        /*
         * Indicates that an ACKTYPE_EOI interrupt is being released.
         * Wait for that to happen before continuing.
         */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        spin_unlock_irq(&desc->lock);
        cpu_relax();
        goto retry;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irq(&desc->lock);
    return rc;
}

static void __pirq_guest_unbind(struct domain *d, int irq, irq_desc_t *desc)
{
    unsigned int        vector;
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;
    int                 i;

    BUG_ON(!(desc->status & IRQ_GUEST));

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
        continue;
    BUG_ON(i == action->nr_guests);
    memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
    action->nr_guests--;

    switch ( action->ack_type )
    {
    case ACKTYPE_UNMASK:
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) )
            desc->handler->end(vector);
        break;
    case ACKTYPE_EOI:
        /* NB. If #guests == 0 then we clear the eoi_map later on. */
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) &&
             (action->nr_guests != 0) )
        {
            cpu_eoi_map = action->cpu_eoi_map;
            spin_unlock_irq(&desc->lock);
            on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
            spin_lock_irq(&desc->lock);
        }
        break;
    }

    /*
     * The guest cannot re-bind to this IRQ until this function returns. So,
     * when we have flushed this IRQ from pirq_mask, it should remain flushed.
     */
    BUG_ON(test_bit(irq, d->pirq_mask));

    if ( action->nr_guests != 0 )
        return;

    BUG_ON(action->in_flight != 0);

    /* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->disable(vector);

    /*
     * Mark any remaining pending EOIs as ready to flush.
     * NOTE: We will need to make this a stronger barrier if in future we allow
     * interrupt vectors to be re-bound to a different PIC. In that case we
     * would need to flush all ready EOIs before returning as otherwise the
     * desc->handler could change and we would call the wrong 'end' hook.
     */
    cpu_eoi_map = action->cpu_eoi_map;
    if ( !cpus_empty(cpu_eoi_map) )
    {
        BUG_ON(action->ack_type != ACKTYPE_EOI);
        spin_unlock_irq(&desc->lock);
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
        spin_lock_irq(&desc->lock);
    }

    BUG_ON(!cpus_empty(action->cpu_eoi_map));

    desc->action = NULL;
    xfree(action);
    desc->status &= ~IRQ_GUEST;
    desc->status &= ~IRQ_INPROGRESS;
    kill_timer(&irq_guest_eoi_timer[vector]);
    desc->handler->shutdown(vector);
}

void pirq_guest_unbind(struct domain *d, int irq)
{
    irq_desc_t *desc;
    int vector;

    WARN_ON(!spin_is_locked(&d->evtchn_lock));

    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);

    if ( desc == NULL )
    {
        vector = -domain_irq_to_vector(d, irq);
        BUG_ON(vector <= 0);
        desc = &irq_desc[vector];
        spin_lock_irq(&desc->lock);
        d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
    }
    else
    {
        __pirq_guest_unbind(d, irq, desc);
    }

    spin_unlock_irq(&desc->lock);
}

int pirq_guest_force_unbind(struct domain *d, int irq)
{
    irq_desc_t *desc;
    irq_guest_action_t *action;
    int i, bound = 0;

    WARN_ON(!spin_is_locked(&d->evtchn_lock));

    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);
    BUG_ON(desc == NULL);

    if ( !(desc->status & IRQ_GUEST) )
        goto out;

    action = (irq_guest_action_t *)desc->action;
    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
        continue;
    if ( i == action->nr_guests )
        goto out;

    bound = 1;
    __pirq_guest_unbind(d, irq, desc);

 out:
    spin_unlock_irq(&desc->lock);
    return bound;
}

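/*
 * Find an unused pirq for the domain: GSI mappings are searched upwards
 * from 16, other mapping types (e.g. MSI) downwards from the top of the
 * pirq space. Returns the pirq number, or -ENOSPC if none is free.
 */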
int get_free_pirq(struct domain *d, int type, int index)
{
    int i;

    ASSERT(spin_is_locked(&d->evtchn_lock));

    if ( type == MAP_PIRQ_TYPE_GSI )
    {
        for ( i = 16; i < NR_PIRQS; i++ )
            if ( !d->arch.pirq_vector[i] )
                break;
        if ( i == NR_PIRQS )
            return -ENOSPC;
    }
    else
    {
        for ( i = NR_PIRQS - 1; i >= 16; i-- )
            if ( !d->arch.pirq_vector[i] )
                break;
        if ( i == 16 )
            return -ENOSPC;
    }

    return i;
}

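/*
 * Establish the pirq <-> vector mapping for a domain (privileged callers
 * only, with d->evtchn_lock held). For MAP_PIRQ_TYPE_MSI the vector is also
 * switched to the MSI handler and the MSI is enabled on the device.
 */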
int map_domain_pirq(
    struct domain *d, int pirq, int vector, int type, void *data)
{
    int ret = 0;
    int old_vector, old_pirq;
    irq_desc_t *desc;
    unsigned long flags;

    ASSERT(spin_is_locked(&d->evtchn_lock));

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
    {
        dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
                d->domain_id, pirq, vector);
        return -EINVAL;
    }

    old_vector = d->arch.pirq_vector[pirq];
    old_pirq = d->arch.vector_pirq[vector];

    if ( (old_vector && (old_vector != vector) ) ||
         (old_pirq && (old_pirq != pirq)) )
    {
        dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
                d->domain_id, pirq, vector);
        return -EINVAL;
    }

    ret = irq_permit_access(d, pirq);
    if ( ret )
    {
        dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
                d->domain_id, pirq);
        return ret;
    }

    desc = &irq_desc[vector];
    spin_lock_irqsave(&desc->lock, flags);

    if ( type == MAP_PIRQ_TYPE_MSI )
    {
        struct msi_info *msi = (struct msi_info *)data;
        if ( desc->handler != &no_irq_type )
            dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
                    d->domain_id, vector);
        desc->handler = &pci_msi_type;
        ret = pci_enable_msi(msi);
        if ( ret )
            goto done;
    }

    d->arch.pirq_vector[pirq] = vector;
    d->arch.vector_pirq[vector] = pirq;

 done:
    spin_unlock_irqrestore(&desc->lock, flags);
    return ret;
}

/* The pirq should have been unbound before this call. */
int unmap_domain_pirq(struct domain *d, int pirq)
{
    unsigned long flags;
    irq_desc_t *desc;
    int vector, ret = 0;
    bool_t forced_unbind;

    if ( (pirq < 0) || (pirq >= NR_PIRQS) )
        return -EINVAL;

    if ( !IS_PRIV(current->domain) )
        return -EINVAL;

    ASSERT(spin_is_locked(&d->evtchn_lock));

    vector = d->arch.pirq_vector[pirq];
    if ( vector <= 0 )
    {
        dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
                d->domain_id, pirq);
        ret = -EINVAL;
        goto done;
    }

    forced_unbind = pirq_guest_force_unbind(d, pirq);
    if ( forced_unbind )
        dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
                d->domain_id, pirq);

    desc = &irq_desc[vector];
    spin_lock_irqsave(&desc->lock, flags);

    BUG_ON(vector != d->arch.pirq_vector[pirq]);

    if ( desc->msi_desc )
        pci_disable_msi(vector);

    if ( desc->handler == &pci_msi_type )
        desc->handler = &no_irq_type;

    if ( !forced_unbind )
    {
        d->arch.pirq_vector[pirq] = 0;
        d->arch.vector_pirq[vector] = 0;
    }
    else
    {
        d->arch.pirq_vector[pirq] = -vector;
        d->arch.vector_pirq[vector] = -pirq;
    }

    spin_unlock_irqrestore(&desc->lock, flags);

    ret = irq_deny_access(d, pirq);
    if ( ret )
        dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
                d->domain_id, pirq);

 done:
    return ret;
}

extern void dump_ioapic_irq_info(void);

static void dump_irqs(unsigned char key)
{
    int i, irq, vector;
    irq_desc_t *desc;
    irq_guest_action_t *action;
    struct domain *d;
    unsigned long flags;

    printk("Guest interrupt information:\n");

    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        vector = irq_to_vector(irq);
        if ( vector == 0 )
            continue;

        desc = &irq_desc[vector];

        spin_lock_irqsave(&desc->lock, flags);

        if ( desc->status & IRQ_GUEST )
        {
            action = (irq_guest_action_t *)desc->action;

            printk(" IRQ%3d Vec%3d: type=%-15s status=%08x "
                   "in-flight=%d domain-list=",
                   irq, vector, desc->handler->typename,
                   desc->status, action->in_flight);

            for ( i = 0; i < action->nr_guests; i++ )
            {
                d = action->guest[i];
                printk("%u(%c%c%c%c)",
                       d->domain_id,
                       (test_bit(d->pirq_to_evtchn[irq],
                                 &shared_info(d, evtchn_pending)) ?
                        'P' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
                                 &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
                        'S' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq],
                                 &shared_info(d, evtchn_mask)) ?
                        'M' : '-'),
                       (test_bit(irq, d->pirq_mask) ?
                        'M' : '-'));
                if ( i != action->nr_guests )
                    printk(",");
            }

            printk("\n");
        }

        spin_unlock_irqrestore(&desc->lock, flags);
    }

    dump_ioapic_irq_info();
}

static int __init setup_dump_irqs(void)
{
    register_keyhandler('i', dump_irqs, "dump interrupt bindings");
    return 0;
}
__initcall(setup_dump_irqs);

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/mach-generic/mach_apic.h>
#include <xen/delay.h>

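/*
 * CPU-hotplug helper: steer all IRQ affinities onto the CPUs in 'map',
 * drop the local CPU from every guest cpu_eoi_map, and flush any EOIs
 * still pending on the local CPU's stack.
 */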
void fixup_irqs(cpumask_t map)
{
    unsigned int irq, sp;
    static int warned;
    irq_guest_action_t *action;
    struct pending_eoi *peoi;

    /* Direct all future interrupts away from this CPU. */
    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        cpumask_t mask;
        if ( irq == 2 )
            continue;

        cpus_and(mask, irq_desc[irq].affinity, map);
        if ( any_online_cpu(mask) == NR_CPUS )
        {
            printk("Breaking affinity for irq %i\n", irq);
            mask = map;
        }
        if ( irq_desc[irq].handler->set_affinity )
            irq_desc[irq].handler->set_affinity(irq, mask);
        else if ( irq_desc[irq].action && !(warned++) )
            printk("Cannot set affinity for irq %i\n", irq);
    }

    /* Service any interrupts that beat us in the re-direction race. */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        if ( !(irq_desc[irq].status & IRQ_GUEST) )
            continue;
        action = (irq_guest_action_t *)irq_desc[irq].action;
        cpu_clear(smp_processor_id(), action->cpu_eoi_map);
    }

    /* Flush the interrupt EOI stack. */
    peoi = this_cpu(pending_eoi);
    for ( sp = 0; sp < pending_eoi_sp(peoi); sp++ )
        peoi[sp].ready = 1;
    flush_ready_eoi(NULL);
}
#endif