xen/arch/x86/irq.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000
parents 2188ed106885
children 6468257e9e62
/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <xen/compat.h>
#include <xen/iocap.h>
#include <xen/iommu.h>
#include <asm/msi.h>
#include <asm/current.h>
#include <public/physdev.h>

/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);

irq_desc_t irq_desc[NR_IRQS];

static void __do_IRQ_guest(int vector);

void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }

static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int vector)
{
    ack_bad_irq(vector);
}

#define shutdown_none disable_none
#define end_none      enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;
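
/* C-level entry point for external interrupts, reached from the asm stubs. */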
asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
    unsigned int      vector = regs->entry_vector;
    irq_desc_t       *desc = &irq_desc[vector];
    struct irqaction *action;

    perfc_incr(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(vector);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        irq_enter();
        __do_IRQ_guest(vector);
        irq_exit();
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter();
        spin_unlock_irq(&desc->lock);
        action->handler(vector_to_irq(vector), action->dev_id, regs);
        spin_lock_irq(&desc->lock);
        irq_exit();
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(vector);
    spin_unlock(&desc->lock);
}

int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct cpu_user_regs *),
                unsigned long irqflags, const char *devname, void *dev_id)
{
    struct irqaction *action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->name = devname;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        xfree(action);

    return retval;
}

void free_irq(unsigned int irq)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->action  = NULL;
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->shutdown(irq);
    spin_unlock_irqrestore(&desc->lock, flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
}
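
/* Install 'new' as the action for 'irq' and start up the line. Fails with
 * -EBUSY if an action is already installed. */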
int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
    }

    desc->action  = new;
    desc->depth   = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(vector);

    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}


/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    u8 ack_type;
#define ACKTYPE_NONE   0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU)   */
#define ACKTYPE_EOI    2 /* EOI on the CPU that was interrupted  */
    cpumask_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

/*
 * Stack of interrupts awaiting EOI on each CPU. These must be popped in
 * order, as only the current highest-priority pending irq can be EOIed.
 */
struct pending_eoi {
    u8 vector; /* Vector awaiting EOI */
    u8 ready;  /* Ready for EOI now?  */
};
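/*
 * The stack pointer is stashed in the 'vector' field of the final (otherwise
 * unused) array slot, via the pending_eoi_sp() accessor below.
 */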
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

static struct timer irq_guest_eoi_timer[NR_IRQS];
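/*
 * Timer handler used when a line has been temporarily disabled because every
 * target guest already had the pirq pending: clear IRQ_INPROGRESS and
 * re-enable delivery after a short delay.
 */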
static void irq_guest_eoi_timer_fn(void *data)
{
    irq_desc_t *desc = data;
    unsigned vector = desc - irq_desc;
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->status &= ~IRQ_INPROGRESS;
    desc->handler->enable(vector);
    spin_unlock_irqrestore(&desc->lock, flags);
}
static void __do_IRQ_guest(int vector)
{
    irq_desc_t         *desc = &irq_desc[vector];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain      *d;
    int                 i, sp, already_pending = 0;
    struct pending_eoi *peoi = this_cpu(pending_eoi);

    if ( unlikely(action->nr_guests == 0) )
    {
        /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        desc->handler->end(vector);
        return;
    }

    if ( action->ack_type == ACKTYPE_EOI )
    {
        sp = pending_eoi_sp(peoi);
        ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
        ASSERT(sp < (NR_VECTORS-1));
        peoi[sp].vector = vector;
        peoi[sp].ready = 0;
        pending_eoi_sp(peoi) = sp+1;
        cpu_set(smp_processor_id(), action->cpu_eoi_map);
    }

    for ( i = 0; i < action->nr_guests; i++ )
    {
        unsigned int irq;
        d = action->guest[i];
        irq = domain_vector_to_irq(d, vector);
        if ( (action->ack_type != ACKTYPE_NONE) &&
             !test_and_set_bit(irq, d->pirq_mask) )
            action->in_flight++;
        if ( hvm_do_IRQ_dpci(d, irq) )
        {
            if ( action->ack_type == ACKTYPE_NONE )
            {
                already_pending += !!(desc->status & IRQ_INPROGRESS);
                desc->status |= IRQ_INPROGRESS; /* cleared during hvm eoi */
            }
        }
        else if ( send_guest_pirq(d, irq) &&
                  (action->ack_type == ACKTYPE_NONE) )
        {
            already_pending++;
        }
    }
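
    /*
     * Every target guest already had this pirq pending, so delivering it
     * again achieves nothing: briefly disable the line and re-enable it from
     * a timer to throttle an interrupt storm.
     */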
    if ( already_pending == action->nr_guests )
    {
        desc->handler->disable(vector);
        stop_timer(&irq_guest_eoi_timer[vector]);
        init_timer(&irq_guest_eoi_timer[vector],
                   irq_guest_eoi_timer_fn, desc, smp_processor_id());
        set_timer(&irq_guest_eoi_timer[vector], NOW() + MILLISECS(1));
    }
}
/*
 * Retrieve Xen irq-descriptor corresponding to a domain-specific irq.
 * The descriptor is returned locked. This function is safe against changes
 * to the per-domain irq-to-vector mapping.
 */
irq_desc_t *domain_spin_lock_irq_desc(
    struct domain *d, int irq, unsigned long *pflags)
{
    unsigned int vector;
    unsigned long flags;
    irq_desc_t *desc;

    for ( ; ; )
    {
        vector = domain_irq_to_vector(d, irq);
        if ( vector <= 0 )
            return NULL;
        desc = &irq_desc[vector];
        spin_lock_irqsave(&desc->lock, flags);
        if ( vector == domain_irq_to_vector(d, irq) )
            break;
        spin_unlock_irqrestore(&desc->lock, flags);
    }

    if ( pflags != NULL )
        *pflags = flags;
    return desc;
}

/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    irq_desc_t         *desc;
    int                 vector, sp;

    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(peoi);

    while ( (--sp >= 0) && peoi[sp].ready )
    {
        vector = peoi[sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        desc->handler->end(vector);
        spin_unlock(&desc->lock);
    }

    pending_eoi_sp(peoi) = sp+1;
}
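
/*
 * Mark this CPU's pending-EOI stack entry for 'desc' as ready, but only once
 * no guest still has the interrupt in flight. Caller holds desc->lock with
 * interrupts disabled.
 */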
static void __set_eoi_ready(irq_desc_t *desc)
{
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    int                 vector, sp;

    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) ||
         (action->in_flight != 0) ||
         !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
        return;

    sp = pending_eoi_sp(peoi);
    do {
        ASSERT(sp > 0);
    } while ( peoi[--sp].vector != vector );
    ASSERT(!peoi[sp].ready);
    peoi[sp].ready = 1;
}

/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
static void set_eoi_ready(void *data)
{
    irq_desc_t *desc = data;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&desc->lock);
    __set_eoi_ready(desc);
    spin_unlock(&desc->lock);

    flush_ready_eoi(NULL);
}

static void __pirq_guest_eoi(struct domain *d, int irq)
{
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;
    int                 vector;

    ASSERT(local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);
    if ( desc == NULL )
        return;

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    ASSERT(!test_bit(irq, d->pirq_mask) ||
           (action->ack_type != ACKTYPE_NONE));

    if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
         unlikely(--action->in_flight != 0) )
    {
        spin_unlock_irq(&desc->lock);
        return;
    }

    if ( action->ack_type == ACKTYPE_UNMASK )
    {
        ASSERT(cpus_empty(action->cpu_eoi_map));
        desc->handler->end(vector);
        spin_unlock_irq(&desc->lock);
        return;
    }

    ASSERT(action->ack_type == ACKTYPE_EOI);

    cpu_eoi_map = action->cpu_eoi_map;

    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
    {
        __set_eoi_ready(desc);
        spin_unlock(&desc->lock);
        flush_ready_eoi(NULL);
        local_irq_enable();
    }
    else
    {
        spin_unlock_irq(&desc->lock);
    }

    if ( !cpus_empty(cpu_eoi_map) )
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
}

int pirq_guest_eoi(struct domain *d, int irq)
{
    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    __pirq_guest_eoi(d, irq);

    return 0;
}

int pirq_guest_unmask(struct domain *d)
{
    unsigned int irq;

    for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
          irq < NR_IRQS;
          irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
    {
        if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) )
            __pirq_guest_eoi(d, irq);
    }

    return 0;
}

extern int ioapic_ack_new;
int pirq_acktype(struct domain *d, int irq)
{
    irq_desc_t  *desc;
    unsigned int vector;

    vector = domain_irq_to_vector(d, irq);
    if ( vector <= 0 )
        return ACKTYPE_NONE;

    desc = &irq_desc[vector];

    if ( desc->handler == &no_irq_type )
        return ACKTYPE_NONE;

    /*
     * Edge-triggered IO-APIC and LAPIC interrupts need no final
     * acknowledgement: we ACK early during interrupt processing.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-edge") ||
         !strcmp(desc->handler->typename, "local-APIC-edge") )
        return ACKTYPE_NONE;

    /*
     * MSIs are treated as edge-triggered interrupts, except
     * when there is no proper way to mask them.
     */
    if ( desc->handler == &pci_msi_type )
        return msi_maskable_irq(desc->msi_desc) ? ACKTYPE_NONE : ACKTYPE_EOI;

    /*
     * Level-triggered IO-APIC interrupts need to be acknowledged on the CPU
     * on which they were received. This is because we tickle the LAPIC to EOI.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-level") )
        return ioapic_ack_new ? ACKTYPE_EOI : ACKTYPE_UNMASK;

    /* Legacy PIC interrupts can be acknowledged from any CPU. */
    if ( !strcmp(desc->handler->typename, "XT-PIC") )
        return ACKTYPE_UNMASK;

    printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq);
    BUG();

    return 0;
}

int pirq_shared(struct domain *d, int irq)
{
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    unsigned long       flags;
    int                 shared;

    desc = domain_spin_lock_irq_desc(d, irq, &flags);
    if ( desc == NULL )
        return 0;

    action = (irq_guest_action_t *)desc->action;
    shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1));

    spin_unlock_irqrestore(&desc->lock, flags);

    return shared;
}

int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    unsigned int        vector;
    irq_desc_t         *desc;
    irq_guest_action_t *action, *newaction = NULL;
    int                 rc = 0;
    cpumask_t           cpumask = CPU_MASK_NONE;

    WARN_ON(!spin_is_locked(&v->domain->event_lock));
    BUG_ON(!local_irq_is_enabled());

 retry:
    desc = domain_spin_lock_irq_desc(v->domain, irq, NULL);
    if ( desc == NULL )
    {
        rc = -EINVAL;
        goto out;
    }

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. In use by '%s'.\n",
                     irq, desc->action->name);
            rc = -EBUSY;
            goto unlock_out;
        }
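
        /*
         * Drop the lock to allocate the guest-action block, then retry from
         * the top: the irq-to-vector mapping may change while unlocked.
         */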
        if ( newaction == NULL )
        {
            spin_unlock_irq(&desc->lock);
            if ( (newaction = xmalloc(irq_guest_action_t)) != NULL )
                goto retry;
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. Out of memory.\n",
                     irq);
            rc = -ENOMEM;
            goto out;
        }

        action = newaction;
        desc->action = (struct irqaction *)action;
        newaction = NULL;

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;
        action->ack_type  = pirq_acktype(v->domain, irq);
        cpus_clear(action->cpu_eoi_map);

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(vector);

        /* Attempt to bind the interrupt target to the correct CPU. */
        cpu_set(v->processor, cpumask);
        if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
            desc->handler->set_affinity(vector, cpumask);
    }
    else if ( !will_share || !action->shareable )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Will not share with others.\n",
                 irq);
        rc = -EBUSY;
        goto unlock_out;
    }
    else if ( action->nr_guests == 0 )
    {
        /*
         * Indicates that an ACKTYPE_EOI interrupt is being released.
         * Wait for that to happen before continuing.
         */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        spin_unlock_irq(&desc->lock);
        cpu_relax();
        goto retry;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Already at max share.\n", irq);
        rc = -EBUSY;
        goto unlock_out;
    }

    action->guest[action->nr_guests++] = v->domain;

 unlock_out:
    spin_unlock_irq(&desc->lock);
 out:
    if ( newaction != NULL )
        xfree(newaction);
    return rc;
}

static irq_guest_action_t *__pirq_guest_unbind(
    struct domain *d, int irq, irq_desc_t *desc)
{
    unsigned int        vector;
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;
    int                 i;

    BUG_ON(!(desc->status & IRQ_GUEST));

    action = (irq_guest_action_t *)desc->action;
    vector = desc - irq_desc;

    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
        continue;
    BUG_ON(i == action->nr_guests);
    memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
    action->nr_guests--;

    switch ( action->ack_type )
    {
    case ACKTYPE_UNMASK:
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) )
            desc->handler->end(vector);
        break;
    case ACKTYPE_EOI:
        /* NB. If #guests == 0 then we clear the eoi_map later on. */
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) &&
             (action->nr_guests != 0) )
        {
            cpu_eoi_map = action->cpu_eoi_map;
            spin_unlock_irq(&desc->lock);
            on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
            spin_lock_irq(&desc->lock);
        }
        break;
    }

    /*
     * The guest cannot re-bind to this IRQ until this function returns. So,
     * when we have flushed this IRQ from pirq_mask, it should remain flushed.
     */
    BUG_ON(test_bit(irq, d->pirq_mask));

    if ( action->nr_guests != 0 )
        return NULL;

    BUG_ON(action->in_flight != 0);

    /* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->disable(vector);

    /*
     * Mark any remaining pending EOIs as ready to flush.
     * NOTE: We will need to make this a stronger barrier if in future we allow
     * an interrupt vector to be re-bound to a different PIC. In that case we
     * would need to flush all ready EOIs before returning as otherwise the
     * desc->handler could change and we would call the wrong 'end' hook.
     */
    cpu_eoi_map = action->cpu_eoi_map;
    if ( !cpus_empty(cpu_eoi_map) )
    {
        BUG_ON(action->ack_type != ACKTYPE_EOI);
        spin_unlock_irq(&desc->lock);
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
        spin_lock_irq(&desc->lock);
    }

    BUG_ON(!cpus_empty(action->cpu_eoi_map));

    desc->action = NULL;
    desc->status &= ~IRQ_GUEST;
    desc->status &= ~IRQ_INPROGRESS;
    kill_timer(&irq_guest_eoi_timer[vector]);
    desc->handler->shutdown(vector);

    /* Caller frees the old guest descriptor block. */
    return action;
}
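
/*
 * If the pirq was already forcibly unbound, unmap_domain_pirq() left a
 * negated mapping behind; in that case domain_spin_lock_irq_desc() fails and
 * all that remains is to clear the stale negative entries.
 */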
void pirq_guest_unbind(struct domain *d, int irq)
{
    irq_guest_action_t *oldaction = NULL;
    irq_desc_t *desc;
    int vector;

    WARN_ON(!spin_is_locked(&d->event_lock));

    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);

    if ( desc == NULL )
    {
        vector = -domain_irq_to_vector(d, irq);
        BUG_ON(vector <= 0);
        desc = &irq_desc[vector];
        spin_lock_irq(&desc->lock);
        d->arch.pirq_vector[irq] = d->arch.vector_pirq[vector] = 0;
    }
    else
    {
        oldaction = __pirq_guest_unbind(d, irq, desc);
    }

    spin_unlock_irq(&desc->lock);

    if ( oldaction != NULL )
        xfree(oldaction);
}

int pirq_guest_force_unbind(struct domain *d, int irq)
{
    irq_desc_t *desc;
    irq_guest_action_t *action, *oldaction = NULL;
    int i, bound = 0;

    WARN_ON(!spin_is_locked(&d->event_lock));

    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, irq, NULL);
    BUG_ON(desc == NULL);

    if ( !(desc->status & IRQ_GUEST) )
        goto out;

    action = (irq_guest_action_t *)desc->action;
    for ( i = 0; (i < action->nr_guests) && (action->guest[i] != d); i++ )
        continue;
    if ( i == action->nr_guests )
        goto out;

    bound = 1;
    oldaction = __pirq_guest_unbind(d, irq, desc);

 out:
    spin_unlock_irq(&desc->lock);

    if ( oldaction != NULL )
        xfree(oldaction);

    return bound;
}
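
/*
 * GSI pirqs are allocated bottom-up from 16 (0-15 remain the legacy ISA
 * IRQs); other types are allocated top-down, keeping the two ranges apart.
 */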
int get_free_pirq(struct domain *d, int type, int index)
{
    int i;

    ASSERT(spin_is_locked(&d->event_lock));

    if ( type == MAP_PIRQ_TYPE_GSI )
    {
        for ( i = 16; i < NR_IRQS; i++ )
            if ( !d->arch.pirq_vector[i] )
                break;
        if ( i == NR_IRQS )
            return -ENOSPC;
    }
    else
    {
        for ( i = NR_IRQS - 1; i >= 16; i-- )
            if ( !d->arch.pirq_vector[i] )
                break;
        if ( i == 16 )
            return -ENOSPC;
    }

    return i;
}
int map_domain_pirq(
    struct domain *d, int pirq, int vector, int type, void *data)
{
    int ret = 0;
    int old_vector, old_pirq;
    irq_desc_t *desc;
    unsigned long flags;

    ASSERT(spin_is_locked(&d->event_lock));

    /* XXX Until pcidev and msi locking is fixed. */
    if ( type == MAP_PIRQ_TYPE_MSI )
        return -EINVAL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( pirq < 0 || pirq >= NR_IRQS || vector < 0 || vector >= NR_VECTORS )
    {
        dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or vector %d\n",
                d->domain_id, pirq, vector);
        return -EINVAL;
    }

    old_vector = d->arch.pirq_vector[pirq];
    old_pirq = d->arch.vector_pirq[vector];

    if ( (old_vector && (old_vector != vector)) ||
         (old_pirq && (old_pirq != pirq)) )
    {
        dprintk(XENLOG_G_ERR, "dom%d: pirq %d or vector %d already mapped\n",
                d->domain_id, pirq, vector);
        return -EINVAL;
    }

    ret = irq_permit_access(d, pirq);
    if ( ret )
    {
        dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d\n",
                d->domain_id, pirq);
        return ret;
    }

    desc = &irq_desc[vector];
    spin_lock_irqsave(&desc->lock, flags);

    if ( type == MAP_PIRQ_TYPE_MSI )
    {
        struct msi_info *msi = (struct msi_info *)data;
        if ( desc->handler != &no_irq_type )
            dprintk(XENLOG_G_ERR, "dom%d: vector %d in use\n",
                    d->domain_id, vector);
        desc->handler = &pci_msi_type;
        ret = pci_enable_msi(msi);
        if ( ret )
            goto done;
    }

    d->arch.pirq_vector[pirq] = vector;
    d->arch.vector_pirq[vector] = pirq;

 done:
    spin_unlock_irqrestore(&desc->lock, flags);
    return ret;
}

/* The pirq should have been unbound before this call. */
int unmap_domain_pirq(struct domain *d, int pirq)
{
    unsigned long flags;
    irq_desc_t *desc;
    int vector, ret = 0;
    bool_t forced_unbind;

    if ( (pirq < 0) || (pirq >= NR_IRQS) )
        return -EINVAL;

    if ( !IS_PRIV(current->domain) )
        return -EINVAL;

    ASSERT(spin_is_locked(&d->event_lock));

    vector = d->arch.pirq_vector[pirq];
    if ( vector <= 0 )
    {
        dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
                d->domain_id, pirq);
        ret = -EINVAL;
        goto done;
    }

    forced_unbind = pirq_guest_force_unbind(d, pirq);
    if ( forced_unbind )
        dprintk(XENLOG_G_WARNING, "dom%d: forcing unbind of pirq %d\n",
                d->domain_id, pirq);

    desc = &irq_desc[vector];
    spin_lock_irqsave(&desc->lock, flags);

    BUG_ON(vector != d->arch.pirq_vector[pirq]);

    if ( desc->msi_desc )
        pci_disable_msi(vector);

    if ( desc->handler == &pci_msi_type )
    {
        desc->handler = &no_irq_type;
        free_irq_vector(vector);
    }
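
    /*
     * On a forced unbind, keep the mapping in negated form so that a later
     * pirq_guest_unbind() from the guest's teardown path can still find the
     * vector and finish the cleanup.
     */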
    if ( !forced_unbind )
    {
        d->arch.pirq_vector[pirq] = 0;
        d->arch.vector_pirq[vector] = 0;
    }
    else
    {
        d->arch.pirq_vector[pirq] = -vector;
        d->arch.vector_pirq[vector] = -pirq;
    }

    spin_unlock_irqrestore(&desc->lock, flags);

    ret = irq_deny_access(d, pirq);
    if ( ret )
        dprintk(XENLOG_G_ERR, "dom%d: could not deny access to irq %d\n",
                d->domain_id, pirq);

 done:
    return ret;
}

void free_domain_pirqs(struct domain *d)
{
    int i;

    spin_lock(&d->event_lock);

    for ( i = 0; i < NR_IRQS; i++ )
        if ( d->arch.pirq_vector[i] > 0 )
            unmap_domain_pirq(d, i);

    spin_unlock(&d->event_lock);
}

extern void dump_ioapic_irq_info(void);
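
/*
 * 'i' debug-key handler. For each guest-bound IRQ the per-domain flags are:
 * event channel pending (P), selector pending (S), event channel masked (M),
 * and pirq masked in pirq_mask (M).
 */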
static void dump_irqs(unsigned char key)
{
    int i, irq, vector;
    irq_desc_t *desc;
    irq_guest_action_t *action;
    struct domain *d;
    unsigned long flags;

    printk("Guest interrupt information:\n");

    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        vector = irq_to_vector(irq);
        if ( vector == 0 )
            continue;

        desc = &irq_desc[vector];

        spin_lock_irqsave(&desc->lock, flags);

        if ( desc->status & IRQ_GUEST )
        {
            action = (irq_guest_action_t *)desc->action;

            printk(" IRQ%3d Vec%3d: type=%-15s status=%08x "
                   "in-flight=%d domain-list=",
                   irq, vector, desc->handler->typename,
                   desc->status, action->in_flight);

            for ( i = 0; i < action->nr_guests; i++ )
            {
                d = action->guest[i];
                printk("%u(%c%c%c%c)",
                       d->domain_id,
                       (test_bit(d->pirq_to_evtchn[irq],
                                 &shared_info(d, evtchn_pending)) ?
                        'P' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d),
                                 &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
                        'S' : '-'),
                       (test_bit(d->pirq_to_evtchn[irq],
                                 &shared_info(d, evtchn_mask)) ?
                        'M' : '-'),
                       (test_bit(irq, d->pirq_mask) ?
                        'M' : '-'));
                if ( i != action->nr_guests )
                    printk(",");
            }

            printk("\n");
        }

        spin_unlock_irqrestore(&desc->lock, flags);
    }

    dump_ioapic_irq_info();
}

static int __init setup_dump_irqs(void)
{
    register_keyhandler('i', dump_irqs, "dump interrupt bindings");
    return 0;
}
__initcall(setup_dump_irqs);
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/mach-generic/mach_apic.h>
#include <xen/delay.h>
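
/*
 * Redirect interrupts away from a CPU that is being taken offline. Entered
 * with interrupts disabled (they are briefly re-enabled below).
 */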
void fixup_irqs(cpumask_t map)
{
    unsigned int irq, sp;
    static int warned;
    irq_guest_action_t *action;
    struct pending_eoi *peoi;

    /* Direct all future interrupts away from this CPU. */
    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        cpumask_t mask;
        if ( irq == 2 )
            continue;

        cpus_and(mask, irq_desc[irq].affinity, map);
        if ( any_online_cpu(mask) == NR_CPUS )
        {
            printk("Breaking affinity for irq %i\n", irq);
            mask = map;
        }
        if ( irq_desc[irq].handler->set_affinity )
            irq_desc[irq].handler->set_affinity(irq, mask);
        else if ( irq_desc[irq].action && !(warned++) )
            printk("Cannot set affinity for irq %i\n", irq);
    }

    /* Service any interrupts that beat us in the re-direction race. */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
    for ( irq = 0; irq < NR_IRQS; irq++ )
    {
        if ( !(irq_desc[irq].status & IRQ_GUEST) )
            continue;
        action = (irq_guest_action_t *)irq_desc[irq].action;
        cpu_clear(smp_processor_id(), action->cpu_eoi_map);
    }

    /* Flush the interrupt EOI stack. */
    peoi = this_cpu(pending_eoi);
    for ( sp = 0; sp < pending_eoi_sp(peoi); sp++ )
        peoi[sp].ready = 1;
    flush_ready_eoi(NULL);
}
#endif