ia64/xen-unstable

view xen/arch/ia64/xen/irq.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 31959a65fe7c
children ac8bc814faba 482c16b55c28
line source
1 /*
2 * linux/arch/ia64/kernel/irq.c
3 *
4 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the code used by various IRQ handling routines:
7 * asking for different IRQ's should be done through these routines
8 * instead of just grabbing them. Thus setups with different IRQ numbers
9 * shouldn't result in any weird surprises, and installing new handlers
10 * should be easier.
11 *
12 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
13 *
14 * 4/14/2004: Added code to handle cpu migration and do safe irq
15 * migration without losing interrupts for iosapic
16 * architecture.
17 */
19 /*
20 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
21 *
22 * IRQs are in fact implemented a bit like signal handlers for the kernel.
23 * Naturally it's not a 1:1 relation, but there are similarities.
24 */
26 #include <linux/config.h>
27 #include <linux/errno.h>
28 #include <linux/module.h>
29 #include <linux/sched.h>
30 #include <linux/ioport.h>
31 #include <linux/interrupt.h>
32 #include <linux/timex.h>
33 #include <linux/slab.h>
34 #include <linux/ctype.h>
35 #include <linux/init.h>
36 #include <linux/seq_file.h>
38 #include <asm/atomic.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/bitops.h>
43 #include <asm/pgalloc.h>
44 #include <asm/delay.h>
45 #include <xen/irq.h>
46 #include <asm/hw_irq.h>
48 #include <xen/event.h>
49 #define apicid_to_phys_cpu_present(x) 1
#ifdef CONFIG_IA64_GENERIC
/*
 * Map an IA64 interrupt vector to its Linux irq number.  In the
 * generic build the mapping is the identity; machine-specific builds
 * supply their own version.
 */
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
        return (unsigned int) vec;
}
#endif
58 /*
59 * Linux has a controller-independent x86 interrupt architecture.
60 * every controller has a 'controller-template', that is used
61 * by the main code to do the right thing. Each driver-visible
62 * interrupt source is transparently wired to the appropriate
63 * controller. Thus drivers need not be aware of the
64 * interrupt-controller.
65 *
66 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
67 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
68 * (IO-APICs assumed to be messaging to Pentium local-APICs)
69 *
70 * the code is designed to be easily extended with new/different
71 * interrupt controllers, without having to do assembly magic.
72 */
/*
 * Controller mappings for all interrupt sources:
 * every descriptor starts out disabled and attached to the no-op
 * controller (no_irq_type); real controllers are installed later.
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
};
85 void __do_IRQ_guest(int irq);
/*
 * Special irq handlers.
 */

/* Handler for interrupts that intentionally require no action. */
void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
 * Generic "no controller" code: no-op stubs wired into no_irq_type for
 * vectors that have no real interrupt controller behind them.
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
        /*
         * 'what should we do if we get a hw irq event on an illegal vector'.
         * each architecture has to answer this themselves, it doesn't deserve
         * a generic callback i think.
         */
        printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

/*
 * Fallback controller referenced by every irq_desc entry until a real
 * one is installed; only ack_none does anything (it logs the spurious
 * vector).  Positional initializers — field order must match the
 * hw_interrupt_type declaration.
 */
struct hw_interrupt_type no_irq_type = {
        "none",
        startup_none,
        shutdown_none,
        enable_none,
        disable_none,
        ack_none,
        end_none
};
124 atomic_t irq_err_count;
126 /*
127 * Generic enable/disable code: this just calls
128 * down into the PIC-specific version for the actual
129 * hardware disable after having gotten the irq
130 * controller lock.
131 */
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Three cases:
 *  - IRQ_PER_CPU: run the single action with no locking (CPU-local);
 *  - IRQ_GUEST:   forward to __do_IRQ_guest() under desc->lock;
 *  - otherwise:   generic PENDING/INPROGRESS protocol so a second CPU
 *                 receiving the same irq mid-service does not run the
 *                 handler concurrently.
 * Always returns 1.
 */
fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction * action;
        unsigned int status;

        if (likely(desc->status & IRQ_PER_CPU)) {
                /*
                 * No locking required for CPU-local interrupts:
                 */
                desc->handler->ack(irq);
                local_irq_enable();
                desc->action->handler(irq, desc->action->dev_id, regs);
                local_irq_disable();
                desc->handler->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);

        /* Guest-bound irq: deliver as an event channel notification. */
        if (desc->status & IRQ_GUEST) {
                __do_IRQ_guest(irq);
                spin_unlock(&desc->lock);
                return 1;
        }

        desc->handler->ack(irq);
        status = desc->status & ~IRQ_REPLAY;
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                /* Run the handler with the lock dropped and irqs enabled. */
                spin_unlock_irq(&desc->lock);
                action->handler(irq, action->dev_id, regs);
                spin_lock_irq(&desc->lock);

                if (likely(!(desc->status & IRQ_PENDING)))
                        break;

                /* Another instance arrived meanwhile — service it too. */
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
222 /*
223 * IRQ autodetection code..
224 *
225 * This depends on the fact that any interrupt that
226 * comes in on to an unassigned handler will get stuck
227 * with "IRQ_WAITING" cleared and the interrupt
228 * disabled.
229 */
231 int setup_vector(unsigned int irq, struct irqaction * new)
232 {
233 unsigned long flags;
234 struct irqaction *old, **p;
235 irq_desc_t *desc = irq_descp(irq);
237 /*
238 * The following block of code has to be executed atomically
239 */
240 spin_lock_irqsave(&desc->lock,flags);
241 p = &desc->action;
242 if ((old = *p) != NULL) {
243 spin_unlock_irqrestore(&desc->lock,flags);
244 return -EBUSY;
245 }
247 *p = new;
249 desc->depth = 0;
250 desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
251 desc->handler->startup(irq);
252 desc->handler->enable(irq);
253 spin_unlock_irqrestore(&desc->lock,flags);
255 return 0;
256 }
/* Vectors reserved by xen (and thus not sharable with domains).
 * One bit per vector; manipulated with test_and_set_bit/clear_bit. */
unsigned long ia64_xen_vector[BITS_TO_LONGS(NR_IRQS)];
261 int setup_irq(unsigned int irq, struct irqaction * new)
262 {
263 unsigned int vec;
264 int res;
266 /* Get vector for IRQ. */
267 if (acpi_gsi_to_irq (irq, &vec) < 0)
268 return -ENOSYS;
269 /* Reserve the vector (and thus the irq). */
270 if (test_and_set_bit(vec, ia64_xen_vector))
271 return -EBUSY;
272 res = setup_vector (vec, new);
273 return res;
274 }
/*
 * Undo setup_irq(): release the vector reservation for GSI 'irq',
 * detach its action, disable the line, and wait until no CPU is still
 * running the old handler.  Silently returns if the GSI cannot be
 * translated.
 */
void free_irq(unsigned int irq)
{
        unsigned int vec;
        unsigned long flags;
        irq_desc_t *desc;

        /* Get vector for IRQ. */
        if (acpi_gsi_to_irq(irq, &vec) < 0)
                return;

        desc = irq_descp(vec);

        /* Detach and shut down atomically with respect to __do_IRQ(). */
        spin_lock_irqsave(&desc->lock, flags);
        clear_bit(vec, ia64_xen_vector);
        desc->action = NULL;
        desc->depth = 1;
        desc->status |= IRQ_DISABLED;
        desc->handler->shutdown(vec);
        spin_unlock_irqrestore(&desc->lock, flags);

        /* Spin until any in-flight handler on another CPU has finished. */
        while (desc->status & IRQ_INPROGRESS)
                cpu_relax();
}
300 /*
301 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
302 */
#define IRQ_MAX_GUESTS 7
/*
 * Per-irq bookkeeping for a physical interrupt bound to one or more
 * guest domains (stored in irq_desc[].action when IRQ_GUEST is set).
 */
typedef struct {
        u8 nr_guests;   /* number of domains currently bound */
        u8 in_flight;   /* deliveries awaiting a guest unmask/EOI */
        u8 shareable;   /* first binder allowed sharing this line */
        u8 ack_type;    /* one of the ACKTYPE_* values below */
#define ACKTYPE_NONE   0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask notification is required */
        struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
/*
 * Deliver guest-bound physical irq 'irq' as a pirq event to every
 * bound domain.  For lines needing a final ack (ack_type !=
 * ACKTYPE_NONE), each delivery that newly sets the domain's pirq_mask
 * bit is counted as in-flight until the guest EOIs it (see
 * pirq_guest_eoi()).  Called from __do_IRQ() with desc->lock held.
 */
void __do_IRQ_guest(int irq)
{
        irq_desc_t *desc = &irq_desc[irq];
        irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
        struct domain *d;
        int i;

        for ( i = 0; i < action->nr_guests; i++ )
        {
                d = action->guest[i];
                if ( (action->ack_type != ACKTYPE_NONE) &&
                     !test_and_set_bit(irq, &d->pirq_mask) )
                        action->in_flight++;
                send_guest_pirq(d, irq);
        }
}
332 int pirq_acktype(int irq)
333 {
334 irq_desc_t *desc = &irq_desc[irq];
336 if (!strcmp(desc->handler->typename, "IO-SAPIC-level"))
337 return ACKTYPE_UNMASK;
339 if (!strcmp(desc->handler->typename, "IO-SAPIC-edge"))
340 return ACKTYPE_NONE;
342 return ACKTYPE_NONE;
343 }
/*
 * Guest EOI for physical irq 'irq' on behalf of domain 'd': clear the
 * domain's in-flight bit and, when the last in-flight domain has
 * acknowledged, let the controller finish (unmask) the line via
 * ->end().  Returns 0, or -EINVAL for an out-of-range irq.
 */
int pirq_guest_eoi(struct domain *d, int irq)
{
        irq_desc_t *desc;

        if ( (irq < 0) || (irq >= NR_IRQS) )
                return -EINVAL;

        desc = &irq_desc[irq];
        spin_lock_irq(&desc->lock);
        if ( test_and_clear_bit(irq, &d->pirq_mask) &&
             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
        {
                /* Only ACKTYPE_UNMASK lines ever set pirq_mask bits. */
                ASSERT(((irq_guest_action_t*)desc->action)->ack_type == ACKTYPE_UNMASK);
                desc->handler->end(irq);
        }
        spin_unlock_irq(&desc->lock);

        return 0;
}
/*
 * For every pirq domain 'd' still has in flight (bits set in
 * d->pirq_mask), issue the EOI once its bound event channel is no
 * longer masked.  Always returns 0.
 */
int pirq_guest_unmask(struct domain *d)
{
        int irq;
        shared_info_t *s = d->shared_info;

        for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
              irq < NR_IRQS;
              irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
        {
                if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) )
                        pirq_guest_eoi(d, irq);
        }

        return 0;
}
/*
 * Bind physical irq 'irq' to v's domain so future occurrences are
 * delivered as pirq events.  'will_share' says whether the caller
 * accepts sharing the line with other domains.  Returns 0 on success;
 * -ENOSYS if no controller backs the irq, -EBUSY if Xen owns it, it is
 * unshareable, or the share limit is reached, -ENOMEM on allocation
 * failure.
 */
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
        irq_desc_t *desc = &irq_desc[irq];
        irq_guest_action_t *action;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&desc->lock, flags);

        /* No real controller behind this irq: nothing to bind to. */
        if (desc->handler == &no_irq_type) {
                spin_unlock_irqrestore(&desc->lock, flags);
                return -ENOSYS;
        }

        action = (irq_guest_action_t *)desc->action;

        if ( !(desc->status & IRQ_GUEST) )
        {
                /* First guest binder: the irq must not be in use by Xen. */
                if ( desc->action != NULL )
                {
                        gdprintk(XENLOG_INFO,
                                "Cannot bind IRQ %d to guest. In use by '%s'.\n",
                                irq, desc->action->name);
                        rc = -EBUSY;
                        goto out;
                }

                action = xmalloc(irq_guest_action_t);
                if ( (desc->action = (struct irqaction *)action) == NULL )
                {
                        gdprintk(XENLOG_INFO,
                                "Cannot bind IRQ %d to guest. Out of memory.\n",
                                irq);
                        rc = -ENOMEM;
                        goto out;
                }

                action->nr_guests = 0;
                action->in_flight = 0;
                action->shareable = will_share;
                action->ack_type = pirq_acktype(irq);

                desc->depth = 0;
                desc->status |= IRQ_GUEST;
                desc->status &= ~IRQ_DISABLED;
                desc->handler->startup(irq);

                /* Attempt to bind the interrupt target to the correct CPU. */
#if 0 /* FIXME CONFIG_SMP ??? */
                if ( desc->handler->set_affinity != NULL )
                        desc->handler->set_affinity(
                                irq, apicid_to_phys_cpu_present(d->processor));
#endif
        }
        else if ( !will_share || !action->shareable )
        {
                gdprintk(XENLOG_INFO,
                        "Cannot bind IRQ %d to guest. Will not share with others.\n",
                        irq);
                rc = -EBUSY;
                goto out;
        }

        if ( action->nr_guests == IRQ_MAX_GUESTS )
        {
                gdprintk(XENLOG_INFO,
                        "Cannot bind IRQ %d to guest. Already at max share.\n",
                        irq);
                rc = -EBUSY;
                goto out;
        }

        action->guest[action->nr_guests++] = v->domain;

 out:
        spin_unlock_irqrestore(&desc->lock, flags);
        return rc;
}
462 int pirq_guest_unbind(struct domain *d, int irq)
463 {
464 irq_desc_t *desc = &irq_desc[irq];
465 irq_guest_action_t *action;
466 unsigned long flags;
467 int i;
469 spin_lock_irqsave(&desc->lock, flags);
471 action = (irq_guest_action_t *)desc->action;
473 i = 0;
474 while ( action->guest[i] && (action->guest[i] != d) )
475 i++;
476 memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
477 action->nr_guests--;
479 if ( action->ack_type == ACKTYPE_UNMASK )
480 if ( test_and_clear_bit(irq, &d->pirq_mask) &&
481 (--action->in_flight == 0) )
482 desc->handler->end(irq);
484 if ( !action->nr_guests )
485 {
486 BUG_ON(action->in_flight != 0);
487 desc->action = NULL;
488 xfree(action);
489 desc->depth = 1;
490 desc->status |= IRQ_DISABLED;
491 desc->status &= ~IRQ_GUEST;
492 desc->handler->shutdown(irq);
493 }
495 spin_unlock_irqrestore(&desc->lock, flags);
496 return 0;
497 }
/*
 * Debug helper: log the first interrupt received on each vector
 * (0..255) together with the ITC timestamp.  'regs' is unused.
 */
void
xen_debug_irq(unsigned long vector, struct pt_regs *regs)
{
        //FIXME: For debug only, can be removed
        static char firstirq = 1;
        static char firsttime[256];
        static char firstpend[256];

        /*
         * BUG FIX: 'vector' is an unsigned long used to index 256-entry
         * arrays; guard against out-of-bounds access.
         */
        if (vector >= 256)
                return;

        /* Lazily initialise the per-vector flags on the first call. */
        if (firstirq) {
                int i;
                for (i=0;i<256;i++) firsttime[i] = 1;
                for (i=0;i<256;i++) firstpend[i] = 1;
                firstirq = 0;
        }
        if (firsttime[vector]) {
                printk("**** (entry) First received int on vector=%lu,itc=%lx\n",
                       (unsigned long) vector, ia64_get_itc());
                firsttime[vector] = 0;
        }
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 * NOTE(review): the body only drops the preempt count taken on
 * interrupt entry — no softirq processing is visible here; confirm it
 * happens elsewhere.
 */
void irq_exit(void)
{
        sub_preempt_count(IRQ_EXIT_OFFSET);
}
// this is a temporary hack until real console input is implemented
/* Pend 'irq' directly on dom0's vcpu 0.  'nada' and 'regs' are unused;
 * the signature matches the irqaction handler prototype. */
void guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
{
        vcpu_pend_interrupt(dom0->vcpu[0],irq);
}