ia64/xen-unstable

view xen/arch/ia64/xen/irq.c @ 10245:8b81c4e82f3e

[IA64] fix debug=y build: update ASSERT(acktype)

action->acktype is no longer defined here. Dereference
desc->action->acktype to get to it.

Signed-off-by: Aron Griffis <aron@hp.com>
author awilliam@xenbuild.aw
date Sat Jun 03 14:42:13 2006 -0600 (2006-06-03)
parents 003157eafd66
children 0f3bd7d23737
line source
1 /*
2 * linux/arch/ia64/kernel/irq.c
3 *
4 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the code used by various IRQ handling routines:
7 * asking for different IRQ's should be done through these routines
8 * instead of just grabbing them. Thus setups with different IRQ numbers
9 * shouldn't result in any weird surprises, and installing new handlers
10 * should be easier.
11 *
12 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
13 *
14 * 4/14/2004: Added code to handle cpu migration and do safe irq
15 * migration without lossing interrupts for iosapic
16 * architecture.
17 */
19 /*
20 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
21 *
22 * IRQs are in fact implemented a bit like signal handlers for the kernel.
23 * Naturally it's not a 1:1 relation, but there are similarities.
24 */
26 #include <linux/config.h>
27 #include <linux/errno.h>
28 #include <linux/module.h>
29 #include <linux/sched.h>
30 #include <linux/ioport.h>
31 #include <linux/interrupt.h>
32 #include <linux/timex.h>
33 #include <linux/slab.h>
34 #include <linux/ctype.h>
35 #include <linux/init.h>
36 #include <linux/seq_file.h>
38 #include <asm/atomic.h>
39 #include <asm/io.h>
40 #include <asm/smp.h>
41 #include <asm/system.h>
42 #include <asm/bitops.h>
43 #include <asm/uaccess.h>
44 #include <asm/pgalloc.h>
45 #include <asm/delay.h>
46 #include <xen/irq.h>
47 #include <asm/hw_irq.h>
49 #include <xen/event.h>
50 #define apicid_to_phys_cpu_present(x) 1
52 /*
53 * Linux has a controller-independent x86 interrupt architecture.
54 * every controller has a 'controller-template', that is used
55 * by the main code to do the right thing. Each driver-visible
56 * interrupt source is transparently wired to the appropriate
57 * controller. Thus drivers need not be aware of the
58 * interrupt-controller.
59 *
60 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
61 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
62 * (IO-APICs assumed to be messaging to Pentium local-APICs)
63 *
64 * the code is designed to be easily extended with new/different
65 * interrupt controllers, without having to do assembly magic.
66 */
68 /*
69 * Controller mappings for all interrupt sources:
70 */
/*
 * Per-IRQ descriptor table.  Every entry starts out disabled and wired
 * to the dummy 'no_irq_type' controller until a real handler is
 * installed via setup_irq()/request_irq() or a guest binds the line.
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
    [0 ... NR_IRQS-1] = {
        .status = IRQ_DISABLED,
        .handler = &no_irq_type,
        .lock = SPIN_LOCK_UNLOCKED
    }
};
79 void __do_IRQ_guest(int irq);
81 /*
82 * Special irq handlers.
83 */
85 void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
87 /*
88 * Generic no controller code
89 */
91 static void enable_none(unsigned int irq) { }
92 static unsigned int startup_none(unsigned int irq) { return 0; }
93 static void disable_none(unsigned int irq) { }
94 static void ack_none(unsigned int irq)
95 {
96 /*
97 * 'what should we do if we get a hw irq event on an illegal vector'.
98 * each architecture has to answer this themselves, it doesn't deserve
99 * a generic callback i think.
100 */
101 printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
102 }
104 /* startup is the same as "enable", shutdown is same as "disable" */
105 #define shutdown_none disable_none
106 #define end_none enable_none
108 struct hw_interrupt_type no_irq_type = {
109 "none",
110 startup_none,
111 shutdown_none,
112 enable_none,
113 disable_none,
114 ack_none,
115 end_none
116 };
118 atomic_t irq_err_count;
120 /* Some placeholder here, which are used by other files and we
121 * don't want to change too much now. Later they should be cleaned.
122 */
#ifdef CONFIG_SMP
/*
 * Stub: on Linux this spins until no handler for 'irq' is running on
 * any other CPU.  Here it is a placeholder kept only so callers (e.g.
 * free_irq below) still link — it provides NO synchronization yet.
 */
inline void synchronize_irq(unsigned int irq) {}
EXPORT_SYMBOL(synchronize_irq);
#endif
128 static int noirqdebug;
/*
 * Command-line hook for "noirqdebug": sets the file-static flag that
 * disables IRQ lockup detection.  Always returns 1 (option consumed),
 * per the __setup() convention.
 */
static int __init noirqdebug_setup(char *str)
{
    noirqdebug = 1;
    printk("IRQ lockup detection disabled\n");
    return 1;
}

__setup("noirqdebug", noirqdebug_setup);
139 /*
140 * Generic enable/disable code: this just calls
141 * down into the PIC-specific version for the actual
142 * hardware disable after having gotten the irq
143 * controller lock.
144 */
146 /*
147 * do_IRQ handles all normal device IRQ's (the special
148 * SMP cross-CPU interrupts have their own specific
149 * handlers).
150 */
/*
 * Main dispatch for a hardware interrupt.
 *
 * Three paths:
 *  - IRQ_PER_CPU: CPU-local, no descriptor lock needed; ack, run the
 *    single handler with interrupts enabled, then end.
 *  - IRQ_GUEST: the line is bound to one or more guest domains; hand
 *    off to __do_IRQ_guest() under the lock.
 *  - Otherwise: the Linux-style PENDING/INPROGRESS protocol below,
 *    which lets a second instance of the same irq arriving on another
 *    CPU be replayed by the CPU already in the handler loop.
 *
 * Returns 1 unconditionally.
 */
fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    irq_desc_t *desc = irq_desc + irq;
    struct irqaction * action;
    unsigned int status;

    if (likely(desc->status & IRQ_PER_CPU)) {
        /*
         * No locking required for CPU-local interrupts:
         */
        desc->handler->ack(irq);
        local_irq_enable();
        desc->action->handler(irq, desc->action->dev_id, regs);
        local_irq_disable();
        desc->handler->end(irq);
        return 1;
    }

    spin_lock(&desc->lock);

    if (desc->status & IRQ_GUEST) {
        /* Guest-bound line: notify the bound domain(s) and return. */
        __do_IRQ_guest(irq);
        spin_unlock(&desc->lock);
        return 1;
    }

    desc->handler->ack(irq);
    status = desc->status & ~IRQ_REPLAY;
    status |= IRQ_PENDING; /* we _want_ to handle it */

    /*
     * If the IRQ is disabled for whatever reason, we cannot
     * use the action we have.
     */
    action = NULL;
    if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
        action = desc->action;
        status &= ~IRQ_PENDING; /* we commit to handling */
        status |= IRQ_INPROGRESS; /* we are handling it */
    }
    desc->status = status;

    /*
     * If there is no IRQ handler or it was disabled, exit early.
     * Since we set PENDING, if another processor is handling
     * a different instance of this same irq, the other processor
     * will take care of it.
     */
    if (unlikely(!action))
        goto out;

    /*
     * Edge triggered interrupts need to remember
     * pending events.
     * This applies to any hw interrupts that allow a second
     * instance of the same irq to arrive while we are in do_IRQ
     * or in the handler. But the code here only handles the _second_
     * instance of the irq, not the third or fourth. So it is mostly
     * useful for irq hardware that does not mask cleanly in an
     * SMP environment.
     */
    for (;;) {
        /* Run the handler unlocked with interrupts enabled ... */
        spin_unlock_irq(&desc->lock);
        action->handler(irq, action->dev_id, regs);
        spin_lock_irq(&desc->lock);

        /* ... then check whether another instance arrived meanwhile. */
        if (likely(!(desc->status & IRQ_PENDING)))
            break;

        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;

 out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    return 1;
}
235 /**
236 * request_irq - allocate an interrupt line
237 * @irq: Interrupt line to allocate
238 * @handler: Function to be called when the IRQ occurs
239 * @irqflags: Interrupt type flags
240 * @devname: An ascii name for the claiming device
241 * @dev_id: A cookie passed back to the handler function
242 *
243 * This call allocates interrupt resources and enables the
244 * interrupt line and IRQ handling. From the point this
245 * call is made your handler function may be invoked. Since
246 * your handler function must clear any interrupt the board
247 * raises, you must take care both to initialise your hardware
248 * and to set up the interrupt handler in the right order.
249 *
250 * Dev_id must be globally unique. Normally the address of the
251 * device data structure is used as the cookie. Since the handler
252 * receives this value it makes sense to use it.
253 *
254 * If your interrupt is shared you must pass a non NULL dev_id
255 * as this is required when freeing the interrupt.
256 *
257 * Flags:
258 *
259 * SA_SHIRQ Interrupt is shared
260 *
261 * SA_INTERRUPT Disable local interrupts while processing
262 *
263 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
264 *
265 */
267 int request_irq(unsigned int irq,
268 irqreturn_t (*handler)(int, void *, struct pt_regs *),
269 unsigned long irqflags,
270 const char * devname,
271 void *dev_id)
272 {
273 int retval;
274 struct irqaction * action;
276 /*
277 * Sanity-check: shared interrupts should REALLY pass in
278 * a real dev-ID, otherwise we'll have trouble later trying
279 * to figure out which interrupt is which (messes up the
280 * interrupt freeing logic etc).
281 */
282 if (irqflags & SA_SHIRQ) {
283 if (!dev_id)
284 printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
285 }
287 if (irq >= NR_IRQS)
288 return -EINVAL;
289 if (!handler)
290 return -EINVAL;
292 action = xmalloc(struct irqaction);
293 if (!action)
294 return -ENOMEM;
296 action->handler = (void *) handler;
297 action->name = devname;
298 action->dev_id = dev_id;
300 retval = setup_irq(irq, action);
301 if (retval)
302 xfree(action);
303 return retval;
304 }
306 EXPORT_SYMBOL(request_irq);
308 /**
309 * free_irq - free an interrupt
310 * @irq: Interrupt line to free
311 * @dev_id: Device identity to free
312 *
313 * Remove an interrupt handler. The handler is removed and if the
314 * interrupt line is no longer in use by any driver it is disabled.
315 * On a shared IRQ the caller must ensure the interrupt is disabled
316 * on the card it drives before calling this function. The function
317 * does not return until any executing interrupts for this IRQ
318 * have completed.
319 *
320 * This function must not be called from interrupt context.
321 */
323 void free_irq(unsigned int irq)
324 {
325 irq_desc_t *desc;
326 unsigned long flags;
328 if (irq >= NR_IRQS)
329 return;
331 desc = irq_descp(irq);
332 spin_lock_irqsave(&desc->lock,flags);
333 if (desc->action) {
334 struct irqaction * action = desc->action;
335 desc->action = NULL;
336 desc->status |= IRQ_DISABLED;
337 desc->handler->shutdown(irq);
338 spin_unlock_irqrestore(&desc->lock,flags);
340 /* Wait to make sure it's not being used on another CPU */
341 synchronize_irq(irq);
342 xfree(action);
343 return;
344 }
345 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
346 spin_unlock_irqrestore(&desc->lock,flags);
347 }
349 EXPORT_SYMBOL(free_irq);
351 /*
352 * IRQ autodetection code..
353 *
354 * This depends on the fact that any interrupt that
355 * comes in on to an unassigned handler will get stuck
356 * with "IRQ_WAITING" cleared and the interrupt
357 * disabled.
358 */
360 int setup_irq(unsigned int irq, struct irqaction * new)
361 {
362 unsigned long flags;
363 struct irqaction *old, **p;
364 irq_desc_t *desc = irq_descp(irq);
366 /*
367 * The following block of code has to be executed atomically
368 */
369 spin_lock_irqsave(&desc->lock,flags);
370 p = &desc->action;
371 if ((old = *p) != NULL) {
372 spin_unlock_irqrestore(&desc->lock,flags);
373 return -EBUSY;
374 }
376 *p = new;
378 desc->depth = 0;
379 desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
380 desc->handler->startup(irq);
381 spin_unlock_irqrestore(&desc->lock,flags);
383 return 0;
384 }
386 /*
387 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
388 */
#define IRQ_MAX_GUESTS 7
/*
 * Bookkeeping for a physical IRQ bound to guest domains.  Overlays
 * desc->action (cast back and forth) when IRQ_GUEST is set in
 * desc->status.
 */
typedef struct {
    u8 nr_guests;   /* number of valid entries in guest[] */
    u8 in_flight;   /* notifications sent but not yet EOI'd by a guest */
    u8 shareable;   /* may additional guests bind to this line? */
    u8 ack_type;    /* one of the ACKTYPE_* values below */
#define ACKTYPE_NONE 0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask notification is required */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
401 void __do_IRQ_guest(int irq)
402 {
403 irq_desc_t *desc = &irq_desc[irq];
404 irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
405 struct domain *d;
406 int i;
408 for ( i = 0; i < action->nr_guests; i++ )
409 {
410 d = action->guest[i];
411 if ( (action->ack_type != ACKTYPE_NONE) &&
412 !test_and_set_bit(irq, &d->pirq_mask) )
413 action->in_flight++;
414 send_guest_pirq(d, irq);
415 }
416 }
418 int pirq_acktype(int irq)
419 {
420 irq_desc_t *desc = &irq_desc[irq];
422 if (!strcmp(desc->handler->typename, "IO-SAPIC-level"))
423 return ACKTYPE_UNMASK;
425 if (!strcmp(desc->handler->typename, "IO-SAPIC-edge"))
426 return ACKTYPE_NONE;
428 return ACKTYPE_NONE;
429 }
/*
 * Guest EOI for a physical IRQ: clear the domain's bit in pirq_mask
 * and, once the last in-flight notification is acknowledged, call the
 * controller's ->end() to unmask the line.  Only ACKTYPE_UNMASK lines
 * ever reach the ->end() path (see the ASSERT).
 * Returns 0, or -EINVAL for an out-of-range irq.
 */
int pirq_guest_eoi(struct domain *d, int irq)
{
    irq_desc_t *desc;

    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    desc = &irq_desc[irq];
    spin_lock_irq(&desc->lock);
    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
         (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
    {
        ASSERT(((irq_guest_action_t*)desc->action)->ack_type == ACKTYPE_UNMASK);
        desc->handler->end(irq);
    }
    spin_unlock_irq(&desc->lock);

    return 0;
}
452 int pirq_guest_unmask(struct domain *d)
453 {
454 int irq;
455 shared_info_t *s = d->shared_info;
457 for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
458 irq < NR_IRQS;
459 irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
460 {
461 if ( !test_bit(d->pirq_to_evtchn[irq], &s->evtchn_mask[0]) )
462 pirq_guest_eoi(d, irq);
464 }
466 return 0;
467 }
/*
 * Bind physical IRQ 'irq' to the domain of vcpu 'v'.
 *
 * First binder converts the line to guest mode (allocates the
 * irq_guest_action_t overlay, starts the line up); later binders are
 * only accepted if both sides agreed to share.  Returns 0 on success,
 * -ENOSYS if no controller drives the line, -EBUSY if it is claimed by
 * Xen or unshareable or full, -ENOMEM on allocation failure.
 */
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int rc = 0;

    spin_lock_irqsave(&desc->lock, flags);

    if (desc->handler == &no_irq_type) {
        /* No real controller behind this line. */
        spin_unlock_irqrestore(&desc->lock, flags);
        return -ENOSYS;
    }

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->status & IRQ_GUEST) )
    {
        /* Line not yet in guest mode: it must be completely free. */
        if ( desc->action != NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
                    irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;
        action->ack_type = pirq_acktype(irq);

        /* Switch the descriptor to guest mode and enable the line. */
        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(irq);

        /* Attempt to bind the interrupt target to the correct CPU. */
#if 0 /* FIXME CONFIG_SMP ??? */
        if ( desc->handler->set_affinity != NULL )
            desc->handler->set_affinity(
                irq, apicid_to_phys_cpu_present(d->processor));
#endif
    }
    else if ( !will_share || !action->shareable )
    {
        /* Already guest-bound, but one of the parties refuses to share. */
        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
                irq);
        rc = -EBUSY;
        goto out;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}
542 int pirq_guest_unbind(struct domain *d, int irq)
543 {
544 irq_desc_t *desc = &irq_desc[irq];
545 irq_guest_action_t *action;
546 unsigned long flags;
547 int i;
549 spin_lock_irqsave(&desc->lock, flags);
551 action = (irq_guest_action_t *)desc->action;
553 i = 0;
554 while ( action->guest[i] && (action->guest[i] != d) )
555 i++;
556 memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
557 action->nr_guests--;
559 if ( action->ack_type == ACKTYPE_UNMASK )
560 if ( test_and_clear_bit(irq, &d->pirq_mask) &&
561 (--action->in_flight == 0) )
562 desc->handler->end(irq);
564 if ( !action->nr_guests )
565 {
566 BUG_ON(action->in_flight != 0);
567 desc->action = NULL;
568 xfree(action);
569 desc->depth = 1;
570 desc->status |= IRQ_DISABLED;
571 desc->status &= ~IRQ_GUEST;
572 desc->handler->shutdown(irq);
573 }
575 spin_unlock_irqrestore(&desc->lock, flags);
576 return 0;
577 }
/*
 * Debug helper: print a one-time message the first time each interrupt
 * vector is received.  FIXME: for debug only, can be removed.
 *
 * Fix: 'vector' is used to index 256-entry arrays; guard against
 * out-of-bounds values instead of silently corrupting memory.
 * NOTE(review): firstpend[] is initialized but never consulted here —
 * presumably used by a companion debug path; left in place.
 */
void
xen_debug_irq(unsigned long vector, struct pt_regs *regs)
{
    static char firstirq = 1;
    static char firsttime[256];
    static char firstpend[256];

    if (firstirq) {
        int i;
        for (i=0;i<256;i++) firsttime[i] = 1;
        for (i=0;i<256;i++) firstpend[i] = 1;
        firstirq = 0;
    }

    /* Arrays above only cover vectors 0..255. */
    if (vector >= 256)
        return;

    if (firsttime[vector]) {
        printf("**** (entry) First received int on vector=%lu,itc=%lx\n",
               (unsigned long) vector, ia64_get_itc());
        firsttime[vector] = 0;
    }
}
599 /*
600 * Exit an interrupt context. Process softirqs if needed and possible:
601 */
/*
 * Leave interrupt context: drop the hardirq preemption count taken on
 * entry.  Unlike the Linux original, pending softirqs are NOT run
 * here — that is deferred to process_soft_irq(), called from
 * ia64_leave_kernel.
 */
void irq_exit(void)
{
    sub_preempt_count(IRQ_EXIT_OFFSET);
}
607 /*
608 * ONLY gets called from ia64_leave_kernel
609 * ONLY call with interrupts enabled
610 */
611 void process_soft_irq(void)
612 {
613 if (!in_interrupt() && local_softirq_pending()) {
614 add_preempt_count(SOFTIRQ_OFFSET);
615 do_softirq();
616 sub_preempt_count(SOFTIRQ_OFFSET);
617 }
618 }
620 // this is a temporary hack until real console input is implemented
621 extern void domain_pend_keyboard_interrupt(int irq);
/*
 * IRQ handler that forwards (simulated) keyboard input to the domain.
 * Temporary hack until real console input is implemented; 'nada' and
 * 'regs' are unused.
 */
void guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
{
    domain_pend_keyboard_interrupt(irq);
}
627 void serial_input_init(void)
628 {
629 int retval;
630 int irq = 0x30; // FIXME
632 retval = request_irq(irq,guest_forward_keyboard_input,SA_INTERRUPT,"siminput",NULL);
633 if (retval) {
634 printk("serial_input_init: broken request_irq call\n");
635 while(1);
636 }
637 }