ia64/xen-unstable

view xen/arch/ia64/xen/irq.c @ 9747:de2dc4e7966a

[IA64] Add support for physdev_ops

Add support for physdev ops, so that IOSAPIC RTEs are
now managed by Xen. Dom0 now issues a hypercall to read
or write an RTE entry. IRQ vector allocation is likewise
owned by Xen now.

After this change the IOSAPIC is almost entirely owned by
Xen; the only exception is the IOSAPIC EOI, which is still
issued by dom0 directly. That is acceptable for now, since
dom0 owns all external physical devices. Later, the full
event channel mechanism will provide the support needed for
driver domains, and at that point dom0 will naturally issue
physdev_op (PHYSDEVOP_IRQ_UNMASK_NOTIFY) in place of the
IOSAPIC EOI.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Fri Apr 21 09:03:19 2006 -0600 (2006-04-21)
parents 5cc367720223
children 42a8e3101c6c
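
The EOI replacement described in the commit message pairs with pirq_guest_unmask() in the file below: once event channels are in place, dom0 asks Xen to unmask its pending pirqs instead of writing the IOSAPIC EOI itself. The following is a minimal, hypothetical sketch of that dom0-side call; the HYPERVISOR_physdev_op() wrapper, the physdev_op_t layout, and the header paths are assumptions modelled on the Xen public interface of that era and are not part of this changeset.

/* Hypothetical dom0-side sketch (not part of this changeset): ask Xen to
 * unmask guest-bound pirqs instead of issuing the IOSAPIC EOI directly.
 * On the Xen side this ends up in pirq_guest_unmask() below, which walks
 * d->pirq_mask and calls ->end() on each pirq whose event channel is no
 * longer masked. */
#include <xen/interface/physdev.h>   /* physdev_op_t, PHYSDEVOP_* (assumed path) */
#include <asm/hypercall.h>           /* HYPERVISOR_physdev_op() (assumed path) */

static inline void pirq_unmask_notify(void)
{
    physdev_op_t op;

    op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;  /* command named in the commit message */
    (void)HYPERVISOR_physdev_op(&op);      /* return value not needed here */
}
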
line source
/*
 *  linux/arch/ia64/kernel/irq.c
 *
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 *  4/14/2004: Added code to handle cpu migration and do safe irq
 *             migration without losing interrupts for iosapic
 *             architecture.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <xen/irq.h>
#include <asm/hw_irq.h>

#include <xen/event.h>
#define apicid_to_phys_cpu_present(x)   1
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * the code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
    [0 ... NR_IRQS-1] = {
        .status = IRQ_DISABLED | IRQ_GUEST,
        .handler = &no_irq_type,
        .lock = SPIN_LOCK_UNLOCKED
    }
};
/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
    /*
     * 'what should we do if we get a hw irq event on an illegal vector'.
     * each architecture has to answer this themselves, it doesn't deserve
     * a generic callback i think.
     */
    printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};
atomic_t irq_err_count;

/*
 * These are placeholders used by other files; we do not want to change
 * them too much right now. They should be cleaned up later.
 */
#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq) {}
EXPORT_SYMBOL(synchronize_irq);
#endif

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
    noirqdebug = 1;
    printk("IRQ lockup detection disabled\n");
    return 1;
}

__setup("noirqdebug", noirqdebug_setup);
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
    irq_desc_t *desc = irq_desc + irq;
    struct irqaction *action;
    unsigned int status;

    if (likely(desc->status & IRQ_PER_CPU)) {
        /*
         * No locking required for CPU-local interrupts:
         */
        desc->handler->ack(irq);
        local_irq_enable();
        desc->action->handler(irq, desc->action->dev_id, regs);
        local_irq_disable();
        desc->handler->end(irq);
        return 1;
    }

    spin_lock(&desc->lock);

    if (desc->status & IRQ_GUEST) {
        /* __do_IRQ_guest(irq); */
        vcpu_pend_interrupt(dom0->vcpu[0], irq);
        vcpu_wake(dom0->vcpu[0]);
        spin_unlock(&desc->lock);
        return 1;
    }

    desc->handler->ack(irq);
    status = desc->status & ~IRQ_REPLAY;
    status |= IRQ_PENDING; /* we _want_ to handle it */

    /*
     * If the IRQ is disabled for whatever reason, we cannot
     * use the action we have.
     */
    action = NULL;
    if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
        action = desc->action;
        status &= ~IRQ_PENDING; /* we commit to handling */
        status |= IRQ_INPROGRESS; /* we are handling it */
    }
    desc->status = status;

    /*
     * If there is no IRQ handler or it was disabled, exit early.
     * Since we set PENDING, if another processor is handling
     * a different instance of this same irq, the other processor
     * will take care of it.
     */
    if (unlikely(!action))
        goto out;

    /*
     * Edge triggered interrupts need to remember
     * pending events.
     * This applies to any hw interrupts that allow a second
     * instance of the same irq to arrive while we are in do_IRQ
     * or in the handler. But the code here only handles the _second_
     * instance of the irq, not the third or fourth. So it is mostly
     * useful for irq hardware that does not mask cleanly in an
     * SMP environment.
     */
    for (;;) {
        spin_unlock_irq(&desc->lock);
        action->handler(irq, action->dev_id, regs);
        spin_lock_irq(&desc->lock);

        if (likely(!(desc->status & IRQ_PENDING)))
            break;

        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;

out:
    /*
     * The ->end() handler has to deal with interrupts which got
     * disabled while the handler was running.
     */
    desc->handler->end(irq);
    spin_unlock(&desc->lock);

    return 1;
}
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ             Interrupt is shared
 *
 * SA_INTERRUPT         Disable local interrupts while processing
 *
 * SA_SAMPLE_RANDOM     The interrupt can be used for entropy
 */

int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char *devname,
                void *dev_id)
{
    int retval;
    struct irqaction *action;

    /*
     * Sanity-check: shared interrupts should REALLY pass in
     * a real dev-ID, otherwise we'll have trouble later trying
     * to figure out which interrupt is which (messes up the
     * interrupt freeing logic etc).
     */
    if (irqflags & SA_SHIRQ) {
        if (!dev_id)
            printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
    }

    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if (!action)
        return -ENOMEM;

    action->handler = (void *) handler;
    action->name = devname;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        xfree(action);
    return retval;
}

EXPORT_SYMBOL(request_irq);
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */

void free_irq(unsigned int irq)
{
    irq_desc_t *desc;
    unsigned long flags;

    if (irq >= NR_IRQS)
        return;

    desc = irq_descp(irq);
    spin_lock_irqsave(&desc->lock, flags);
    if (desc->action) {
        struct irqaction *action = desc->action;
        desc->action = NULL;
        desc->status |= IRQ_DISABLED;
        desc->handler->shutdown(irq);
        spin_unlock_irqrestore(&desc->lock, flags);

        /* Wait to make sure it's not being used on another CPU */
        synchronize_irq(irq);
        xfree(action);
        return;
    }
    printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
    spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(free_irq);
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned long flags;
    struct irqaction *old, **p;
    irq_desc_t *desc = irq_descp(irq);

    /*
     * The following block of code has to be executed atomically
     */
    spin_lock_irqsave(&desc->lock, flags);
    p = &desc->action;
    if ((old = *p) != NULL) {
        spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
    }

    *p = new;

    desc->depth = 0;
    desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
    desc->handler->startup(irq);
    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}
/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

void __do_IRQ_guest(int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain *d;
    int i;

    for ( i = 0; i < action->nr_guests; i++ )
    {
        d = action->guest[i];
        if ( !test_and_set_bit(irq, &d->pirq_mask) )
            action->in_flight++;
        send_guest_pirq(d, irq);
    }
}
int pirq_guest_unmask(struct domain *d)
{
    irq_desc_t *desc;
    int pirq;
    shared_info_t *s = d->shared_info;

    for ( pirq = find_first_bit(d->pirq_mask, NR_PIRQS);
          pirq < NR_PIRQS;
          pirq = find_next_bit(d->pirq_mask, NR_PIRQS, pirq+1) )
    {
        desc = &irq_desc[pirq];
        spin_lock_irq(&desc->lock);
        if ( !test_bit(d->pirq_to_evtchn[pirq], &s->evtchn_mask[0]) &&
             test_and_clear_bit(pirq, &d->pirq_mask) &&
             (--((irq_guest_action_t *)desc->action)->in_flight == 0) )
            desc->handler->end(pirq);
        spin_unlock_irq(&desc->lock);
    }

    return 0;
}
int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int rc = 0;

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. In use by '%s'.\n",
                    irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(irq);

        /* Attempt to bind the interrupt target to the correct CPU. */
#if 0 /* FIXME CONFIG_SMP ??? */
        if ( desc->handler->set_affinity != NULL )
            desc->handler->set_affinity(
                irq, apicid_to_phys_cpu_present(d->processor));
#endif
    }
    else if ( !will_share || !action->shareable )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Will not share with others.\n",
                irq);
        rc = -EBUSY;
        goto out;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        DPRINTK("Cannot bind IRQ %d to guest. Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}
int pirq_guest_unbind(struct domain *d, int irq)
{
    irq_desc_t *desc = &irq_desc[irq];
    irq_guest_action_t *action;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( test_and_clear_bit(irq, &d->pirq_mask) &&
         (--action->in_flight == 0) )
        desc->handler->end(irq);

    if ( action->nr_guests == 1 )
    {
        desc->action = NULL;
        xfree(action);
        desc->depth = 1;
        desc->status |= IRQ_DISABLED;
        desc->status &= ~IRQ_GUEST;
        desc->handler->shutdown(irq);
    }
    else
    {
        i = 0;
        while ( action->guest[i] != d )
            i++;
        memmove(&action->guest[i], &action->guest[i+1],
                (IRQ_MAX_GUESTS-i-1) * sizeof(action->guest[0]));
        action->nr_guests--;
    }

    spin_unlock_irqrestore(&desc->lock, flags);
    return 0;
}
void
xen_debug_irq(unsigned long vector, struct pt_regs *regs)
{
    //FIXME: For debug only, can be removed
    static char firstirq = 1;
    static char firsttime[256];
    static char firstpend[256];
    if (firstirq) {
        int i;
        for (i = 0; i < 256; i++) firsttime[i] = 1;
        for (i = 0; i < 256; i++) firstpend[i] = 1;
        firstirq = 0;
    }
    if (firsttime[vector]) {
        printf("**** (entry) First received int on vector=%lu,itc=%lx\n",
               (unsigned long) vector, ia64_get_itc());
        firsttime[vector] = 0;
    }
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
    sub_preempt_count(IRQ_EXIT_OFFSET);
}
/*
 * ONLY gets called from ia64_leave_kernel
 * ONLY call with interrupts enabled
 */
void process_soft_irq(void)
{
    if (!in_interrupt() && local_softirq_pending()) {
        add_preempt_count(SOFTIRQ_OFFSET);
        do_softirq();
        sub_preempt_count(SOFTIRQ_OFFSET);
    }
}

// this is a temporary hack until real console input is implemented
extern void domain_pend_keyboard_interrupt(int irq);
irqreturn_t guest_forward_keyboard_input(int irq, void *nada, struct pt_regs *regs)
{
    domain_pend_keyboard_interrupt(irq);
    return 0;
}

void serial_input_init(void)
{
    int retval;
    int irq = 0x30;  // FIXME

    retval = request_irq(irq, guest_forward_keyboard_input, SA_INTERRUPT, "siminput", NULL);
    if (retval) {
        printk("serial_input_init: broken request_irq call\n");
        while (1);
    }
}