ia64/xen-unstable

linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c @ 10692:306d7857928c

[IA64] Save & restore.

Add xc_ia64_linux_save.c and xc_ia64_linux_restore.c.
The vcpu context now carries more registers and state (e.g. tr registers).
Per-cpu irqs are deallocated when a cpu is switched off.
#if/#endif guards added in reboot.c for ia64.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Jul 11 12:51:18 2006 -0600 (2006-07-11)
parents a5bf90abcbe8
children 4ebb9c91c886
/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#ifdef CONFIG_XEN
#include <linux/cpu.h>
#endif

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#define IRQ_DEBUG	0

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
int
assign_irq_vector (int irq)
{
	int pos, vector;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		extern int xen_assign_irq_vector(int);
		return xen_assign_irq_vector(irq);
	}
#endif
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	return vector;
}

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}
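/*
 * Editorial sketch, not part of the original file: a device setup path would
 * typically pair the two allocators above roughly as follows (the caller and
 * the "program an RTE" step are hypothetical, shown only for illustration):
 *
 *	int vector = assign_irq_vector(irq);
 *	if (vector < 0)
 *		return vector;		// -ENOSPC once all device vectors are taken
 *	// ...route the device interrupt to this vector, e.g. via an IOSAPIC RTE...
 *	free_irq_vector(vector);	// on teardown, clears the bitmap slot
 */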
#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
#	define IS_RESCHEDULE(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (jiffies - last_time > 5*HZ)
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			__do_IRQ(local_vector_to_irq(vector), regs);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
}
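/*
 * Editorial note, not part of the original file: the "16" in the comment
 * above follows from the IA-64 interrupt priority scheme.  External vectors
 * fall into 16 priority classes of 16 vectors each (class = vector/16), and
 * writing the in-service vector into cr.tpr masks every vector in the same
 * or a lower class.  At most one interrupt from each strictly higher class
 * can therefore nest, bounding the depth at ~16 instead of roughly one level
 * per usable vector (~240).
 */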
#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call __do_IRQ just as ia64_handle_irq would have
			 * from a real interrupt handler, passing NULL for
			 * pt_regs (the handlers invoked here are expected to
			 * tolerate that).  This code could probably be shared
			 * with ia64_handle_irq.
			 */
			vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif
#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	SA_INTERRUPT,
	.name =		"IPI"
};
#endif
#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#include <xen/interface/callback.h>

static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static char timer_name[NR_CPUS][15];
static char ipi_name[NR_CPUS][15];
static char resched_name[NR_CPUS][15];

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 should be a generous upper bound, since only a handful of percpu irqs
 * are registered this early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt = 0;
static unsigned short saved_irq_cnt = 0;
static int xen_slab_ready = 0;
/* Dummy stub.  We could check for RESCHEDULE_VECTOR before calling __do_IRQ,
 * but that check would issue several extra accesses to percpu data and so
 * add unnecessary overhead to the other interrupt paths.
 */
static irqreturn_t
handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
{

	return IRQ_HANDLED;
}

static struct irqaction resched_irqaction = {
	.handler =	handle_reschedule,
	.flags =	SA_INTERRUPT,
	.name =		"RESCHED"
};
/*
 * This is the xen flavour of percpu irq registration, which binds the irq
 * to the xen-specific evtchn sub-system.  One complication is that the xen
 * evtchn binding interface depends on kmalloc, because the associated port
 * has to be freed when the device/cpu goes down.  So, on the BSP, we cache
 * registrations made before the slab allocator is ready and replay them
 * later; registrations made after slab is ready are hooked up to the xen
 * evtchn immediately.
 *
 * FIXME: MCA is not supported so far, so the "nomca" boot parameter is
 * required.
 */
static void
xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
{
	unsigned int cpu = smp_processor_id();
	int ret = 0;

	if (xen_slab_ready) {
		switch (irq) {
		case IA64_TIMER_VECTOR:
			sprintf(timer_name[cpu], "%s%d", action->name, cpu);
			ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				timer_name[cpu], action->dev_id);
			per_cpu(timer_irq, cpu) = ret;
			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
			break;
		case IA64_IPI_RESCHEDULE:
			sprintf(resched_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				resched_name[cpu], action->dev_id);
			per_cpu(resched_irq, cpu) = ret;
			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
			break;
		case IA64_IPI_VECTOR:
			sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
				action->handler, action->flags,
				ipi_name[cpu], action->dev_id);
			per_cpu(ipi_irq, cpu) = ret;
			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
			break;
		case IA64_SPURIOUS_INT_VECTOR:
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
			break;
		}
		BUG_ON(ret < 0);
	}

	/* For the BSP, cache registered percpu irqs so they can be re-walked
	 * when initializing the APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = irq;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
static void
xen_bind_early_percpu_irq (void)
{
	int i;

	xen_slab_ready = 1;
	/* There is no race when accessing this cached array, since only the
	 * BSP runs this path, shortly before the APs are brought up.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
}

/* FIXME: There is no obvious way to check whether the slab allocator is
 * ready, so as a workaround we piggy-back on the late_time_init hook, which
 * runs after slab is up.
 */
extern void (*late_time_init)(void);
extern char xen_event_callback;
extern void xen_init_IRQ(void);
#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
		       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn.  */
		if (per_cpu(ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
			per_cpu(ipi_irq, cpu) = -1;
		}
		if (per_cpu(resched_irq, cpu) >= 0) {
			unbind_from_irqhandler (per_cpu(resched_irq, cpu),
						NULL);
			per_cpu(resched_irq, cpu) = -1;
		}
		if (per_cpu(timer_irq, cpu) >= 0) {
			unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL);
			per_cpu(timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
#endif
DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	unsigned int i = 0;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = (unsigned long)&xen_event_callback,
	};

	if (cpu == 0) {
		/* Initialization was already done for the boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once. */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should be piggy-backed onto the vcpu guest context setup. */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));

	for (i = 0; i < saved_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_XEN */
void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	irq_desc_t *desc;
	unsigned int irq;

#ifdef CONFIG_XEN
	if (is_running_on_xen())
		return xen_register_percpu_irq(vec, action, 1);
#endif

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
			desc = irq_descp(irq);
			desc->status |= IRQ_PER_CPU;
			desc->handler = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
}
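/*
 * Editorial sketch, not part of the original file: a typical caller wires a
 * per-cpu interrupt roughly like this (timer_irqaction and timer_interrupt
 * are illustrative names, assumed to live in the timer setup code):
 *
 *	static struct irqaction timer_irqaction = {
 *		.handler =	timer_interrupt,
 *		.flags =	SA_INTERRUPT,
 *		.name =		"timer"
 *	};
 *
 *	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
 *
 * On bare metal this marks the irq IRQ_PER_CPU with the LSAPIC irq type;
 * under xen the call is diverted to xen_register_percpu_irq() instead.
 */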
void __init
init_IRQ (void)
{
#ifdef CONFIG_XEN
	printk(KERN_INFO "init_IRQ called from %p\n",
	       __builtin_return_address (0));
	/* Maybe put into platform_irq_init later */
	if (is_running_on_xen()) {
		struct callback_register event = {
			.type = CALLBACKTYPE_event,
			.address = (unsigned long)&xen_event_callback,
		};
		xen_init_IRQ();
		BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
		late_time_init = xen_bind_early_percpu_irq;
#ifdef CONFIG_SMP
		register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
#endif /* CONFIG_SMP */
	}
#endif /* CONFIG_XEN */
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		int irq = -1;

		/* TODO: we need to call vcpu_up here */
		if (unlikely(vector == ap_wakeup_vector)) {
			extern void xen_send_ipi (int cpu, int vec);
			xen_send_ipi (cpu, vector);
			//vcpu_prepare_and_up(cpu);
			return;
		}

		switch (vector) {
		case IA64_IPI_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
			break;
		case IA64_IPI_RESCHEDULE:
			irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
			break;
		default:
			printk(KERN_WARNING "Unsupported IPI type 0x%x\n", vector);
			irq = 0;
			break;
		}

		BUG_ON(irq < 0);
		notify_remote_via_irq(irq);
		return;
	}
#endif /* CONFIG_XEN */

#ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
#else
	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
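/*
 * Editorial note, not part of the original file: a worked example of the
 * native encoding above.  phys_cpu_id packs the target's 8-bit ID and 8-bit
 * EID, so for phys_cpu_id == 0x0102, vector == 0xfd, delivery_mode == 0 and
 * redirect == 0:
 *
 *	ipi_data = (0 << 8) | (0xfd & 0xff)      = 0x00fd
 *	ipi_addr = ipi_base_addr + (0x0102 << 4) = ipi_base_addr + 0x1020
 *
 * The target processor is selected purely by the offset within the processor
 * interrupt block, while the vector and delivery mode travel in the 64-bit
 * value written to that address.
 */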