direct-io.hg
linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c @ 10414:a5bf90abcbe8

[IA64] fix garbage in irq_ia64.c kernel message

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author   awilliam@xenbuild.aw
date     Thu Jun 15 08:56:47 2006 -0600 (2006-06-15)
parents  d8d2b5c08245
children 306d7857928c

/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *	    PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *	    Added CPU Hotplug handling for IPF.
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
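
/*
 * Note (added for clarity; values assumed from the ia64 headers of this
 * era): IA64_IPI_DEFAULT_BASE_ADDR is the architected processor
 * interrupt block (0xfee00000), and OR-ing in __IA64_UNCACHED_OFFSET
 * yields its uncached mapping, so IPI writes bypass the cache.
 */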

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
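
/*
 * Example (read directly from the table above): ISA IRQ 0, the PIT
 * timer, is delivered as IA-64 vector 0x2f; ISA IRQ 1, the keyboard,
 * as vector 0x20; ISA IRQs 2-15 then map to 0x2e down through 0x21.
 */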

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];

int
assign_irq_vector (int irq)
{
	int pos, vector;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		extern int xen_assign_irq_vector(int);
		return xen_assign_irq_vector(irq);
	}
#endif
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	return vector;
}
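
/*
 * Illustrative usage sketch (added; not part of this changeset).  The
 * find_first_zero_bit/test_and_set_bit pair above simply retries on
 * contention, so no lock is needed.  A caller would typically do
 * something like the following; using AUTO_ASSIGN as the irq argument
 * is an assumption about the usual ia64 calling convention:
 */
#if 0
	int vector = assign_irq_vector(AUTO_ASSIGN);

	if (vector < 0)
		return vector;		/* -ENOSPC: all device vectors in use */
	/* ... program the device/IOSAPIC RTE with "vector" ... */
	free_irq_vector(vector);	/* release it when done */
#endif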

void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}

#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
# define IS_RESCHEDULE(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * From here we dispatch to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (jiffies - last_time > 5*HZ)
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
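	/*
	 * (Added background, assuming the architected TPR layout: the
	 * cr.tpr "mic" field masks every priority class at or below
	 * vector/16.  With 256 vectors grouped into 16-vector classes,
	 * writing each in-service vector to the TPR caps nesting at one
	 * interrupt per class, ~16, instead of one per vector, ~240.)
	 */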
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			__do_IRQ(local_vector_to_irq(vector), regs);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal IRQ dispatch path, just as it would
			 * have been called from a real interrupt handler.  We
			 * pass NULL for pt_regs, which the handlers reached
			 * here are expected to tolerate.  This could probably
			 * share code with ia64_handle_irq().
			 */
			vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif

#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	SA_INTERRUPT,
	.name =		"IPI"
};
#endif

#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#include <xen/interface/callback.h>

static char timer_name[NR_CPUS][15];
static char ipi_name[NR_CPUS][15];
static char resched_name[NR_CPUS][15];

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 should be a generously optimistic value, since only a few percpu
 * irqs are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt = 0;
static unsigned short saved_irq_cnt = 0;
static int xen_slab_ready = 0;

/* Dummy stub.  Though we could check for RESCHEDULE_VECTOR before calling
 * __do_IRQ, doing so would issue several memory accesses on percpu data and
 * thus add unnecessary traffic to other paths.
 */
static irqreturn_t
handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static struct irqaction resched_irqaction = {
	.handler =	handle_reschedule,
	.flags =	SA_INTERRUPT,
	.name =		"RESCHED"
};

/*
 * This is the Xen flavour of percpu irq registration, which binds to the
 * Xen-specific evtchn subsystem.  One trick here is that the evtchn
 * binding interface depends on kmalloc, because the bound port must be
 * freed on device/cpu teardown.  So registrations made on the BSP before
 * the slab allocator is ready are cached and processed later; every
 * instance that happens after the slab is ready is hooked to a Xen
 * evtchn immediately.
 *
 * FIXME: MCA is not supported so far, so the "nomca" boot param is
 * required.
 */
static void
xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
{
	unsigned int cpu = smp_processor_id();
	int ret = 0;

	if (xen_slab_ready) {
		switch (irq) {
		case IA64_TIMER_VECTOR:
			sprintf(timer_name[cpu], "%s%d", action->name, cpu);
			ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				timer_name[cpu], action->dev_id);
			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
			break;
		case IA64_IPI_RESCHEDULE:
			sprintf(resched_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				resched_name[cpu], action->dev_id);
			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
			break;
		case IA64_IPI_VECTOR:
			sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
				action->handler, action->flags,
				ipi_name[cpu], action->dev_id);
			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
			break;
		case IA64_SPURIOUS_INT_VECTOR:
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
			break;
		}
		BUG_ON(ret < 0);
	}

	/* On the BSP we cache registered percpu irqs, and then re-walk
	 * them when initializing the APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = irq;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}

static void
xen_bind_early_percpu_irq (void)
{
	int i;

	xen_slab_ready = 1;
	/* There's no race when accessing this cached array, since only
	 * the BSP performs this step, and it does so early in boot.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious hook for checking whether the slab allocator
 * is ready, so as a hack we piggyback on the late_time_init hook, which
 * runs late enough for slab to be up.
 */
extern void (*late_time_init)(void);
extern char xen_event_callback;
extern void xen_init_IRQ(void);
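
/*
 * (Added overview of the resulting bring-up order, as wired up below;
 * the names are the ones used in this file:)
 *
 *   init_IRQ()                        - percpu irq registrations cached
 *   late_time_init()
 *     -> xen_bind_early_percpu_irq()  - slab ready: replay the cache on
 *                                       the BSP via evtchn bindings
 *   xen_smp_intr_init()               - replay the cache on each AP as
 *                                       it comes up
 */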

DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	unsigned int i = 0;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = (unsigned long)&xen_event_callback,
	};
	static cpumask_t registered_cpumask;

	if (!cpu)
		return;

	/* This should piggyback on vcpu guest context setup. */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));

	if (!cpu_isset(cpu, registered_cpumask)) {
		cpu_set(cpu, registered_cpumask);
		for (i = 0; i < saved_irq_cnt; i++)
			xen_register_percpu_irq(saved_percpu_irqs[i].irq,
						saved_percpu_irqs[i].action,
						0);
	}
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_XEN */

void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	irq_desc_t *desc;
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
#ifdef CONFIG_XEN
			if (is_running_on_xen())
				return xen_register_percpu_irq(vec, action, 1);
#endif
			desc = irq_descp(irq);
			desc->status |= IRQ_PER_CPU;
			desc->handler = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_XEN
	/* Maybe move this into platform_irq_init later */
	if (is_running_on_xen()) {
		struct callback_register event = {
			.type = CALLBACKTYPE_event,
			.address = (unsigned long)&xen_event_callback,
		};
		xen_init_IRQ();
		BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
		late_time_init = xen_bind_early_percpu_irq;
#ifdef CONFIG_SMP
		register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
#endif /* CONFIG_SMP */
	}
#endif /* CONFIG_XEN */
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		int irq = -1;

		/* TODO: we need to call vcpu_up here */
		if (unlikely(vector == ap_wakeup_vector)) {
			extern void xen_send_ipi (int cpu, int vec);
			xen_send_ipi (cpu, vector);
			/* vcpu_prepare_and_up(cpu); */
			return;
		}

		switch (vector) {
		case IA64_IPI_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
			break;
		case IA64_IPI_RESCHEDULE:
			irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
			break;
		default:
			printk(KERN_WARNING "Unsupported IPI type 0x%x\n", vector);
			irq = 0;
			break;
		}

		BUG_ON(irq < 0);
		notify_remote_via_irq(irq);
		return;
	}
#endif /* CONFIG_XEN */

#ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
#else
	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

	/*
	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */
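	/*
	 * (Added worked example of the two lines below: for phys_cpu_id
	 * 0x0102, i.e. ID 0x01/EID 0x02, and vector 0xd1 with
	 * delivery_mode 0 and redirect 0, ipi_data is 0x00d1 and
	 * ipi_addr is ipi_base_addr + 0x1020.)
	 */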

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}