ia64/xen-unstable

view xen/arch/ia64/linux-xen/mca.c @ 19206:b432c632ebe8

[IA64] remove a warning.

This patch removes the following warning:

mca.c:1928: warning: unused variable 'irq'

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Fri Feb 13 19:11:38 2009 +0900 (2009-02-13)
parents c7cba853583d
children c4c4ba857d8b
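
The warning came from ia64_mca_late_init(): the local variable 'irq' is used only by the native-Linux loop that scans NR_IRQS for the CPE vector, so a Xen build declared it without ever referencing it. The change moves the declaration under the existing #ifndef XEN guard, visible around lines 1927-1930 of the file below. A minimal sketch of the resulting pattern:

	irq_desc_t *desc;
#ifndef XEN
	unsigned int irq;	/* only the non-Xen loop over irq_to_vector(irq) uses this */
#endif

With the declaration guarded, the Xen build no longer emits the unused-variable warning, while the native path keeps its loop over irq_to_vector(irq) to locate the CPE interrupt.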
line source
1 /*
2 * File: mca.c
3 * Purpose: Generic MCA handling layer
4 *
5 * Updated for latest kernel
6 * Copyright (C) 2003 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * Copyright (C) 2002 Dell Inc.
10 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
11 *
12 * Copyright (C) 2002 Intel
13 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
14 *
15 * Copyright (C) 2001 Intel
16 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
17 *
18 * Copyright (C) 2000 Intel
19 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
20 *
21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
22 * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
23 *
24 * 03/04/15 D. Mosberger Added INIT backtrace support.
25 * 02/03/25 M. Domsch GUID cleanups
26 *
27 * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU
28 * error flag, set SAL default return values, changed
29 * error record structure to linked list, added init call
30 * to sal_get_state_info_size().
31 *
32 * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected
33 * platform errors, completed code for logging of
34 * corrected & uncorrected machine check errors, and
35 * updated for conformance with Nov. 2000 revision of the
36 * SAL 3.0 spec.
37 * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
38 * added min save state dump, added INIT handler.
39 *
40 * 2003-12-08 Keith Owens <kaos@sgi.com>
41 * smp_call_function() must not be called from interrupt context (can
42 * deadlock on tasklist_lock). Use keventd to call smp_call_function().
43 *
44 * 2004-02-01 Keith Owens <kaos@sgi.com>
45 * Avoid deadlock when using printk() for MCA and INIT records.
46 * Delete all record printing code, moved to salinfo_decode in user space.
47 * Mark variables and functions static where possible.
48 * Delete dead variables and functions.
49 * Reorder to remove the need for forward declarations and to consolidate
50 * related code.
51 */
52 #include <linux/config.h>
53 #include <linux/types.h>
54 #include <linux/init.h>
55 #include <linux/sched.h>
56 #include <linux/interrupt.h>
57 #include <linux/irq.h>
58 #include <linux/kallsyms.h>
59 #include <linux/smp_lock.h>
60 #include <linux/bootmem.h>
61 #include <linux/acpi.h>
62 #include <linux/timer.h>
63 #include <linux/module.h>
64 #include <linux/kernel.h>
65 #include <linux/smp.h>
66 #include <linux/workqueue.h>
68 #include <asm/delay.h>
69 #include <asm/machvec.h>
70 #include <asm/meminit.h>
71 #include <asm/page.h>
72 #include <asm/ptrace.h>
73 #include <asm/system.h>
74 #include <asm/sal.h>
75 #include <asm/mca.h>
77 #include <asm/irq.h>
78 #include <asm/hw_irq.h>
80 #ifdef XEN
81 #include <xen/symbols.h>
82 #include <xen/mm.h>
83 #include <xen/console.h>
84 #include <xen/event.h>
85 #include <xen/softirq.h>
86 #include <asm/xenmca.h>
87 #include <linux/shutdown.h>
88 #endif
90 #if defined(IA64_MCA_DEBUG_INFO)
91 # define IA64_MCA_DEBUG(fmt...) printk(fmt)
92 #else
93 # define IA64_MCA_DEBUG(fmt...)
94 #endif
96 /* Used by mca_asm.S */
97 #ifndef XEN
98 ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
99 #else
100 ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state[NR_CPUS];
101 DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr);
102 #endif
103 ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
104 u64 ia64_mca_serialize;
105 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
106 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
107 DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
108 DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
110 unsigned long __per_cpu_mca[NR_CPUS];
112 /* In mca_asm.S */
113 extern void ia64_monarch_init_handler (void);
114 extern void ia64_slave_init_handler (void);
115 #ifdef XEN
116 extern void setup_vector (unsigned int vec, struct irqaction *action);
117 #endif
119 static ia64_mc_info_t ia64_mc_info;
121 #ifdef XEN
122 #define jiffies NOW()
123 #undef HZ
124 #define HZ 1000000000UL
125 #endif
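/*
 * Under Xen, "jiffies" above is mapped to NOW(), which reports system time
 * in nanoseconds, and HZ is redefined to 10^9 to match.  The *_POLL_INTERVAL
 * values below therefore come out in nanoseconds and are passed straight to
 * set_timer() through the mod_timer() wrapper defined just below.
 */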
127 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
128 #define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
129 #define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
130 #define CPE_HISTORY_LENGTH 5
131 #define CMC_HISTORY_LENGTH 5
133 #ifndef XEN
134 static struct timer_list cpe_poll_timer;
135 static struct timer_list cmc_poll_timer;
136 #else
137 #define mod_timer(timer, expires) set_timer(timer, expires)
138 static struct timer cpe_poll_timer;
139 static struct timer cmc_poll_timer;
140 #endif
141 /*
142 * This variable tells whether we are currently in polling mode.
143 * Start with this in the wrong state so we won't play w/ timers
144 * before the system is ready.
145 */
146 static int cmc_polling_enabled = 1;
148 /*
149 * Clearing this variable prevents CPE polling from getting activated
150 * in mca_late_init. Use it if your system doesn't provide a CPEI,
151 * but encounters problems retrieving CPE logs. This should only be
152 * necessary for debugging.
153 */
154 static int cpe_poll_enabled = 1;
156 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
158 static int mca_init;
160 /*
161 * IA64_MCA log support
162 */
163 #define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */
164 #define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */
166 typedef struct ia64_state_log_s
167 {
168 spinlock_t isl_lock;
169 int isl_index;
170 unsigned long isl_count;
171 ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
172 } ia64_state_log_t;
174 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
176 #ifndef XEN
177 #define IA64_LOG_ALLOCATE(it, size) \
178 {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
179 (ia64_err_rec_t *)alloc_bootmem(size); \
180 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
181 (ia64_err_rec_t *)alloc_bootmem(size);}
182 #else
183 #define IA64_LOG_ALLOCATE(it, size) \
184 do { \
185 unsigned int pageorder; \
186 struct page_info *page; \
187 pageorder = get_order_from_bytes(size); \
188 page = alloc_domheap_pages(NULL, pageorder, 0); \
189 ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
190 page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
191 page = alloc_domheap_pages(NULL, pageorder, 0); \
192 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
193 page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
194 } while(0)
195 #endif
197 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
198 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
199 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
200 #define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index
201 #define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index
202 #define IA64_LOG_INDEX_INC(it) \
203 {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
204 ia64_state_log[it].isl_count++;}
205 #define IA64_LOG_INDEX_DEC(it) \
206 ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
207 #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
208 #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
209 #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
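/*
 * Note on the macros above: each record type keeps two buffers so a nested
 * MCA can be logged while the previous record is still being read out.
 * isl_index names the "next" buffer to fill, 1 - isl_index the "current"
 * (most recently filled) one, and IA64_LOG_INDEX_INC() flips the index and
 * bumps isl_count.
 */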
211 #ifdef XEN
212 sal_queue_entry_t sal_entry[NR_CPUS][IA64_MAX_LOG_TYPES];
213 struct list_head *sal_queue, sal_log_queues[IA64_MAX_LOG_TYPES];
214 sal_log_record_header_t *sal_record;
215 DEFINE_SPINLOCK(sal_queue_lock);
216 #endif
218 /*
219 * ia64_log_init
220 * Reset the OS ia64 log buffer
221 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
222 * Outputs : None
223 */
224 static void
225 ia64_log_init(int sal_info_type)
226 {
227 u64 max_size = 0;
229 IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
230 IA64_LOG_LOCK_INIT(sal_info_type);
232 // SAL will tell us the maximum size of any error record of this type
233 max_size = ia64_sal_get_state_info_size(sal_info_type);
234 if (!max_size)
235 /* alloc_bootmem() doesn't like zero-sized allocations! */
236 return;
238 // set up OS data structures to hold error info
239 IA64_LOG_ALLOCATE(sal_info_type, max_size);
240 memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
241 memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
243 #ifdef XEN
244 if (sal_record == NULL) {
245 unsigned int pageorder;
246 struct page_info *page;
247 pageorder = get_order_from_bytes(max_size);
248 page = alloc_domheap_pages(NULL, pageorder, 0);
249 BUG_ON(page == NULL);
250 sal_record = (sal_log_record_header_t *)page_to_virt(page);
251 BUG_ON(sal_record == NULL);
252 }
253 #endif
254 }
256 #ifndef XEN
257 /*
258 * ia64_log_get
259 *
260 * Get the current MCA log from SAL and copy it into the OS log buffer.
261 *
262 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
263 * irq_safe whether you can use printk at this point
264 * Outputs : size (total record length)
265 * *buffer (ptr to error record)
266 *
267 */
268 static u64
269 ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
270 {
271 sal_log_record_header_t *log_buffer;
272 u64 total_len = 0;
273 int s;
275 IA64_LOG_LOCK(sal_info_type);
277 /* Get the process state information */
278 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
280 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
282 if (total_len) {
283 IA64_LOG_INDEX_INC(sal_info_type);
284 IA64_LOG_UNLOCK(sal_info_type);
285 if (irq_safe) {
286 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
287 "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
288 }
289 *buffer = (u8 *) log_buffer;
290 return total_len;
291 } else {
292 IA64_LOG_UNLOCK(sal_info_type);
293 return 0;
294 }
295 }
297 /*
298 * ia64_mca_log_sal_error_record
299 *
300 * This function retrieves a specified error record type from SAL
301 * and wakes up any processes waiting for error records.
302 *
303 * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT)
304 */
305 static void
306 ia64_mca_log_sal_error_record(int sal_info_type)
307 {
308 u8 *buffer;
309 sal_log_record_header_t *rh;
310 u64 size;
311 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
312 #ifdef IA64_MCA_DEBUG_INFO
313 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
314 #endif
316 size = ia64_log_get(sal_info_type, &buffer, irq_safe);
317 if (!size)
318 return;
320 salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
322 if (irq_safe)
323 IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
324 smp_processor_id(),
325 sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
327 /* Clear logs from corrected errors in case there's no user-level logger */
328 rh = (sal_log_record_header_t *)buffer;
329 if (rh->severity == sal_log_severity_corrected)
330 ia64_sal_clear_state_info(sal_info_type);
331 }
332 #else /* !XEN */
333 /*
334 * ia64_log_queue
335 *
336 * Get the current MCA log from SAL and copy it into the OS log buffer.
337 *
338 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
339 * Outputs : size (total record length)
340 * *buffer (ptr to error record)
341 *
342 */
343 static u64
344 ia64_log_queue(int sal_info_type, int virq)
345 {
346 sal_log_record_header_t *log_buffer;
347 u64 total_len = 0;
348 int s;
349 sal_queue_entry_t *e;
350 unsigned long flags;
352 IA64_LOG_LOCK(sal_info_type);
354 /* Get the process state information */
355 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
357 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
359 if (total_len) {
360 int queue_type;
361 int cpuid = smp_processor_id();
363 spin_lock_irqsave(&sal_queue_lock, flags);
365 if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
366 queue_type = SAL_INFO_TYPE_CMC;
367 else
368 queue_type = sal_info_type;
370 /* Skip if sal_entry is already listed in sal_queue */
371 list_for_each_entry(e, &sal_queue[queue_type], list) {
372 if (e == &sal_entry[cpuid][queue_type])
373 goto found;
374 }
375 e = &sal_entry[cpuid][queue_type];
376 memset(e, 0, sizeof(sal_queue_entry_t));
377 e->cpuid = cpuid;
378 e->sal_info_type = sal_info_type;
379 e->vector = IA64_CMC_VECTOR;
380 e->virq = virq;
381 e->length = total_len;
383 list_add_tail(&e->list, &sal_queue[queue_type]);
385 found:
386 spin_unlock_irqrestore(&sal_queue_lock, flags);
388 IA64_LOG_INDEX_INC(sal_info_type);
389 IA64_LOG_UNLOCK(sal_info_type);
390 if (sal_info_type != SAL_INFO_TYPE_MCA &&
391 sal_info_type != SAL_INFO_TYPE_INIT) {
392 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
393 "Record length = %ld\n", __FUNCTION__,
394 sal_info_type, total_len);
395 }
396 return total_len;
397 } else {
398 IA64_LOG_UNLOCK(sal_info_type);
399 return 0;
400 }
401 }
402 #endif /* !XEN */
404 /*
405 * platform dependent error handling
406 */
407 #ifndef PLATFORM_MCA_HANDLERS
409 #ifdef CONFIG_ACPI
411 #ifdef XEN
412 /**
413 * Copy from linux/kernel/irq/manage.c
414 *
415 * disable_irq_nosync - disable an irq without waiting
416 * @irq: Interrupt to disable
417 *
418 * Disable the selected interrupt line. Disables and Enables are
419 * nested.
420 * Unlike disable_irq(), this function does not ensure existing
421 * instances of the IRQ handler have completed before returning.
422 *
423 * This function may be called from IRQ context.
424 */
425 void disable_irq_nosync(unsigned int irq)
426 {
427 irq_desc_t *desc = irq_desc + irq;
428 unsigned long flags;
430 if (irq >= NR_IRQS)
431 return;
433 spin_lock_irqsave(&desc->lock, flags);
434 if (!desc->depth++) {
435 desc->status |= IRQ_DISABLED;
436 desc->handler->disable(irq);
437 }
438 spin_unlock_irqrestore(&desc->lock, flags);
439 }
441 /**
442 * Copy from linux/kernel/irq/manage.c
443 *
444 * enable_irq - enable handling of an irq
445 * @irq: Interrupt to enable
446 *
447 * Undoes the effect of one call to disable_irq(). If this
448 * matches the last disable, processing of interrupts on this
449 * IRQ line is re-enabled.
450 *
451 * This function may be called from IRQ context.
452 */
453 void enable_irq(unsigned int irq)
454 {
455 irq_desc_t *desc = irq_desc + irq;
456 unsigned long flags;
458 if (irq >= NR_IRQS)
459 return;
461 spin_lock_irqsave(&desc->lock, flags);
462 switch (desc->depth) {
463 case 0:
464 WARN_ON(1);
465 break;
466 case 1: {
467 unsigned int status = desc->status & ~IRQ_DISABLED;
469 desc->status = status;
470 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
471 desc->status = status | IRQ_REPLAY;
472 hw_resend_irq(desc->handler,irq);
473 }
474 desc->handler->enable(irq);
475 /* fall-through */
476 }
477 default:
478 desc->depth--;
479 }
480 spin_unlock_irqrestore(&desc->lock, flags);
481 }
482 #endif /* XEN */
484 int cpe_vector = -1;
486 static irqreturn_t
487 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
488 {
489 static unsigned long cpe_history[CPE_HISTORY_LENGTH];
490 static int index;
491 static DEFINE_SPINLOCK(cpe_history_lock);
493 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
494 __FUNCTION__, cpe_irq, smp_processor_id());
496 /* SAL spec states this should run w/ interrupts enabled */
497 local_irq_enable();
499 #ifndef XEN
500 /* Get the CPE error record and log it */
501 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
502 #else
503 ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
504 /* CPE errors are not reported to dom0; the following code is
505 reserved for a future implementation */
506 /* send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
507 #endif
509 spin_lock(&cpe_history_lock);
510 if (!cpe_poll_enabled && cpe_vector >= 0) {
512 int i, count = 1; /* we know 1 happened now */
513 unsigned long now = jiffies;
515 for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
516 if (now - cpe_history[i] <= HZ)
517 count++;
518 }
520 IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
521 if (count >= CPE_HISTORY_LENGTH) {
523 cpe_poll_enabled = 1;
524 spin_unlock(&cpe_history_lock);
525 disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
527 /*
528 * Corrected errors will still be corrected, but
529 * make sure there's a log somewhere that indicates
530 * something is generating more than we can handle.
531 */
532 printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
534 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
536 /* lock already released, get out now */
537 return IRQ_HANDLED;
538 } else {
539 cpe_history[index++] = now;
540 if (index == CPE_HISTORY_LENGTH)
541 index = 0;
542 }
543 }
544 spin_unlock(&cpe_history_lock);
545 return IRQ_HANDLED;
546 }
548 #endif /* CONFIG_ACPI */
550 static void
551 show_min_state (pal_min_state_area_t *minstate)
552 {
553 u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
554 u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
556 printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
557 printk("pr\t\t%016lx\n", minstate->pmsa_pr);
558 printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
559 printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
560 printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
561 printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
562 printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
563 printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
564 printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
565 printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
566 printk("b1\t\t%016lx ", minstate->pmsa_br1);
567 print_symbol("%s\n", minstate->pmsa_br1);
569 printk("\nstatic registers r0-r15:\n");
570 printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
571 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
572 printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
573 minstate->pmsa_gr[3], minstate->pmsa_gr[4],
574 minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
575 printk(" r8-11 %016lx %016lx %016lx %016lx\n",
576 minstate->pmsa_gr[7], minstate->pmsa_gr[8],
577 minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
578 printk("r12-15 %016lx %016lx %016lx %016lx\n",
579 minstate->pmsa_gr[11], minstate->pmsa_gr[12],
580 minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
582 printk("\nbank 0:\n");
583 printk("r16-19 %016lx %016lx %016lx %016lx\n",
584 minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
585 minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
586 printk("r20-23 %016lx %016lx %016lx %016lx\n",
587 minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
588 minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
589 printk("r24-27 %016lx %016lx %016lx %016lx\n",
590 minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
591 minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
592 printk("r28-31 %016lx %016lx %016lx %016lx\n",
593 minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
594 minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
596 printk("\nbank 1:\n");
597 printk("r16-19 %016lx %016lx %016lx %016lx\n",
598 minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
599 minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
600 printk("r20-23 %016lx %016lx %016lx %016lx\n",
601 minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
602 minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
603 printk("r24-27 %016lx %016lx %016lx %016lx\n",
604 minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
605 minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
606 printk("r28-31 %016lx %016lx %016lx %016lx\n",
607 minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
608 minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
609 }
611 static void
612 fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
613 {
614 u64 *dst_banked, *src_banked, bit, shift, nat_bits;
615 int i;
617 /*
618 * First, update the pt-regs and switch-stack structures with the contents stored
619 * in the min-state area:
620 */
621 if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
622 pt->cr_ipsr = ms->pmsa_xpsr;
623 pt->cr_iip = ms->pmsa_xip;
624 pt->cr_ifs = ms->pmsa_xfs;
625 } else {
626 pt->cr_ipsr = ms->pmsa_ipsr;
627 pt->cr_iip = ms->pmsa_iip;
628 pt->cr_ifs = ms->pmsa_ifs;
629 }
630 pt->ar_rsc = ms->pmsa_rsc;
631 pt->pr = ms->pmsa_pr;
632 pt->r1 = ms->pmsa_gr[0];
633 pt->r2 = ms->pmsa_gr[1];
634 pt->r3 = ms->pmsa_gr[2];
635 sw->r4 = ms->pmsa_gr[3];
636 sw->r5 = ms->pmsa_gr[4];
637 sw->r6 = ms->pmsa_gr[5];
638 sw->r7 = ms->pmsa_gr[6];
639 pt->r8 = ms->pmsa_gr[7];
640 pt->r9 = ms->pmsa_gr[8];
641 pt->r10 = ms->pmsa_gr[9];
642 pt->r11 = ms->pmsa_gr[10];
643 pt->r12 = ms->pmsa_gr[11];
644 pt->r13 = ms->pmsa_gr[12];
645 pt->r14 = ms->pmsa_gr[13];
646 pt->r15 = ms->pmsa_gr[14];
647 dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */
648 src_banked = ms->pmsa_bank1_gr;
649 for (i = 0; i < 16; ++i)
650 dst_banked[i] = src_banked[i];
651 pt->b0 = ms->pmsa_br0;
652 sw->b1 = ms->pmsa_br1;
654 /* construct the NaT bits for the pt-regs structure: */
655 # define PUT_NAT_BIT(dst, addr) \
656 do { \
657 bit = nat_bits & 1; nat_bits >>= 1; \
658 shift = ((unsigned long) addr >> 3) & 0x3f; \
659 dst = ((dst) & ~(1UL << shift)) | (bit << shift); \
660 } while (0)
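/*
 * PUT_NAT_BIT above consumes one bit from nat_bits and deposits it at the
 * position that ld8.fill/st8.spill would use for the given address: bits
 * 3..8 of the address select the bit within the 64-bit UNaT collection word.
 */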
662 /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
663 shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
664 nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
666 PUT_NAT_BIT(sw->caller_unat, &pt->r1);
667 PUT_NAT_BIT(sw->caller_unat, &pt->r2);
668 PUT_NAT_BIT(sw->caller_unat, &pt->r3);
669 PUT_NAT_BIT(sw->ar_unat, &sw->r4);
670 PUT_NAT_BIT(sw->ar_unat, &sw->r5);
671 PUT_NAT_BIT(sw->ar_unat, &sw->r6);
672 PUT_NAT_BIT(sw->ar_unat, &sw->r7);
673 PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9);
674 PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
675 PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
676 PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
677 nat_bits >>= 16; /* skip over bank0 NaT bits */
678 PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
679 PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
680 PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
681 PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
682 PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
683 PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
684 PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
685 PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
686 }
688 #ifdef XEN
689 static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
690 static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
691 static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
692 extern void show_stack (struct task_struct *, unsigned long *);
694 #define CPU_FLUSH_RETRY_MAX 5
695 static void
696 init_cache_flush (void)
697 {
698 unsigned long flags;
699 int i;
700 s64 rval = 0;
701 u64 vector, progress = 0;
703 for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
704 local_irq_save(flags);
705 rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
706 0, &progress, &vector);
707 local_irq_restore(flags);
708 if (rval == 0){
709 printk("\nPAL cache flush success\n");
710 return;
711 }
712 }
713 printk("\nPAL cache flush failed. status=%ld\n",rval);
714 }
716 static void inline
717 save_ksp (struct unw_frame_info *info)
718 {
719 current->arch._thread.ksp = (__u64)(info->sw) - 16;
720 wmb();
721 init_cache_flush();
722 }
724 static void
725 freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
726 {
727 save_ksp(info);
728 atomic_inc(&num_stopped_cpus);
729 printk("%s: CPU%d init handler done\n",
730 __FUNCTION__, smp_processor_id());
731 for (;;)
732 local_irq_disable();
733 }
735 /* FIXME */
736 static void
737 try_crashdump(struct unw_frame_info *info, void *arg)
738 {
739 save_ksp(info);
740 printk("\nINIT dump complete. Please reboot now.\n");
741 for (;;)
742 local_irq_disable();
743 }
744 #endif /* XEN */
746 static void
747 init_handler_platform (pal_min_state_area_t *ms,
748 struct pt_regs *pt, struct switch_stack *sw)
749 {
750 struct unw_frame_info info;
752 /* if a kernel debugger is available call it here else just dump the registers */
754 /*
755 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
756 * generated via the BMC's command-line interface, but since the console is on the
757 * same serial line, the user will need some time to switch out of the BMC before
758 * the dump begins.
759 */
760 printk("Delaying for 5 seconds...\n");
761 udelay(5*1000000);
762 #ifdef XEN
763 fetch_min_state(ms, pt, sw);
764 spin_lock(&show_stack_lock);
765 #endif
766 show_min_state(ms);
768 #ifdef XEN
769 printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
770 current->vcpu_id, current->domain->domain_id);
771 #else
772 printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
773 fetch_min_state(ms, pt, sw);
774 #endif
775 unw_init_from_interruption(&info, current, pt, sw);
776 ia64_do_show_stack(&info, NULL);
777 #ifdef XEN
778 spin_unlock(&show_stack_lock);
780 if (spin_trylock(&init_dump_lock)) {
781 struct domain *d;
782 struct vcpu *v;
783 #ifdef CONFIG_SMP
784 int other_cpus = num_online_cpus() - 1;
785 int wait = 1000 * other_cpus;
787 while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
788 udelay(1000);
789 if (other_cpus && wait < 0)
790 printk("timeout %d\n", atomic_read(&num_stopped_cpus));
791 #endif
792 if (opt_noreboot) {
793 /* this route is for dump routine */
794 unw_init_running(try_crashdump, pt);
795 } else {
796 rcu_read_lock(&domlist_read_lock);
797 for_each_domain(d) {
798 for_each_vcpu(d, v) {
799 printk("Backtrace of current vcpu "
800 "(vcpu_id %d of domid %d)\n",
801 v->vcpu_id, d->domain_id);
802 show_stack(v, NULL);
803 }
804 }
805 rcu_read_unlock(&domlist_read_lock);
806 }
807 }
808 unw_init_running(freeze_cpu_osinit, NULL);
809 #else /* XEN */
810 #ifdef CONFIG_SMP
811 /* read_trylock() would be handy... */
812 if (!tasklist_lock.write_lock)
813 read_lock(&tasklist_lock);
814 #endif
815 {
816 struct task_struct *g, *t;
817 do_each_thread (g, t) {
818 if (t == current)
819 continue;
821 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
822 show_stack(t, NULL);
823 } while_each_thread (g, t);
824 }
825 #ifdef CONFIG_SMP
826 if (!tasklist_lock.write_lock)
827 read_unlock(&tasklist_lock);
828 #endif
830 printk("\nINIT dump complete. Please reboot now.\n");
831 #endif /* XEN */
832 while (1); /* hang city if no debugger */
833 }
835 #ifdef CONFIG_ACPI
836 /*
837 * ia64_mca_register_cpev
838 *
839 * Register the corrected platform error vector with SAL.
840 *
841 * Inputs
842 * cpev Corrected Platform Error Vector number
843 *
844 * Outputs
845 * None
846 */
847 static void
848 ia64_mca_register_cpev (int cpev)
849 {
850 /* Register the CPE interrupt vector with SAL */
851 struct ia64_sal_retval isrv;
853 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
854 if (isrv.status) {
855 printk(KERN_ERR "Failed to register Corrected Platform "
856 "Error interrupt vector with SAL (status %ld)\n", isrv.status);
857 return;
858 }
860 IA64_MCA_DEBUG("%s: corrected platform error "
861 "vector %#x registered\n", __FUNCTION__, cpev);
862 }
863 #endif /* CONFIG_ACPI */
865 #endif /* PLATFORM_MCA_HANDLERS */
867 /*
868 * ia64_mca_cmc_vector_setup
869 *
870 * Setup the corrected machine check vector register in the processor.
871 * (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
872 * This function is invoked on a per-processor basis.
873 *
874 * Inputs
875 * None
876 *
877 * Outputs
878 * None
879 */
880 void
881 ia64_mca_cmc_vector_setup (void)
882 {
883 cmcv_reg_t cmcv;
885 cmcv.cmcv_regval = 0;
886 cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
887 cmcv.cmcv_vector = IA64_CMC_VECTOR;
888 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
890 IA64_MCA_DEBUG("%s: CPU %d corrected "
891 "machine check vector %#x registered.\n",
892 __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
894 IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
895 __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
896 }
898 /*
899 * ia64_mca_cmc_vector_disable
900 *
901 * Mask the corrected machine check vector register in the processor.
902 * This function is invoked on a per-processor basis.
903 *
904 * Inputs
905 * dummy(unused)
906 *
907 * Outputs
908 * None
909 */
910 static void
911 ia64_mca_cmc_vector_disable (void *dummy)
912 {
913 cmcv_reg_t cmcv;
915 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
917 cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
918 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
920 IA64_MCA_DEBUG("%s: CPU %d corrected "
921 "machine check vector %#x disabled.\n",
922 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
923 }
925 /*
926 * ia64_mca_cmc_vector_enable
927 *
928 * Unmask the corrected machine check vector register in the processor.
929 * This function is invoked on a per-processor basis.
930 *
931 * Inputs
932 * dummy(unused)
933 *
934 * Outputs
935 * None
936 */
937 static void
938 ia64_mca_cmc_vector_enable (void *dummy)
939 {
940 cmcv_reg_t cmcv;
942 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
944 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
945 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
947 IA64_MCA_DEBUG("%s: CPU %d corrected "
948 "machine check vector %#x enabled.\n",
949 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
950 }
952 #ifndef XEN
953 /*
954 * ia64_mca_cmc_vector_disable_keventd
955 *
956 * Called via keventd (smp_call_function() is not safe in interrupt context) to
957 * disable the cmc interrupt vector.
958 */
959 static void
960 ia64_mca_cmc_vector_disable_keventd(void *unused)
961 {
962 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
963 }
965 /*
966 * ia64_mca_cmc_vector_enable_keventd
967 *
968 * Called via keventd (smp_call_function() is not safe in interrupt context) to
969 * enable the cmc interrupt vector.
970 */
971 static void
972 ia64_mca_cmc_vector_enable_keventd(void *unused)
973 {
974 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
975 }
976 #endif /* !XEN */
978 /*
979 * ia64_mca_wakeup_ipi_wait
980 *
981 * Wait for the inter-cpu interrupt to be sent by the
982 * monarch processor once it is done with handling the
983 * MCA.
984 *
985 * Inputs : None
986 * Outputs : None
987 */
988 static void
989 ia64_mca_wakeup_ipi_wait(void)
990 {
991 int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
992 int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
993 u64 irr = 0;
995 do {
996 switch(irr_num) {
997 case 0:
998 irr = ia64_getreg(_IA64_REG_CR_IRR0);
999 break;
1000 case 1:
1001 irr = ia64_getreg(_IA64_REG_CR_IRR1);
1002 break;
1003 case 2:
1004 irr = ia64_getreg(_IA64_REG_CR_IRR2);
1005 break;
1006 case 3:
1007 irr = ia64_getreg(_IA64_REG_CR_IRR3);
1008 break;
1010 cpu_relax();
1011 } while (!(irr & (1UL << irr_bit))) ;
1014 /*
1015 * ia64_mca_wakeup
1017 * Send an inter-cpu interrupt to wake-up a particular cpu
1018 * and mark that cpu to be out of rendez.
1020 * Inputs : cpuid
1021 * Outputs : None
1022 */
1023 static void
1024 ia64_mca_wakeup(int cpu)
1026 platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
1027 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1031 /*
1032 * ia64_mca_wakeup_all
1034 * Wakeup all the cpus which have rendez'ed previously.
1036 * Inputs : None
1037 * Outputs : None
1038 */
1039 static void
1040 ia64_mca_wakeup_all(void)
1042 int cpu;
1044 /* Clear the Rendez checkin flag for all cpus */
1045 for(cpu = 0; cpu < NR_CPUS; cpu++) {
1046 if (!cpu_online(cpu))
1047 continue;
1048 if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
1049 ia64_mca_wakeup(cpu);
1054 /*
1055 * ia64_mca_rendez_interrupt_handler
1057 * This is the handler used to put slave processors into a spinloop
1058 * while the monarch processor does the MCA handling, and later to
1059 * wake each slave up once the monarch is done.
1061 * Inputs : None
1062 * Outputs : None
1063 */
1064 static irqreturn_t
1065 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
1067 unsigned long flags;
1068 int cpu = smp_processor_id();
1070 /* Mask all interrupts */
1071 local_irq_save(flags);
1073 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
1074 /* Register with the SAL monarch that the slave has
1075 * reached SAL
1076 */
1077 ia64_sal_mc_rendez();
1079 /* Wait for the wakeup IPI from the monarch
1080 * This waiting is done by polling on the wakeup-interrupt
1081 * vector bit in the processor's IRRs
1082 */
1083 ia64_mca_wakeup_ipi_wait();
1085 /* Enable all interrupts */
1086 local_irq_restore(flags);
1087 return IRQ_HANDLED;
1090 /*
1091 * ia64_mca_wakeup_int_handler
1093 * The interrupt handler for processing the inter-cpu interrupt to the
1094 * slave cpu which was spinning in the rendez loop.
1095 * Since this spinning is done by turning off the interrupts and
1096 * polling on the wakeup-interrupt bit in the IRR, there is
1097 * nothing useful to be done in the handler.
1099 * Inputs : wakeup_irq (Wakeup-interrupt bit)
1100 * arg (Interrupt handler specific argument)
1101 * ptregs (Exception frame at the time of the interrupt)
1102 * Outputs : None
1104 */
1105 static irqreturn_t
1106 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
1108 return IRQ_HANDLED;
1111 /*
1112 * ia64_return_to_sal_check
1114 * This function is called before going back from the OS_MCA handler
1115 * to the OS_MCA dispatch code which finally takes the control back
1116 * to the SAL.
1117 * The main purpose of this routine is to setup the OS_MCA to SAL
1118 * return state which can be used by the OS_MCA dispatch code
1119 * just before going back to SAL.
1121 * Inputs : None
1122 * Outputs : None
1123 */
1125 static void
1126 ia64_return_to_sal_check(int recover)
1128 #ifdef XEN
1129 int cpu = smp_processor_id();
1130 #endif
1132 /* Copy over some relevant stuff from the sal_to_os_mca_handoff
1133 * so that it can be used at the time of os_mca_to_sal_handoff
1134 */
1135 #ifdef XEN
1136 ia64_os_to_sal_handoff_state.imots_sal_gp =
1137 ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
1139 ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1140 ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
1141 #else
1142 ia64_os_to_sal_handoff_state.imots_sal_gp =
1143 ia64_sal_to_os_handoff_state.imsto_sal_gp;
1145 ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1146 ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
1147 #endif
1149 if (recover)
1150 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
1151 else
1152 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
1154 /* Default = tell SAL to return to same context */
1155 ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
1157 #ifdef XEN
1158 ia64_os_to_sal_handoff_state.imots_new_min_state =
1159 (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
1160 #else
1161 ia64_os_to_sal_handoff_state.imots_new_min_state =
1162 (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
1163 #endif
1167 /* Function pointer for extra MCA recovery */
1168 int (*ia64_mca_ucmc_extension)
1169 (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
1170 = NULL;
1172 int
1173 ia64_reg_MCA_extension(void *fn)
1175 if (ia64_mca_ucmc_extension)
1176 return 1;
1178 ia64_mca_ucmc_extension = fn;
1179 return 0;
1182 void
1183 ia64_unreg_MCA_extension(void)
1185 if (ia64_mca_ucmc_extension)
1186 ia64_mca_ucmc_extension = NULL;
1189 EXPORT_SYMBOL(ia64_reg_MCA_extension);
1190 EXPORT_SYMBOL(ia64_unreg_MCA_extension);
1192 /*
1193 * ia64_mca_ucmc_handler
1195 * This is the uncorrectable machine check handler, called from the OS_MCA
1196 * dispatch code which is in turn called from SAL_CHECK().
1197 * This is the place where the core of OS MCA handling is done.
1198 * Right now the logs are extracted and displayed in a well-defined
1199 * format. This handler code is supposed to be run only on the
1200 * monarch processor. Once the monarch is done with MCA handling
1201 * further MCA logging is enabled by clearing logs.
1202 * Monarch also has the duty of sending wakeup-IPIs to pull the
1203 * slave processors out of rendezvous spinloop.
1205 * Inputs : None
1206 * Outputs : None
1207 */
1208 void
1209 ia64_mca_ucmc_handler(void)
1211 #ifdef XEN
1212 int cpu = smp_processor_id();
1213 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1214 &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
1215 #else
1216 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1217 &ia64_sal_to_os_handoff_state.proc_state_param;
1218 #endif
1219 int recover;
1221 #ifndef XEN
1222 /* Get the MCA error record and log it */
1223 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
1224 #else
1225 ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
1226 send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1227 #endif
1229 /* TLB error is the only error in this SAL error record */
1230 recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
1231 /* other error recovery */
1232 #ifndef XEN
1233 || (ia64_mca_ucmc_extension
1234 && ia64_mca_ucmc_extension(
1235 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
1236 &ia64_sal_to_os_handoff_state,
1237 &ia64_os_to_sal_handoff_state));
1238 #else
1240 #endif
1242 #ifndef XEN
1243 if (recover) {
1244 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
1245 rh->severity = sal_log_severity_corrected;
1246 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
1248 #endif
1249 /*
1250 * Wakeup all the processors which are spinning in the rendezvous
1251 * loop.
1252 */
1253 ia64_mca_wakeup_all();
1255 /* Return to SAL */
1256 ia64_return_to_sal_check(recover);
1259 #ifndef XEN
1260 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
1261 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
1262 #endif
1264 /*
1265 * ia64_mca_cmc_int_handler
1267 * This is the corrected machine check interrupt handler.
1268 * Right now the logs are extracted and displayed in a well-defined
1269 * format.
1271 * Inputs
1272 * interrupt number
1273 * client data arg ptr
1274 * saved registers ptr
1276 * Outputs
1277 * None
1278 */
1279 static irqreturn_t
1280 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1282 static unsigned long cmc_history[CMC_HISTORY_LENGTH];
1283 static int index;
1284 static DEFINE_SPINLOCK(cmc_history_lock);
1286 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
1287 __FUNCTION__, cmc_irq, smp_processor_id());
1289 /* SAL spec states this should run w/ interrupts enabled */
1290 local_irq_enable();
1292 #ifndef XEN
1293 /* Get the CMC error record and log it */
1294 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1295 #else
1296 ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1297 send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1298 #endif
1300 spin_lock(&cmc_history_lock);
1301 if (!cmc_polling_enabled) {
1302 int i, count = 1; /* we know 1 happened now */
1303 unsigned long now = jiffies;
1305 for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
1306 if (now - cmc_history[i] <= HZ)
1307 count++;
1310 IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
1311 if (count >= CMC_HISTORY_LENGTH) {
1313 cmc_polling_enabled = 1;
1314 spin_unlock(&cmc_history_lock);
1315 #ifndef XEN /* XXX FIXME */
1316 schedule_work(&cmc_disable_work);
1317 #else
1318 cpumask_raise_softirq(cpu_online_map,
1319 CMC_DISABLE_SOFTIRQ);
1320 #endif
1322 /*
1323 * Corrected errors will still be corrected, but
1324 * make sure there's a log somewhere that indicates
1325 * something is generating more than we can handle.
1326 */
1327 printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
1329 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1331 /* lock already released, get out now */
1332 return IRQ_HANDLED;
1333 } else {
1334 cmc_history[index++] = now;
1335 if (index == CMC_HISTORY_LENGTH)
1336 index = 0;
1339 spin_unlock(&cmc_history_lock);
1340 return IRQ_HANDLED;
1343 /*
1344 * ia64_mca_cmc_int_caller
1346 * Triggered by sw interrupt from CMC polling routine. Calls
1347 * real interrupt handler and either triggers a sw interrupt
1348 * on the next cpu or does cleanup at the end.
1350 * Inputs
1351 * interrupt number
1352 * client data arg ptr
1353 * saved registers ptr
1354 * Outputs
1355 * handled
1356 */
1357 static irqreturn_t
1358 ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
1360 static int start_count = -1;
1361 unsigned int cpuid;
1363 cpuid = smp_processor_id();
1365 /* If first cpu, update count */
1366 if (start_count == -1)
1367 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
1369 #ifndef XEN
1370 ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
1371 #else
1372 IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1373 __FUNCTION__, cmc_irq, smp_processor_id());
1374 ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1375 #endif
1377 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1379 if (cpuid < NR_CPUS) {
1380 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1381 } else {
1382 /* If no log record, switch out of polling mode */
1383 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
1385 printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
1386 #ifndef XEN /* XXX FIXME */
1387 schedule_work(&cmc_enable_work);
1388 #else
1389 cpumask_raise_softirq(cpu_online_map,
1390 CMC_ENABLE_SOFTIRQ);
1391 #endif
1392 cmc_polling_enabled = 0;
1394 } else {
1396 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1399 start_count = -1;
1401 return IRQ_HANDLED;
1404 /*
1405 * ia64_mca_cmc_poll
1407 * Poll for Corrected Machine Checks (CMCs)
1409 * Inputs : dummy(unused)
1410 * Outputs : None
1412 */
1413 static void
1414 #ifndef XEN
1415 ia64_mca_cmc_poll (unsigned long dummy)
1416 #else
1417 ia64_mca_cmc_poll (void *dummy)
1418 #endif
1420 /* Trigger a CMC interrupt cascade */
1421 platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1424 /*
1425 * ia64_mca_cpe_int_caller
1427 * Triggered by sw interrupt from CPE polling routine. Calls
1428 * real interrupt handler and either triggers a sw interrupt
1429 * on the next cpu or does cleanup at the end.
1431 * Inputs
1432 * interrupt number
1433 * client data arg ptr
1434 * saved registers ptr
1435 * Outputs
1436 * handled
1437 */
1438 #ifdef CONFIG_ACPI
1440 static irqreturn_t
1441 ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
1443 static int start_count = -1;
1444 #ifdef XEN
1445 static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
1446 #else
1447 static int poll_time = MIN_CPE_POLL_INTERVAL;
1448 #endif
1449 unsigned int cpuid;
1451 cpuid = smp_processor_id();
1453 /* If first cpu, update count */
1454 if (start_count == -1)
1455 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
1457 #ifndef XEN
1458 ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
1459 #else
1460 IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1461 __FUNCTION__, cpe_irq, smp_processor_id());
1462 ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
1463 #endif
1465 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1467 if (cpuid < NR_CPUS) {
1468 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1469 } else {
1470 /*
1471 * If a log was recorded, increase our polling frequency,
1472 * otherwise, backoff or return to interrupt mode.
1473 */
1474 if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
1475 poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
1476 } else if (cpe_vector < 0) {
1477 poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
1478 } else {
1479 poll_time = MIN_CPE_POLL_INTERVAL;
1481 printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
1482 enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
1483 cpe_poll_enabled = 0;
1486 if (cpe_poll_enabled)
1487 mod_timer(&cpe_poll_timer, jiffies + poll_time);
1488 start_count = -1;
1490 return IRQ_HANDLED;
1493 /*
1494 * ia64_mca_cpe_poll
1496 * Poll for Corrected Platform Errors (CPEs), trigger interrupt
1497 * on first cpu, from there it will trickle through all the cpus.
1499 * Inputs : dummy(unused)
1500 * Outputs : None
1502 */
1503 static void
1504 #ifndef XEN
1505 ia64_mca_cpe_poll (unsigned long dummy)
1506 #else
1507 ia64_mca_cpe_poll (void *dummy)
1508 #endif
1510 /* Trigger a CPE interrupt cascade */
1511 platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1514 #endif /* CONFIG_ACPI */
1516 /*
1517 * C portion of the OS INIT handler
1519 * Called from ia64_monarch_init_handler
1521 * Inputs: pointer to pt_regs where processor info was saved.
1523 * Returns:
1524 * 0 if SAL must warm boot the System
1525 * 1 if SAL must return to interrupted context using PAL_MC_RESUME
1527 */
1528 void
1529 ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
1531 pal_min_state_area_t *ms;
1533 #ifndef XEN
1534 oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */
1535 console_loglevel = 15; /* make sure printks make it to console */
1537 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1538 ia64_sal_to_os_handoff_state.proc_state_param);
1540 /*
1541 * Address of minstate area provided by PAL is physical,
1542 * uncacheable (bit 63 set). Convert to Linux virtual
1543 * address in region 6.
1544 */
1545 ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
1546 #else
1547 int cpu = smp_processor_id();
1549 console_start_sync();
1550 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1551 ia64_sal_to_os_handoff_state[cpu].proc_state_param);
1553 /* Xen virtual address in region 7. */
1554 ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
1555 #endif
1557 init_handler_platform(ms, pt, sw); /* call platform specific routines */
1560 static int __init
1561 ia64_mca_disable_cpe_polling(char *str)
1563 cpe_poll_enabled = 0;
1564 return 1;
1567 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1569 static struct irqaction cmci_irqaction = {
1570 .handler = ia64_mca_cmc_int_handler,
1571 #ifndef XEN
1572 .flags = SA_INTERRUPT,
1573 #endif
1574 .name = "cmc_hndlr"
1575 };
1577 static struct irqaction cmcp_irqaction = {
1578 .handler = ia64_mca_cmc_int_caller,
1579 #ifndef XEN
1580 .flags = SA_INTERRUPT,
1581 #endif
1582 .name = "cmc_poll"
1583 };
1585 static struct irqaction mca_rdzv_irqaction = {
1586 .handler = ia64_mca_rendez_int_handler,
1587 #ifndef XEN
1588 .flags = SA_INTERRUPT,
1589 #endif
1590 .name = "mca_rdzv"
1591 };
1593 static struct irqaction mca_wkup_irqaction = {
1594 .handler = ia64_mca_wakeup_int_handler,
1595 #ifndef XEN
1596 .flags = SA_INTERRUPT,
1597 #endif
1598 .name = "mca_wkup"
1599 };
1601 #ifdef CONFIG_ACPI
1602 static struct irqaction mca_cpe_irqaction = {
1603 .handler = ia64_mca_cpe_int_handler,
1604 #ifndef XEN
1605 .flags = SA_INTERRUPT,
1606 #endif
1607 .name = "cpe_hndlr"
1608 };
1610 static struct irqaction mca_cpep_irqaction = {
1611 .handler = ia64_mca_cpe_int_caller,
1612 #ifndef XEN
1613 .flags = SA_INTERRUPT,
1614 #endif
1615 .name = "cpe_poll"
1616 };
1617 #endif /* CONFIG_ACPI */
1619 /* Do per-CPU MCA-related initialization. */
1621 void __devinit
1622 ia64_mca_cpu_init(void *cpu_data)
1624 void *pal_vaddr;
1626 if (smp_processor_id() == 0) {
1627 void *mca_data;
1628 int cpu;
1630 #ifdef XEN
1631 unsigned int pageorder;
1632 pageorder = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
1633 #else
1634 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
1635 * NR_CPUS);
1636 #endif
1637 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1638 #ifdef XEN
1639 struct page_info *page;
1640 page = alloc_domheap_pages(NULL, pageorder, 0);
1641 mca_data = page? page_to_virt(page): NULL;
1642 __per_cpu_mca[cpu] = __pa(mca_data);
1643 IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
1644 "(mca_data[%d]=%lx)\n",
1645 __FUNCTION__, cpu, __per_cpu_mca[cpu],
1646 cpu, (u64)mca_data);
1647 #else
1648 __per_cpu_mca[cpu] = __pa(mca_data);
1649 mca_data += sizeof(struct ia64_mca_cpu);
1650 #endif
1653 #ifdef XEN
1654 else if (sal_queue) {
1655 int i;
1656 for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1657 ia64_log_queue(i, 0);
1659 #endif
1661 /*
1662 * The MCA info structure was allocated earlier and its
1663 * physical address saved in __per_cpu_mca[cpu]. Copy that
1664 * address to ia64_mca_data so we can access it as a per-CPU
1665 * variable.
1666 */
1667 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
1668 #ifdef XEN
1669 IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
1670 smp_processor_id(), __get_cpu_var(ia64_mca_data));
1672 /* sal_to_os_handoff for smp support */
1673 __get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
1674 __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
1675 IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
1676 smp_processor_id(),
1677 __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
1678 #endif
1680 /*
1681 * Stash away a copy of the PTE needed to map the per-CPU page.
1682 * We may need it during MCA recovery.
1683 */
1684 __get_cpu_var(ia64_mca_per_cpu_pte) =
1685 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1687 /*
1688 * Also, stash away a copy of the PAL address and the PTE
1689 * needed to map it.
1690 */
1691 pal_vaddr = efi_get_pal_addr();
1692 if (!pal_vaddr)
1693 return;
1694 __get_cpu_var(ia64_mca_pal_base) =
1695 GRANULEROUNDDOWN((unsigned long) pal_vaddr);
1696 __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
1697 PAGE_KERNEL));
1700 /*
1701 * ia64_mca_init
1703 * Do all the system level mca specific initialization.
1705 * 1. Register spinloop and wakeup request interrupt vectors
1707 * 2. Register OS_MCA handler entry point
1709 * 3. Register OS_INIT handler entry point
1711 * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
1713 * Note that this initialization is done very early before some kernel
1714 * services are available.
1716 * Inputs : None
1718 * Outputs : None
1719 */
1720 void __init
1721 ia64_mca_init(void)
1723 ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1724 ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
1725 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1726 int i;
1727 s64 rc;
1728 struct ia64_sal_retval isrv;
1729 u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
1731 #ifdef XEN
1732 slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1733 #endif
1735 IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
1737 /* Clear the Rendez checkin flag for all cpus */
1738 for(i = 0 ; i < NR_CPUS; i++)
1739 ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1741 /*
1742 * Register the rendezvous spinloop and wakeup mechanism with SAL
1743 */
1745 /* Register the rendezvous interrupt vector with SAL */
1746 while (1) {
1747 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
1748 SAL_MC_PARAM_MECHANISM_INT,
1749 IA64_MCA_RENDEZ_VECTOR,
1750 timeout,
1751 SAL_MC_PARAM_RZ_ALWAYS);
1752 rc = isrv.status;
1753 if (rc == 0)
1754 break;
1755 if (rc == -2) {
1756 printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1757 "%ld to %ld milliseconds\n", timeout, isrv.v0);
1758 timeout = isrv.v0;
1759 continue;
1761 printk(KERN_ERR "Failed to register rendezvous interrupt "
1762 "with SAL (status %ld)\n", rc);
1763 return;
1766 /* Register the wakeup interrupt vector with SAL */
1767 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
1768 SAL_MC_PARAM_MECHANISM_INT,
1769 IA64_MCA_WAKEUP_VECTOR,
1770 0, 0);
1771 rc = isrv.status;
1772 if (rc) {
1773 printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
1774 "(status %ld)\n", rc);
1775 return;
1778 IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
1780 ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
1781 /*
1782 * XXX - disable SAL checksum by setting size to 0; should be
1783 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
1784 */
1785 ia64_mc_info.imi_mca_handler_size = 0;
1787 /* Register the os mca handler with SAL */
1788 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
1789 ia64_mc_info.imi_mca_handler,
1790 ia64_tpa(mca_hldlr_ptr->gp),
1791 ia64_mc_info.imi_mca_handler_size,
1792 0, 0, 0)))
1794 printk(KERN_ERR "Failed to register OS MCA handler with SAL "
1795 "(status %ld)\n", rc);
1796 return;
1799 IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
1800 ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
1802 /*
1803 * XXX - disable SAL checksum by setting size to 0, should be
1804 * size of the actual init handler in mca_asm.S.
1805 */
1806 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
1807 ia64_mc_info.imi_monarch_init_handler_size = 0;
1808 ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
1809 ia64_mc_info.imi_slave_init_handler_size = 0;
1811 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
1812 ia64_mc_info.imi_monarch_init_handler);
1814 /* Register the os init handler with SAL */
1815 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
1816 ia64_mc_info.imi_monarch_init_handler,
1817 ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1818 ia64_mc_info.imi_monarch_init_handler_size,
1819 ia64_mc_info.imi_slave_init_handler,
1820 ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1821 ia64_mc_info.imi_slave_init_handler_size)))
1823 printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
1824 "(status %ld)\n", rc);
1825 return;
1828 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
1830 /*
1831 * Configure the CMCI/P vector and handler. Interrupts for CMC are
1832 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
1833 */
1834 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
1835 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
1836 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
1838 /* Setup the MCA rendezvous interrupt vector */
1839 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
1841 /* Setup the MCA wakeup interrupt vector */
1842 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
1844 #ifdef CONFIG_ACPI
1845 /* Setup the CPEI/P handler */
1846 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
1847 #endif
1849 /* Initialize the areas set aside by the OS to buffer the
1850 * platform/processor error states for MCA/INIT/CMC
1851 * handling.
1852 */
1853 ia64_log_init(SAL_INFO_TYPE_MCA);
1854 ia64_log_init(SAL_INFO_TYPE_INIT);
1855 ia64_log_init(SAL_INFO_TYPE_CMC);
1856 ia64_log_init(SAL_INFO_TYPE_CPE);
1858 #ifdef XEN
1859 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_MCA]);
1860 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_INIT]);
1861 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CMC]);
1862 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CPE]);
1864 /* NULL sal_queue used elsewhere to determine MCA init state */
1865 sal_queue = sal_log_queues;
1867 open_softirq(CMC_DISABLE_SOFTIRQ,
1868 (softirq_handler)ia64_mca_cmc_vector_disable);
1869 open_softirq(CMC_ENABLE_SOFTIRQ,
1870 (softirq_handler)ia64_mca_cmc_vector_enable);
1872 for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1873 ia64_log_queue(i, 0);
1874 #endif
1876 mca_init = 1;
1877 printk(KERN_INFO "MCA related initialization done\n");
1880 /*
1881 * ia64_mca_late_init
1883 * Opportunity to setup things that require initialization later
1884 * than ia64_mca_init. Setup a timer to poll for CPEs if the
1885 * platform doesn't support an interrupt driven mechanism.
1887 * Inputs : None
1888 * Outputs : Status
1889 */
1890 static int __init
1891 ia64_mca_late_init(void)
1893 if (!mca_init)
1894 return 0;
1896 /* Setup the CMCI/P vector and handler */
1897 #ifndef XEN
1898 init_timer(&cmc_poll_timer);
1899 cmc_poll_timer.function = ia64_mca_cmc_poll;
1900 #else
1901 init_timer(&cmc_poll_timer, ia64_mca_cmc_poll,
1902 NULL, smp_processor_id());
1903 #endif
1905 /* Unmask/enable the vector */
1906 cmc_polling_enabled = 0;
1907 #ifndef XEN /* XXX FIXME */
1908 schedule_work(&cmc_enable_work);
1909 #else
1910 cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
1911 #endif
1913 IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
1915 #ifdef CONFIG_ACPI
1916 /* Setup the CPEI/P vector and handler */
1917 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
1918 #ifndef XEN
1919 init_timer(&cpe_poll_timer);
1920 cpe_poll_timer.function = ia64_mca_cpe_poll;
1921 #else
1922 init_timer(&cpe_poll_timer, ia64_mca_cpe_poll,
1923 NULL,smp_processor_id());
1924 #endif
1927 irq_desc_t *desc;
1928 #ifndef XEN
1929 unsigned int irq;
1930 #endif
1932 if (cpe_vector >= 0) {
1933 /* If platform supports CPEI, enable the irq. */
1934 cpe_poll_enabled = 0;
1935 #ifndef XEN
1936 for (irq = 0; irq < NR_IRQS; ++irq)
1937 if (irq_to_vector(irq) == cpe_vector) {
1938 desc = irq_descp(irq);
1939 desc->status |= IRQ_PER_CPU;
1940 setup_vector(irq, &mca_cpe_irqaction);
1942 #else
1943 desc = irq_descp(cpe_vector);
1944 desc->status |= IRQ_PER_CPU;
1945 setup_vector(cpe_vector, &mca_cpe_irqaction);
1946 #endif
1947 ia64_mca_register_cpev(cpe_vector);
1948 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
1949 } else {
1950 /* If platform doesn't support CPEI, get the timer going. */
1951 if (cpe_poll_enabled) {
1952 ia64_mca_cpe_poll(0UL);
1953 IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
1957 #endif
1959 return 0;
1962 #ifndef XEN
1963 device_initcall(ia64_mca_late_init);
1964 #else
1965 __initcall(ia64_mca_late_init);
1966 #endif