ia64/xen-unstable

view xen/arch/ia64/linux-xen/mca.c @ 16788:37a3e770a85c

[IA64] domheap: Allocate mca related objects from domheap instead of xenheap

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
1 /*
2 * File: mca.c
3 * Purpose: Generic MCA handling layer
4 *
5 * Updated for latest kernel
6 * Copyright (C) 2003 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * Copyright (C) 2002 Dell Inc.
10 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
11 *
12 * Copyright (C) 2002 Intel
13 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
14 *
15 * Copyright (C) 2001 Intel
16 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
17 *
18 * Copyright (C) 2000 Intel
19 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
20 *
21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
22 * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
23 *
24 * 03/04/15 D. Mosberger Added INIT backtrace support.
25 * 02/03/25 M. Domsch GUID cleanups
26 *
27 * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU
28 * error flag, set SAL default return values, changed
29 * error record structure to linked list, added init call
30 * to sal_get_state_info_size().
31 *
32 * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected
33 * platform errors, completed code for logging of
34 * corrected & uncorrected machine check errors, and
35 * updated for conformance with Nov. 2000 revision of the
36 * SAL 3.0 spec.
37 * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
38 * added min save state dump, added INIT handler.
39 *
40 * 2003-12-08 Keith Owens <kaos@sgi.com>
41 * smp_call_function() must not be called from interrupt context (can
42 * deadlock on tasklist_lock). Use keventd to call smp_call_function().
43 *
44 * 2004-02-01 Keith Owens <kaos@sgi.com>
45 * Avoid deadlock when using printk() for MCA and INIT records.
46 * Delete all record printing code, moved to salinfo_decode in user space.
47 * Mark variables and functions static where possible.
48 * Delete dead variables and functions.
49 * Reorder to remove the need for forward declarations and to consolidate
50 * related code.
51 */
52 #include <linux/config.h>
53 #include <linux/types.h>
54 #include <linux/init.h>
55 #include <linux/sched.h>
56 #include <linux/interrupt.h>
57 #include <linux/irq.h>
58 #include <linux/kallsyms.h>
59 #include <linux/smp_lock.h>
60 #include <linux/bootmem.h>
61 #include <linux/acpi.h>
62 #include <linux/timer.h>
63 #include <linux/module.h>
64 #include <linux/kernel.h>
65 #include <linux/smp.h>
66 #include <linux/workqueue.h>
68 #include <asm/delay.h>
69 #include <asm/machvec.h>
70 #include <asm/meminit.h>
71 #include <asm/page.h>
72 #include <asm/ptrace.h>
73 #include <asm/system.h>
74 #include <asm/sal.h>
75 #include <asm/mca.h>
77 #include <asm/irq.h>
78 #include <asm/hw_irq.h>
80 #ifdef XEN
81 #include <xen/symbols.h>
82 #include <xen/mm.h>
83 #include <xen/console.h>
84 #include <xen/event.h>
85 #include <xen/softirq.h>
86 #include <asm/xenmca.h>
87 #include <linux/shutdown.h>
88 #endif
90 #if defined(IA64_MCA_DEBUG_INFO)
91 # define IA64_MCA_DEBUG(fmt...) printk(fmt)
92 #else
93 # define IA64_MCA_DEBUG(fmt...)
94 #endif
96 /* Used by mca_asm.S */
97 #ifndef XEN
98 ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
99 #else
100 ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state[NR_CPUS];
101 DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr);
102 #endif
103 ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state;
104 u64 ia64_mca_serialize;
105 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
106 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
107 DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
108 DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
110 unsigned long __per_cpu_mca[NR_CPUS];
112 /* In mca_asm.S */
113 extern void ia64_monarch_init_handler (void);
114 extern void ia64_slave_init_handler (void);
115 #ifdef XEN
116 extern void setup_vector (unsigned int vec, struct irqaction *action);
117 #define setup_irq(irq, action) setup_vector(irq, action)
118 #endif
120 static ia64_mc_info_t ia64_mc_info;
122 #ifdef XEN
123 #define jiffies NOW()
124 #undef HZ
125 #define HZ 1000000000UL
126 #endif
128 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
129 #define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
130 #define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
131 #define CPE_HISTORY_LENGTH 5
132 #define CMC_HISTORY_LENGTH 5
134 #ifndef XEN
135 static struct timer_list cpe_poll_timer;
136 static struct timer_list cmc_poll_timer;
137 #else
138 #define mod_timer(timer, expires) set_timer(timer, expires)
139 static struct timer cpe_poll_timer;
140 static struct timer cmc_poll_timer;
141 #endif
142 /*
143 * This variable tells whether we are currently in polling mode.
144 * Start with this in the wrong state so we won't play w/ timers
145 * before the system is ready.
146 */
147 static int cmc_polling_enabled = 1;
149 /*
150 * Clearing this variable prevents CPE polling from getting activated
151 * in mca_late_init. Use it if your system doesn't provide a CPEI,
152 * but encounters problems retrieving CPE logs. This should only be
153 * necessary for debugging.
154 */
155 static int cpe_poll_enabled = 1;
157 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
159 static int mca_init;
161 /*
162 * IA64_MCA log support
163 */
164 #define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */
165 #define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */
167 typedef struct ia64_state_log_s
168 {
169 spinlock_t isl_lock;
170 int isl_index;
171 unsigned long isl_count;
172 ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
173 } ia64_state_log_t;
175 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
177 #ifndef XEN
178 #define IA64_LOG_ALLOCATE(it, size) \
179 {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
180 (ia64_err_rec_t *)alloc_bootmem(size); \
181 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
182 (ia64_err_rec_t *)alloc_bootmem(size);}
183 #else
184 #define IA64_LOG_ALLOCATE(it, size) \
185 do { \
186 unsigned int pageorder; \
187 struct page_info *page; \
188 pageorder = get_order_from_bytes(size); \
189 page = alloc_domheap_pages(NULL, pageorder, 0); \
190 ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
191 page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
192 page = alloc_domheap_pages(NULL, pageorder, 0); \
193 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
194 page? (ia64_err_rec_t *)page_to_virt(page): NULL; \
195 } while(0)
196 #endif
198 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
199 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
200 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
201 #define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index
202 #define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index
203 #define IA64_LOG_INDEX_INC(it) \
204 {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
205 ia64_state_log[it].isl_count++;}
206 #define IA64_LOG_INDEX_DEC(it) \
207 ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
208 #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
209 #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
210 #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
212 #ifdef XEN
213 struct list_head *sal_queue, sal_log_queues[IA64_MAX_LOG_TYPES];
214 sal_log_record_header_t *sal_record;
215 DEFINE_SPINLOCK(sal_queue_lock);
216 #endif
218 /*
219 * ia64_log_init
220 * Reset the OS ia64 log buffer
221 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
222 * Outputs : None
223 */
224 static void
225 ia64_log_init(int sal_info_type)
226 {
227 u64 max_size = 0;
229 IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
230 IA64_LOG_LOCK_INIT(sal_info_type);
232 // SAL will tell us the maximum size of any error record of this type
233 max_size = ia64_sal_get_state_info_size(sal_info_type);
234 if (!max_size)
235 /* alloc_bootmem() doesn't like zero-sized allocations! */
236 return;
238 // set up OS data structures to hold error info
239 IA64_LOG_ALLOCATE(sal_info_type, max_size);
240 memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
241 memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
243 #ifdef XEN
244 if (sal_record == NULL) {
245 unsigned int pageorder;
246 struct page_info *page;
247 pageorder = get_order_from_bytes(max_size);
248 page = alloc_domheap_pages(NULL, pageorder, 0);
249 BUG_ON(page == NULL);
250 sal_record = (sal_log_record_header_t *)page_to_virt(page);
251 BUG_ON(sal_record == NULL);
252 }
253 #endif
254 }
256 #ifndef XEN
257 /*
258 * ia64_log_get
259 *
260 * Get the current MCA log from SAL and copy it into the OS log buffer.
261 *
262 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
263 * irq_safe whether you can use printk at this point
264 * Outputs : size (total record length)
265 * *buffer (ptr to error record)
266 *
267 */
268 static u64
269 ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
270 {
271 sal_log_record_header_t *log_buffer;
272 u64 total_len = 0;
273 int s;
275 IA64_LOG_LOCK(sal_info_type);
277 /* Get the process state information */
278 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
280 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
282 if (total_len) {
283 IA64_LOG_INDEX_INC(sal_info_type);
284 IA64_LOG_UNLOCK(sal_info_type);
285 if (irq_safe) {
286 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
287 "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
288 }
289 *buffer = (u8 *) log_buffer;
290 return total_len;
291 } else {
292 IA64_LOG_UNLOCK(sal_info_type);
293 return 0;
294 }
295 }
297 /*
298 * ia64_mca_log_sal_error_record
299 *
300 * This function retrieves a specified error record type from SAL
301 * and wakes up any processes waiting for error records.
302 *
303 * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT)
304 */
305 static void
306 ia64_mca_log_sal_error_record(int sal_info_type)
307 {
308 u8 *buffer;
309 sal_log_record_header_t *rh;
310 u64 size;
311 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
312 #ifdef IA64_MCA_DEBUG_INFO
313 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
314 #endif
316 size = ia64_log_get(sal_info_type, &buffer, irq_safe);
317 if (!size)
318 return;
320 salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
322 if (irq_safe)
323 IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
324 smp_processor_id(),
325 sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
327 /* Clear logs from corrected errors in case there's no user-level logger */
328 rh = (sal_log_record_header_t *)buffer;
329 if (rh->severity == sal_log_severity_corrected)
330 ia64_sal_clear_state_info(sal_info_type);
331 }
332 #else /* !XEN */
333 /*
334 * ia64_log_queue
335 *
336 * Get the current MCA log from SAL and copy it into the OS log buffer.
337 *
338 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
339 * Outputs : size (total record length)
340 * *buffer (ptr to error record)
341 *
342 */
343 static u64
344 ia64_log_queue(int sal_info_type, int virq)
345 {
346 sal_log_record_header_t *log_buffer;
347 u64 total_len = 0;
348 int s;
349 sal_queue_entry_t *e;
350 unsigned long flags;
352 IA64_LOG_LOCK(sal_info_type);
354 /* Get the process state information */
355 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
357 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
359 if (total_len) {
360 int queue_type;
362 spin_lock_irqsave(&sal_queue_lock, flags);
364 if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
365 queue_type = SAL_INFO_TYPE_CMC;
366 else
367 queue_type = sal_info_type;
369 e = xmalloc(sal_queue_entry_t);
370 BUG_ON(e == NULL);
371 e->cpuid = smp_processor_id();
372 e->sal_info_type = sal_info_type;
373 e->vector = IA64_CMC_VECTOR;
374 e->virq = virq;
375 e->length = total_len;
377 list_add_tail(&e->list, &sal_queue[queue_type]);
378 spin_unlock_irqrestore(&sal_queue_lock, flags);
380 IA64_LOG_INDEX_INC(sal_info_type);
381 IA64_LOG_UNLOCK(sal_info_type);
382 if (sal_info_type != SAL_INFO_TYPE_MCA &&
383 sal_info_type != SAL_INFO_TYPE_INIT) {
384 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
385 "Record length = %ld\n", __FUNCTION__,
386 sal_info_type, total_len);
387 }
388 return total_len;
389 } else {
390 IA64_LOG_UNLOCK(sal_info_type);
391 return 0;
392 }
393 }
394 #endif /* !XEN */
396 /*
397 * platform dependent error handling
398 */
399 #ifndef PLATFORM_MCA_HANDLERS
401 #ifdef CONFIG_ACPI
403 #ifdef XEN
404 /**
405 * Copy from linux/kernel/irq/manage.c
406 *
407 * disable_irq_nosync - disable an irq without waiting
408 * @irq: Interrupt to disable
409 *
410 * Disable the selected interrupt line. Disables and Enables are
411 * nested.
412 * Unlike disable_irq(), this function does not ensure existing
413 * instances of the IRQ handler have completed before returning.
414 *
415 * This function may be called from IRQ context.
416 */
417 void disable_irq_nosync(unsigned int irq)
418 {
419 irq_desc_t *desc = irq_desc + irq;
420 unsigned long flags;
422 if (irq >= NR_IRQS)
423 return;
425 spin_lock_irqsave(&desc->lock, flags);
426 if (!desc->depth++) {
427 desc->status |= IRQ_DISABLED;
428 desc->handler->disable(irq);
429 }
430 spin_unlock_irqrestore(&desc->lock, flags);
431 }
433 /**
434 * Copy from linux/kernel/irq/manage.c
435 *
436 * enable_irq - enable handling of an irq
437 * @irq: Interrupt to enable
438 *
439 * Undoes the effect of one call to disable_irq(). If this
440 * matches the last disable, processing of interrupts on this
441 * IRQ line is re-enabled.
442 *
443 * This function may be called from IRQ context.
444 */
445 void enable_irq(unsigned int irq)
446 {
447 irq_desc_t *desc = irq_desc + irq;
448 unsigned long flags;
450 if (irq >= NR_IRQS)
451 return;
453 spin_lock_irqsave(&desc->lock, flags);
454 switch (desc->depth) {
455 case 0:
456 WARN_ON(1);
457 break;
458 case 1: {
459 unsigned int status = desc->status & ~IRQ_DISABLED;
461 desc->status = status;
462 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
463 desc->status = status | IRQ_REPLAY;
464 hw_resend_irq(desc->handler,irq);
465 }
466 desc->handler->enable(irq);
467 /* fall-through */
468 }
469 default:
470 desc->depth--;
471 }
472 spin_unlock_irqrestore(&desc->lock, flags);
473 }
474 #endif /* XEN */
476 int cpe_vector = -1;
478 static irqreturn_t
479 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
480 {
481 static unsigned long cpe_history[CPE_HISTORY_LENGTH];
482 static int index;
483 static DEFINE_SPINLOCK(cpe_history_lock);
485 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
486 __FUNCTION__, cpe_irq, smp_processor_id());
488 /* SAL spec states this should run w/ interrupts enabled */
489 local_irq_enable();
491 #ifndef XEN
492 /* Get the CPE error record and log it */
493 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
494 #else
495 ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
496 /* CPE errors are not reported to dom0 yet; the following code is
497 reserved for a future implementation */
498 /* send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
499 #endif
501 spin_lock(&cpe_history_lock);
502 if (!cpe_poll_enabled && cpe_vector >= 0) {
504 int i, count = 1; /* we know 1 happened now */
505 unsigned long now = jiffies;
507 for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
508 if (now - cpe_history[i] <= HZ)
509 count++;
510 }
512 IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
513 if (count >= CPE_HISTORY_LENGTH) {
515 cpe_poll_enabled = 1;
516 spin_unlock(&cpe_history_lock);
517 disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
519 /*
520 * Corrected errors will still be corrected, but
521 * make sure there's a log somewhere that indicates
522 * something is generating more than we can handle.
523 */
524 printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
526 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
528 /* lock already released, get out now */
529 return IRQ_HANDLED;
530 } else {
531 cpe_history[index++] = now;
532 if (index == CPE_HISTORY_LENGTH)
533 index = 0;
534 }
535 }
536 spin_unlock(&cpe_history_lock);
537 return IRQ_HANDLED;
538 }
540 #endif /* CONFIG_ACPI */
542 static void
543 show_min_state (pal_min_state_area_t *minstate)
544 {
545 u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
546 u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
548 printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
549 printk("pr\t\t%016lx\n", minstate->pmsa_pr);
550 printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
551 printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
552 printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
553 printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
554 printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
555 printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
556 printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
557 printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
558 printk("b1\t\t%016lx ", minstate->pmsa_br1);
559 print_symbol("%s\n", minstate->pmsa_br1);
561 printk("\nstatic registers r0-r15:\n");
562 printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
563 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
564 printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
565 minstate->pmsa_gr[3], minstate->pmsa_gr[4],
566 minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
567 printk(" r8-11 %016lx %016lx %016lx %016lx\n",
568 minstate->pmsa_gr[7], minstate->pmsa_gr[8],
569 minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
570 printk("r12-15 %016lx %016lx %016lx %016lx\n",
571 minstate->pmsa_gr[11], minstate->pmsa_gr[12],
572 minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
574 printk("\nbank 0:\n");
575 printk("r16-19 %016lx %016lx %016lx %016lx\n",
576 minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
577 minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
578 printk("r20-23 %016lx %016lx %016lx %016lx\n",
579 minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
580 minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
581 printk("r24-27 %016lx %016lx %016lx %016lx\n",
582 minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
583 minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
584 printk("r28-31 %016lx %016lx %016lx %016lx\n",
585 minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
586 minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
588 printk("\nbank 1:\n");
589 printk("r16-19 %016lx %016lx %016lx %016lx\n",
590 minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
591 minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
592 printk("r20-23 %016lx %016lx %016lx %016lx\n",
593 minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
594 minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
595 printk("r24-27 %016lx %016lx %016lx %016lx\n",
596 minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
597 minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
598 printk("r28-31 %016lx %016lx %016lx %016lx\n",
599 minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
600 minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
601 }
603 static void
604 fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
605 {
606 u64 *dst_banked, *src_banked, bit, shift, nat_bits;
607 int i;
609 /*
610 * First, update the pt-regs and switch-stack structures with the contents stored
611 * in the min-state area:
612 */
613 if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
614 pt->cr_ipsr = ms->pmsa_xpsr;
615 pt->cr_iip = ms->pmsa_xip;
616 pt->cr_ifs = ms->pmsa_xfs;
617 } else {
618 pt->cr_ipsr = ms->pmsa_ipsr;
619 pt->cr_iip = ms->pmsa_iip;
620 pt->cr_ifs = ms->pmsa_ifs;
621 }
622 pt->ar_rsc = ms->pmsa_rsc;
623 pt->pr = ms->pmsa_pr;
624 pt->r1 = ms->pmsa_gr[0];
625 pt->r2 = ms->pmsa_gr[1];
626 pt->r3 = ms->pmsa_gr[2];
627 sw->r4 = ms->pmsa_gr[3];
628 sw->r5 = ms->pmsa_gr[4];
629 sw->r6 = ms->pmsa_gr[5];
630 sw->r7 = ms->pmsa_gr[6];
631 pt->r8 = ms->pmsa_gr[7];
632 pt->r9 = ms->pmsa_gr[8];
633 pt->r10 = ms->pmsa_gr[9];
634 pt->r11 = ms->pmsa_gr[10];
635 pt->r12 = ms->pmsa_gr[11];
636 pt->r13 = ms->pmsa_gr[12];
637 pt->r14 = ms->pmsa_gr[13];
638 pt->r15 = ms->pmsa_gr[14];
639 dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */
640 src_banked = ms->pmsa_bank1_gr;
641 for (i = 0; i < 16; ++i)
642 dst_banked[i] = src_banked[i];
643 pt->b0 = ms->pmsa_br0;
644 sw->b1 = ms->pmsa_br1;
646 /* construct the NaT bits for the pt-regs structure: */
647 # define PUT_NAT_BIT(dst, addr) \
648 do { \
649 bit = nat_bits & 1; nat_bits >>= 1; \
650 shift = ((unsigned long) addr >> 3) & 0x3f; \
651 dst = ((dst) & ~(1UL << shift)) | (bit << shift); \
652 } while (0)
654 /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
655 shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
656 nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
658 PUT_NAT_BIT(sw->caller_unat, &pt->r1);
659 PUT_NAT_BIT(sw->caller_unat, &pt->r2);
660 PUT_NAT_BIT(sw->caller_unat, &pt->r3);
661 PUT_NAT_BIT(sw->ar_unat, &sw->r4);
662 PUT_NAT_BIT(sw->ar_unat, &sw->r5);
663 PUT_NAT_BIT(sw->ar_unat, &sw->r6);
664 PUT_NAT_BIT(sw->ar_unat, &sw->r7);
665 PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9);
666 PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
667 PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
668 PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
669 nat_bits >>= 16; /* skip over bank0 NaT bits */
670 PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
671 PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
672 PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
673 PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
674 PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
675 PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
676 PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
677 PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
678 }
680 #ifdef XEN
681 static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
682 static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
683 static atomic_t num_stopped_cpus = ATOMIC_INIT(0);
684 extern void show_stack (struct task_struct *, unsigned long *);
686 #define CPU_FLUSH_RETRY_MAX 5
687 static void
688 init_cache_flush (void)
689 {
690 unsigned long flags;
691 int i;
692 s64 rval = 0;
693 u64 vector, progress = 0;
695 for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
696 local_irq_save(flags);
697 rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
698 0, &progress, &vector);
699 local_irq_restore(flags);
700 if (rval == 0){
701 printk("\nPAL cache flush success\n");
702 return;
703 }
704 }
705 printk("\nPAL cache flush failed. status=%ld\n",rval);
706 }
708 static void inline
709 save_ksp (struct unw_frame_info *info)
710 {
711 current->arch._thread.ksp = (__u64)(info->sw) - 16;
712 wmb();
713 init_cache_flush();
714 }
716 static void
717 freeze_cpu_osinit (struct unw_frame_info *info, void *arg)
718 {
719 save_ksp(info);
720 atomic_inc(&num_stopped_cpus);
721 printk("%s: CPU%d init handler done\n",
722 __FUNCTION__, smp_processor_id());
723 for (;;)
724 local_irq_disable();
725 }
727 /* FIXME */
728 static void
729 try_crashdump(struct unw_frame_info *info, void *arg)
730 {
731 save_ksp(info);
732 printk("\nINIT dump complete. Please reboot now.\n");
733 for (;;)
734 local_irq_disable();
735 }
736 #endif /* XEN */
738 static void
739 init_handler_platform (pal_min_state_area_t *ms,
740 struct pt_regs *pt, struct switch_stack *sw)
741 {
742 struct unw_frame_info info;
744 /* if a kernel debugger is available call it here else just dump the registers */
746 /*
747 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
748 * generated via the BMC's command-line interface, but since the console is on the
749 * same serial line, the user will need some time to switch out of the BMC before
750 * the dump begins.
751 */
752 printk("Delaying for 5 seconds...\n");
753 udelay(5*1000000);
754 #ifdef XEN
755 fetch_min_state(ms, pt, sw);
756 spin_lock(&show_stack_lock);
757 #endif
758 show_min_state(ms);
760 #ifdef XEN
761 printk("Backtrace of current vcpu (vcpu_id %d of domid %d)\n",
762 current->vcpu_id, current->domain->domain_id);
763 #else
764 printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
765 fetch_min_state(ms, pt, sw);
766 #endif
767 unw_init_from_interruption(&info, current, pt, sw);
768 ia64_do_show_stack(&info, NULL);
769 #ifdef XEN
770 spin_unlock(&show_stack_lock);
772 if (spin_trylock(&init_dump_lock)) {
773 struct domain *d;
774 struct vcpu *v;
775 #ifdef CONFIG_SMP
776 int other_cpus = num_online_cpus() - 1;
777 int wait = 1000 * other_cpus;
779 while ((atomic_read(&num_stopped_cpus) != other_cpus) && wait--)
780 udelay(1000);
781 if (other_cpus && wait < 0)
782 printk("timeout %d\n", atomic_read(&num_stopped_cpus));
783 #endif
784 if (opt_noreboot) {
785 /* this route is for dump routine */
786 unw_init_running(try_crashdump, pt);
787 } else {
788 rcu_read_lock(&domlist_read_lock);
789 for_each_domain(d) {
790 for_each_vcpu(d, v) {
791 printk("Backtrace of current vcpu "
792 "(vcpu_id %d of domid %d)\n",
793 v->vcpu_id, d->domain_id);
794 show_stack(v, NULL);
795 }
796 }
797 rcu_read_unlock(&domlist_read_lock);
798 }
799 }
800 unw_init_running(freeze_cpu_osinit, NULL);
801 #else /* XEN */
802 #ifdef CONFIG_SMP
803 /* read_trylock() would be handy... */
804 if (!tasklist_lock.write_lock)
805 read_lock(&tasklist_lock);
806 #endif
807 {
808 struct task_struct *g, *t;
809 do_each_thread (g, t) {
810 if (t == current)
811 continue;
813 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
814 show_stack(t, NULL);
815 } while_each_thread (g, t);
816 }
817 #ifdef CONFIG_SMP
818 if (!tasklist_lock.write_lock)
819 read_unlock(&tasklist_lock);
820 #endif
822 printk("\nINIT dump complete. Please reboot now.\n");
823 #endif /* XEN */
824 while (1); /* hang city if no debugger */
825 }
827 #ifdef CONFIG_ACPI
828 /*
829 * ia64_mca_register_cpev
830 *
831 * Register the corrected platform error vector with SAL.
832 *
833 * Inputs
834 * cpev Corrected Platform Error Vector number
835 *
836 * Outputs
837 * None
838 */
839 static void
840 ia64_mca_register_cpev (int cpev)
841 {
842 /* Register the CPE interrupt vector with SAL */
843 struct ia64_sal_retval isrv;
845 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
846 if (isrv.status) {
847 printk(KERN_ERR "Failed to register Corrected Platform "
848 "Error interrupt vector with SAL (status %ld)\n", isrv.status);
849 return;
850 }
852 IA64_MCA_DEBUG("%s: corrected platform error "
853 "vector %#x registered\n", __FUNCTION__, cpev);
854 }
855 #endif /* CONFIG_ACPI */
857 #endif /* PLATFORM_MCA_HANDLERS */
859 /*
860 * ia64_mca_cmc_vector_setup
861 *
862 * Setup the corrected machine check vector register in the processor.
863 * (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
864 * This function is invoked on a per-processor basis.
865 *
866 * Inputs
867 * None
868 *
869 * Outputs
870 * None
871 */
872 void
873 ia64_mca_cmc_vector_setup (void)
874 {
875 cmcv_reg_t cmcv;
877 cmcv.cmcv_regval = 0;
878 cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */
879 cmcv.cmcv_vector = IA64_CMC_VECTOR;
880 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
882 IA64_MCA_DEBUG("%s: CPU %d corrected "
883 "machine check vector %#x registered.\n",
884 __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
886 IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
887 __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
888 }
890 /*
891 * ia64_mca_cmc_vector_disable
892 *
893 * Mask the corrected machine check vector register in the processor.
894 * This function is invoked on a per-processor basis.
895 *
896 * Inputs
897 * dummy(unused)
898 *
899 * Outputs
900 * None
901 */
902 static void
903 ia64_mca_cmc_vector_disable (void *dummy)
904 {
905 cmcv_reg_t cmcv;
907 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
909 cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
910 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
912 IA64_MCA_DEBUG("%s: CPU %d corrected "
913 "machine check vector %#x disabled.\n",
914 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
915 }
917 /*
918 * ia64_mca_cmc_vector_enable
919 *
920 * Unmask the corrected machine check vector register in the processor.
921 * This function is invoked on a per-processor basis.
922 *
923 * Inputs
924 * dummy(unused)
925 *
926 * Outputs
927 * None
928 */
929 static void
930 ia64_mca_cmc_vector_enable (void *dummy)
931 {
932 cmcv_reg_t cmcv;
934 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
936 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
937 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
939 IA64_MCA_DEBUG("%s: CPU %d corrected "
940 "machine check vector %#x enabled.\n",
941 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
942 }
944 #ifndef XEN
945 /*
946 * ia64_mca_cmc_vector_disable_keventd
947 *
948 * Called via keventd (smp_call_function() is not safe in interrupt context) to
949 * disable the cmc interrupt vector.
950 */
951 static void
952 ia64_mca_cmc_vector_disable_keventd(void *unused)
953 {
954 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
955 }
957 /*
958 * ia64_mca_cmc_vector_enable_keventd
959 *
960 * Called via keventd (smp_call_function() is not safe in interrupt context) to
961 * enable the cmc interrupt vector.
962 */
963 static void
964 ia64_mca_cmc_vector_enable_keventd(void *unused)
965 {
966 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
967 }
968 #endif /* !XEN */
970 /*
971 * ia64_mca_wakeup_ipi_wait
972 *
973 * Wait for the inter-cpu interrupt to be sent by the
974 * monarch processor once it is done with handling the
975 * MCA.
976 *
977 * Inputs : None
978 * Outputs : None
979 */
980 static void
981 ia64_mca_wakeup_ipi_wait(void)
982 {
983 int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
984 int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
985 u64 irr = 0;
987 do {
988 switch(irr_num) {
989 case 0:
990 irr = ia64_getreg(_IA64_REG_CR_IRR0);
991 break;
992 case 1:
993 irr = ia64_getreg(_IA64_REG_CR_IRR1);
994 break;
995 case 2:
996 irr = ia64_getreg(_IA64_REG_CR_IRR2);
997 break;
998 case 3:
999 irr = ia64_getreg(_IA64_REG_CR_IRR3);
1000 break;
1001 }
1002 cpu_relax();
1003 } while (!(irr & (1UL << irr_bit))) ;
1004 }
1006 /*
1007 * ia64_mca_wakeup
1009 * Send an inter-cpu interrupt to wake-up a particular cpu
1010 * and mark that cpu to be out of rendez.
1012 * Inputs : cpuid
1013 * Outputs : None
1014 */
1015 static void
1016 ia64_mca_wakeup(int cpu)
1017 {
1018 platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
1019 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1020 }
1023 /*
1024 * ia64_mca_wakeup_all
1026 * Wakeup all the cpus which have rendez'ed previously.
1028 * Inputs : None
1029 * Outputs : None
1030 */
1031 static void
1032 ia64_mca_wakeup_all(void)
1033 {
1034 int cpu;
1036 /* Clear the Rendez checkin flag for all cpus */
1037 for(cpu = 0; cpu < NR_CPUS; cpu++) {
1038 if (!cpu_online(cpu))
1039 continue;
1040 if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
1041 ia64_mca_wakeup(cpu);
1042 }
1044 }
1046 /*
1047 * ia64_mca_rendez_interrupt_handler
1049 * This is the handler used to put slave processors into a spinloop
1050 * while the monarch processor does the mca handling and later
1051 * wake each slave up once the monarch is done.
1053 * Inputs : None
1054 * Outputs : None
1055 */
1056 static irqreturn_t
1057 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
1058 {
1059 unsigned long flags;
1060 int cpu = smp_processor_id();
1062 /* Mask all interrupts */
1063 local_irq_save(flags);
1065 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
1066 /* Register with the SAL monarch that the slave has
1067 * reached SAL
1068 */
1069 ia64_sal_mc_rendez();
1071 /* Wait for the wakeup IPI from the monarch
1072 * This waiting is done by polling on the wakeup-interrupt
1073 * vector bit in the processor's IRRs
1074 */
1075 ia64_mca_wakeup_ipi_wait();
1077 /* Enable all interrupts */
1078 local_irq_restore(flags);
1079 return IRQ_HANDLED;
1080 }
1082 /*
1083 * ia64_mca_wakeup_int_handler
1085 * The interrupt handler for processing the inter-cpu interrupt to the
1086 * slave cpu which was spinning in the rendez loop.
1087 * Since this spinning is done by turning off the interrupts and
1088 * polling on the wakeup-interrupt bit in the IRR, there is
1089 * nothing useful to be done in the handler.
1091 * Inputs : wakeup_irq (Wakeup-interrupt bit)
1092 * arg (Interrupt handler specific argument)
1093 * ptregs (Exception frame at the time of the interrupt)
1094 * Outputs : None
1096 */
1097 static irqreturn_t
1098 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
1099 {
1100 return IRQ_HANDLED;
1101 }
1103 /*
1104 * ia64_return_to_sal_check
1106 * This is the function called before going back from the OS_MCA handler
1107 * to the OS_MCA dispatch code which finally takes the control back
1108 * to the SAL.
1109 * The main purpose of this routine is to setup the OS_MCA to SAL
1110 * return state which can be used by the OS_MCA dispatch code
1111 * just before going back to SAL.
1113 * Inputs : None
1114 * Outputs : None
1115 */
1117 static void
1118 ia64_return_to_sal_check(int recover)
1119 {
1120 #ifdef XEN
1121 int cpu = smp_processor_id();
1122 #endif
1124 /* Copy over some relevant stuff from the sal_to_os_mca_handoff
1125 * so that it can be used at the time of os_mca_to_sal_handoff
1126 */
1127 #ifdef XEN
1128 ia64_os_to_sal_handoff_state.imots_sal_gp =
1129 ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
1131 ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1132 ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
1133 #else
1134 ia64_os_to_sal_handoff_state.imots_sal_gp =
1135 ia64_sal_to_os_handoff_state.imsto_sal_gp;
1137 ia64_os_to_sal_handoff_state.imots_sal_check_ra =
1138 ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
1139 #endif
1141 if (recover)
1142 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
1143 else
1144 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
1146 /* Default = tell SAL to return to same context */
1147 ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
1149 #ifdef XEN
1150 ia64_os_to_sal_handoff_state.imots_new_min_state =
1151 (u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
1152 #else
1153 ia64_os_to_sal_handoff_state.imots_new_min_state =
1154 (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
1155 #endif
1156 }
1159 /* Function pointer for extra MCA recovery */
1160 int (*ia64_mca_ucmc_extension)
1161 (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
1162 = NULL;
1164 int
1165 ia64_reg_MCA_extension(void *fn)
1166 {
1167 if (ia64_mca_ucmc_extension)
1168 return 1;
1170 ia64_mca_ucmc_extension = fn;
1171 return 0;
1172 }
1174 void
1175 ia64_unreg_MCA_extension(void)
1176 {
1177 if (ia64_mca_ucmc_extension)
1178 ia64_mca_ucmc_extension = NULL;
1179 }
1181 EXPORT_SYMBOL(ia64_reg_MCA_extension);
1182 EXPORT_SYMBOL(ia64_unreg_MCA_extension);
1184 /*
1185 * ia64_mca_ucmc_handler
1187 * This is the uncorrectable machine check handler called from OS_MCA
1188 * dispatch code which is in turn called from SAL_CHECK().
1189 * This is the place where the core of OS MCA handling is done.
1190 * Right now the logs are extracted and displayed in a well-defined
1191 * format. This handler code is supposed to be run only on the
1192 * monarch processor. Once the monarch is done with MCA handling
1193 * further MCA logging is enabled by clearing logs.
1194 * Monarch also has the duty of sending wakeup-IPIs to pull the
1195 * slave processors out of rendezvous spinloop.
1197 * Inputs : None
1198 * Outputs : None
1199 */
1200 void
1201 ia64_mca_ucmc_handler(void)
1202 {
1203 #ifdef XEN
1204 int cpu = smp_processor_id();
1205 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1206 &ia64_sal_to_os_handoff_state[cpu].proc_state_param;
1207 #else
1208 pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
1209 &ia64_sal_to_os_handoff_state.proc_state_param;
1210 #endif
1211 int recover;
1213 #ifndef XEN
1214 /* Get the MCA error record and log it */
1215 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
1216 #else
1217 ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
1218 send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1219 #endif
1221 /* A TLB error is the only error present in this SAL error record */
1222 recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
1223 /* other error recovery */
1224 #ifndef XEN
1225 || (ia64_mca_ucmc_extension
1226 && ia64_mca_ucmc_extension(
1227 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
1228 &ia64_sal_to_os_handoff_state,
1229 &ia64_os_to_sal_handoff_state));
1230 #else
1231 ;
1232 #endif
1234 #ifndef XEN
1235 if (recover) {
1236 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
1237 rh->severity = sal_log_severity_corrected;
1238 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
1239 }
1240 #endif
1241 /*
1242 * Wakeup all the processors which are spinning in the rendezvous
1243 * loop.
1244 */
1245 ia64_mca_wakeup_all();
1247 /* Return to SAL */
1248 ia64_return_to_sal_check(recover);
1249 }
1251 #ifndef XEN
1252 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
1253 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
1254 #endif
1256 /*
1257 * ia64_mca_cmc_int_handler
1259 * This is the corrected machine check interrupt handler.
1260 * Right now the logs are extracted and displayed in a well-defined
1261 * format.
1263 * Inputs
1264 * interrupt number
1265 * client data arg ptr
1266 * saved registers ptr
1268 * Outputs
1269 * None
1270 */
1271 static irqreturn_t
1272 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1273 {
1274 static unsigned long cmc_history[CMC_HISTORY_LENGTH];
1275 static int index;
1276 static DEFINE_SPINLOCK(cmc_history_lock);
1278 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
1279 __FUNCTION__, cmc_irq, smp_processor_id());
1281 /* SAL spec states this should run w/ interrupts enabled */
1282 local_irq_enable();
1284 #ifndef XEN
1285 /* Get the CMC error record and log it */
1286 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1287 #else
1288 ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1289 send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
1290 #endif
1292 spin_lock(&cmc_history_lock);
1293 if (!cmc_polling_enabled) {
1294 int i, count = 1; /* we know 1 happened now */
1295 unsigned long now = jiffies;
1297 for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
1298 if (now - cmc_history[i] <= HZ)
1299 count++;
1300 }
1302 IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
1303 if (count >= CMC_HISTORY_LENGTH) {
1305 cmc_polling_enabled = 1;
1306 spin_unlock(&cmc_history_lock);
1307 #ifndef XEN /* XXX FIXME */
1308 schedule_work(&cmc_disable_work);
1309 #else
1310 cpumask_raise_softirq(cpu_online_map,
1311 CMC_DISABLE_SOFTIRQ);
1312 #endif
1314 /*
1315 * Corrected errors will still be corrected, but
1316 * make sure there's a log somewhere that indicates
1317 * something is generating more than we can handle.
1318 */
1319 printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
1321 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1323 /* lock already released, get out now */
1324 return IRQ_HANDLED;
1325 } else {
1326 cmc_history[index++] = now;
1327 if (index == CMC_HISTORY_LENGTH)
1328 index = 0;
1329 }
1330 }
1331 spin_unlock(&cmc_history_lock);
1332 return IRQ_HANDLED;
1333 }
1335 /*
1336 * ia64_mca_cmc_int_caller
1338 * Triggered by sw interrupt from CMC polling routine. Calls
1339 * real interrupt handler and either triggers a sw interrupt
1340 * on the next cpu or does cleanup at the end.
1342 * Inputs
1343 * interrupt number
1344 * client data arg ptr
1345 * saved registers ptr
1346 * Outputs
1347 * handled
1348 */
1349 static irqreturn_t
1350 ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
1351 {
1352 static int start_count = -1;
1353 unsigned int cpuid;
1355 cpuid = smp_processor_id();
1357 /* If first cpu, update count */
1358 if (start_count == -1)
1359 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
1361 #ifndef XEN
1362 ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
1363 #else
1364 IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1365 __FUNCTION__, cmc_irq, smp_processor_id());
1366 ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
1367 #endif
1369 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1371 if (cpuid < NR_CPUS) {
1372 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1373 } else {
1374 /* If no log record, switch out of polling mode */
1375 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
1377 printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
1378 #ifndef XEN /* XXX FIXME */
1379 schedule_work(&cmc_enable_work);
1380 #else
1381 cpumask_raise_softirq(cpu_online_map,
1382 CMC_ENABLE_SOFTIRQ);
1383 #endif
1384 cmc_polling_enabled = 0;
1386 } else {
1388 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1389 }
1391 start_count = -1;
1392 }
1393 return IRQ_HANDLED;
1394 }
1396 /*
1397 * ia64_mca_cmc_poll
1399 * Poll for Corrected Machine Checks (CMCs)
1401 * Inputs : dummy(unused)
1402 * Outputs : None
1404 */
1405 static void
1406 #ifndef XEN
1407 ia64_mca_cmc_poll (unsigned long dummy)
1408 #else
1409 ia64_mca_cmc_poll (void *dummy)
1410 #endif
1411 {
1412 /* Trigger a CMC interrupt cascade */
1413 platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1414 }
1416 /*
1417 * ia64_mca_cpe_int_caller
1419 * Triggered by sw interrupt from CPE polling routine. Calls
1420 * real interrupt handler and either triggers a sw interrupt
1421 * on the next cpu or does cleanup at the end.
1423 * Inputs
1424 * interrupt number
1425 * client data arg ptr
1426 * saved registers ptr
1427 * Outputs
1428 * handled
1429 */
1430 #ifdef CONFIG_ACPI
1432 static irqreturn_t
1433 ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
1434 {
1435 static int start_count = -1;
1436 #ifdef XEN
1437 static unsigned long poll_time = MIN_CPE_POLL_INTERVAL;
1438 #else
1439 static int poll_time = MIN_CPE_POLL_INTERVAL;
1440 #endif
1441 unsigned int cpuid;
1443 cpuid = smp_processor_id();
1445 /* If first cpu, update count */
1446 if (start_count == -1)
1447 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
1449 #ifndef XEN
1450 ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
1451 #else
1452 IA64_MCA_DEBUG("%s: received polling vector = %#x on CPU %d\n",
1453 __FUNCTION__, cpe_irq, smp_processor_id());
1454 ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE);
1455 #endif
1457 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
1459 if (cpuid < NR_CPUS) {
1460 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1461 } else {
1462 /*
1463 * If a log was recorded, increase our polling frequency,
1464 * otherwise, backoff or return to interrupt mode.
1465 */
1466 if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
1467 poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
1468 } else if (cpe_vector < 0) {
1469 poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
1470 } else {
1471 poll_time = MIN_CPE_POLL_INTERVAL;
1473 printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
1474 enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
1475 cpe_poll_enabled = 0;
1476 }
1478 if (cpe_poll_enabled)
1479 mod_timer(&cpe_poll_timer, jiffies + poll_time);
1480 start_count = -1;
1481 }
1482 return IRQ_HANDLED;
1483 }
1485 /*
1486 * ia64_mca_cpe_poll
1488 * Poll for Corrected Platform Errors (CPEs), trigger interrupt
1489 * on first cpu, from there it will trickle through all the cpus.
1491 * Inputs : dummy(unused)
1492 * Outputs : None
1494 */
1495 static void
1496 #ifndef XEN
1497 ia64_mca_cpe_poll (unsigned long dummy)
1498 #else
1499 ia64_mca_cpe_poll (void *dummy)
1500 #endif
1501 {
1502 /* Trigger a CPE interrupt cascade */
1503 platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1504 }
1506 #endif /* CONFIG_ACPI */
1508 /*
1509 * C portion of the OS INIT handler
1511 * Called from ia64_monarch_init_handler
1513 * Inputs: pointer to pt_regs where processor info was saved.
1515 * Returns:
1516 * 0 if SAL must warm boot the System
1517 * 1 if SAL must return to interrupted context using PAL_MC_RESUME
1519 */
1520 void
1521 ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
1522 {
1523 pal_min_state_area_t *ms;
1525 #ifndef XEN
1526 oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */
1527 console_loglevel = 15; /* make sure printks make it to console */
1529 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1530 ia64_sal_to_os_handoff_state.proc_state_param);
1532 /*
1533 * Address of minstate area provided by PAL is physical,
1534 * uncacheable (bit 63 set). Convert to Linux virtual
1535 * address in region 6.
1536 */
1537 ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
1538 #else
1539 int cpu = smp_processor_id();
1541 console_start_sync();
1542 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
1543 ia64_sal_to_os_handoff_state[cpu].proc_state_param);
1545 /* Xen virtual address in region 7. */
1546 ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
1547 #endif
1549 init_handler_platform(ms, pt, sw); /* call platform specific routines */
1550 }
1552 static int __init
1553 ia64_mca_disable_cpe_polling(char *str)
1554 {
1555 cpe_poll_enabled = 0;
1556 return 1;
1557 }
1559 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1561 static struct irqaction cmci_irqaction = {
1562 .handler = ia64_mca_cmc_int_handler,
1563 #ifndef XEN
1564 .flags = SA_INTERRUPT,
1565 #endif
1566 .name = "cmc_hndlr"
1567 };
1569 static struct irqaction cmcp_irqaction = {
1570 .handler = ia64_mca_cmc_int_caller,
1571 #ifndef XEN
1572 .flags = SA_INTERRUPT,
1573 #endif
1574 .name = "cmc_poll"
1575 };
1577 static struct irqaction mca_rdzv_irqaction = {
1578 .handler = ia64_mca_rendez_int_handler,
1579 #ifndef XEN
1580 .flags = SA_INTERRUPT,
1581 #endif
1582 .name = "mca_rdzv"
1583 };
1585 static struct irqaction mca_wkup_irqaction = {
1586 .handler = ia64_mca_wakeup_int_handler,
1587 #ifndef XEN
1588 .flags = SA_INTERRUPT,
1589 #endif
1590 .name = "mca_wkup"
1591 };
1593 #ifdef CONFIG_ACPI
1594 static struct irqaction mca_cpe_irqaction = {
1595 .handler = ia64_mca_cpe_int_handler,
1596 #ifndef XEN
1597 .flags = SA_INTERRUPT,
1598 #endif
1599 .name = "cpe_hndlr"
1600 };
1602 static struct irqaction mca_cpep_irqaction = {
1603 .handler = ia64_mca_cpe_int_caller,
1604 #ifndef XEN
1605 .flags = SA_INTERRUPT,
1606 #endif
1607 .name = "cpe_poll"
1608 };
1609 #endif /* CONFIG_ACPI */
1611 /* Do per-CPU MCA-related initialization. */
1613 void __devinit
1614 ia64_mca_cpu_init(void *cpu_data)
1615 {
1616 void *pal_vaddr;
1618 if (smp_processor_id() == 0) {
1619 void *mca_data;
1620 int cpu;
1622 #ifdef XEN
1623 unsigned int pageorder;
1624 pageorder = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
1625 #else
1626 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
1627 * NR_CPUS);
1628 #endif
1629 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1630 #ifdef XEN
1631 struct page_info *page;
1632 page = alloc_domheap_pages(NULL, pageorder, 0);
1633 mca_data = page? page_to_virt(page): NULL;
1634 __per_cpu_mca[cpu] = __pa(mca_data);
1635 IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
1636 "(mca_data[%d]=%lx)\n",
1637 __FUNCTION__, cpu, __per_cpu_mca[cpu],
1638 cpu, (u64)mca_data);
1639 #else
1640 __per_cpu_mca[cpu] = __pa(mca_data);
1641 mca_data += sizeof(struct ia64_mca_cpu);
1642 #endif
1643 }
1644 }
1645 #ifdef XEN
1646 else if (sal_queue) {
1647 int i;
1648 for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1649 ia64_log_queue(i, 0);
1650 }
1651 #endif
1653 /*
1654 * The MCA info structure was allocated earlier and its
1655 * physical address saved in __per_cpu_mca[cpu]. Copy that
1656 * address to ia64_mca_data so we can access it as a per-CPU
1657 * variable.
1658 */
1659 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
1660 #ifdef XEN
1661 IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
1662 smp_processor_id(), __get_cpu_var(ia64_mca_data));
1664 /* sal_to_os_handoff for smp support */
1665 __get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
1666 __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
1667 IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
1668 smp_processor_id(),
1669 __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
1670 #endif
1672 /*
1673 * Stash away a copy of the PTE needed to map the per-CPU page.
1674 * We may need it during MCA recovery.
1675 */
1676 __get_cpu_var(ia64_mca_per_cpu_pte) =
1677 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1679 /*
1680 * Also, stash away a copy of the PAL address and the PTE
1681 * needed to map it.
1682 */
1683 pal_vaddr = efi_get_pal_addr();
1684 if (!pal_vaddr)
1685 return;
1686 __get_cpu_var(ia64_mca_pal_base) =
1687 GRANULEROUNDDOWN((unsigned long) pal_vaddr);
1688 __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
1689 PAGE_KERNEL));
1690 }
1692 /*
1693 * ia64_mca_init
1695 * Do all the system level mca specific initialization.
1697 * 1. Register spinloop and wakeup request interrupt vectors
1699 * 2. Register OS_MCA handler entry point
1701 * 3. Register OS_INIT handler entry point
1703 * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
1705 * Note that this initialization is done very early before some kernel
1706 * services are available.
1708 * Inputs : None
1710 * Outputs : None
1711 */
1712 void __init
1713 ia64_mca_init(void)
1714 {
1715 ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1716 ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
1717 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1718 int i;
1719 s64 rc;
1720 struct ia64_sal_retval isrv;
1721 u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
1723 #ifdef XEN
1724 slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
1725 #endif
1727 IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
1729 /* Clear the Rendez checkin flag for all cpus */
1730 for(i = 0 ; i < NR_CPUS; i++)
1731 ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1733 /*
1734 * Register the rendezvous spinloop and wakeup mechanism with SAL
1735 */
1737 /* Register the rendezvous interrupt vector with SAL */
1738 while (1) {
1739 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
1740 SAL_MC_PARAM_MECHANISM_INT,
1741 IA64_MCA_RENDEZ_VECTOR,
1742 timeout,
1743 SAL_MC_PARAM_RZ_ALWAYS);
1744 rc = isrv.status;
1745 if (rc == 0)
1746 break;
1747 if (rc == -2) {
1748 printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1749 "%ld to %ld milliseconds\n", timeout, isrv.v0);
1750 timeout = isrv.v0;
1751 continue;
1752 }
1753 printk(KERN_ERR "Failed to register rendezvous interrupt "
1754 "with SAL (status %ld)\n", rc);
1755 return;
1756 }
1758 /* Register the wakeup interrupt vector with SAL */
1759 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
1760 SAL_MC_PARAM_MECHANISM_INT,
1761 IA64_MCA_WAKEUP_VECTOR,
1762 0, 0);
1763 rc = isrv.status;
1764 if (rc) {
1765 printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
1766 "(status %ld)\n", rc);
1767 return;
1768 }
1770 IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
1772 ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
1773 /*
1774 * XXX - disable SAL checksum by setting size to 0; should be
1775 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
1776 */
1777 ia64_mc_info.imi_mca_handler_size = 0;
1779 /* Register the os mca handler with SAL */
1780 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
1781 ia64_mc_info.imi_mca_handler,
1782 ia64_tpa(mca_hldlr_ptr->gp),
1783 ia64_mc_info.imi_mca_handler_size,
1784 0, 0, 0)))
1785 {
1786 printk(KERN_ERR "Failed to register OS MCA handler with SAL "
1787 "(status %ld)\n", rc);
1788 return;
1789 }
1791 IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
1792 ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
1794 /*
1795 * XXX - disable SAL checksum by setting size to 0, should be
1796 * size of the actual init handler in mca_asm.S.
1797 */
1798 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
1799 ia64_mc_info.imi_monarch_init_handler_size = 0;
1800 ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
1801 ia64_mc_info.imi_slave_init_handler_size = 0;
1803 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
1804 ia64_mc_info.imi_monarch_init_handler);
1806 /* Register the os init handler with SAL */
1807 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
1808 ia64_mc_info.imi_monarch_init_handler,
1809 ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1810 ia64_mc_info.imi_monarch_init_handler_size,
1811 ia64_mc_info.imi_slave_init_handler,
1812 ia64_tpa(ia64_getreg(_IA64_REG_GP)),
1813 ia64_mc_info.imi_slave_init_handler_size)))
1814 {
1815 printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
1816 "(status %ld)\n", rc);
1817 return;
1818 }
1820 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
1822 /*
1823 * Configure the CMCI/P vector and handler. Interrupts for CMC are
1824 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
1825 */
1826 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
1827 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
1828 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
1830 /* Setup the MCA rendezvous interrupt vector */
1831 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
1833 /* Setup the MCA wakeup interrupt vector */
1834 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
1836 #ifdef CONFIG_ACPI
1837 /* Setup the CPEI/P handler */
1838 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
1839 #endif
1841 /* Initialize the areas set aside by the OS to buffer the
1842 * platform/processor error states for MCA/INIT/CMC
1843 * handling.
1844 */
1845 ia64_log_init(SAL_INFO_TYPE_MCA);
1846 ia64_log_init(SAL_INFO_TYPE_INIT);
1847 ia64_log_init(SAL_INFO_TYPE_CMC);
1848 ia64_log_init(SAL_INFO_TYPE_CPE);
1850 #ifdef XEN
1851 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_MCA]);
1852 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_INIT]);
1853 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CMC]);
1854 INIT_LIST_HEAD(&sal_log_queues[SAL_INFO_TYPE_CPE]);
1856 /* NULL sal_queue used elsewhere to determine MCA init state */
1857 sal_queue = sal_log_queues;
1859 open_softirq(CMC_DISABLE_SOFTIRQ,
1860 (softirq_handler)ia64_mca_cmc_vector_disable);
1861 open_softirq(CMC_ENABLE_SOFTIRQ,
1862 (softirq_handler)ia64_mca_cmc_vector_enable);
1864 for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
1865 ia64_log_queue(i, 0);
1866 #endif
1868 mca_init = 1;
1869 printk(KERN_INFO "MCA related initialization done\n");
1870 }
1872 /*
1873 * ia64_mca_late_init
1875 * Opportunity to setup things that require initialization later
1876 * than ia64_mca_init. Setup a timer to poll for CPEs if the
1877 * platform doesn't support an interrupt driven mechanism.
1879 * Inputs : None
1880 * Outputs : Status
1881 */
1882 static int __init
1883 ia64_mca_late_init(void)
1884 {
1885 if (!mca_init)
1886 return 0;
1888 /* Setup the CMCI/P vector and handler */
1889 #ifndef XEN
1890 init_timer(&cmc_poll_timer);
1891 cmc_poll_timer.function = ia64_mca_cmc_poll;
1892 #else
1893 init_timer(&cmc_poll_timer, ia64_mca_cmc_poll,
1894 NULL, smp_processor_id());
1895 #endif
1897 /* Unmask/enable the vector */
1898 cmc_polling_enabled = 0;
1899 #ifndef XEN /* XXX FIXME */
1900 schedule_work(&cmc_enable_work);
1901 #else
1902 cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
1903 #endif
1905 IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
1907 #ifdef CONFIG_ACPI
1908 /* Setup the CPEI/P vector and handler */
1909 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
1910 #ifndef XEN
1911 init_timer(&cpe_poll_timer);
1912 cpe_poll_timer.function = ia64_mca_cpe_poll;
1913 #else
1914 init_timer(&cpe_poll_timer, ia64_mca_cpe_poll,
1915 NULL,smp_processor_id());
1916 #endif
1918 {
1919 irq_desc_t *desc;
1920 unsigned int irq;
1922 if (cpe_vector >= 0) {
1923 /* If platform supports CPEI, enable the irq. */
1924 cpe_poll_enabled = 0;
1925 for (irq = 0; irq < NR_IRQS; ++irq)
1926 if (irq_to_vector(irq) == cpe_vector) {
1927 desc = irq_descp(irq);
1928 desc->status |= IRQ_PER_CPU;
1929 setup_irq(irq, &mca_cpe_irqaction);
1930 }
1931 ia64_mca_register_cpev(cpe_vector);
1932 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
1933 } else {
1934 /* If platform doesn't support CPEI, get the timer going. */
1935 if (cpe_poll_enabled) {
1936 ia64_mca_cpe_poll(0UL);
1937 IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
1938 }
1939 }
1940 }
1941 #endif
1943 return 0;
1944 }
1946 #ifndef XEN
1947 device_initcall(ia64_mca_late_init);
1948 #else
1949 __initcall(ia64_mca_late_init);
1950 #endif