ia64/xen-unstable

changeset 10683:97c290c7b015

[IA64] merge in INIT patches
author awilliam@xenbuild.aw
date Sun Jul 09 20:04:23 2006 -0600 (2006-07-09)
parents 50ed5c116b4d efdfbb40db3f
children a1482fd74530
files xen/include/asm-ia64/linux/asm/asmmacro.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Fri Jul 07 10:36:31 2006 -0600
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Sun Jul 09 20:04:23 2006 -0600
     1.3 @@ -8,6 +8,7 @@
     1.4  #include <xen/sched.h>
     1.5  #include <asm/processor.h>
     1.6  #include <asm/ptrace.h>
     1.7 +#include <asm/mca.h>
     1.8  #include <public/xen.h>
     1.9  #include <asm/tlb.h>
    1.10  #include <asm/regs.h>
    1.11 @@ -33,6 +34,9 @@ void foo(void)
    1.12  	DEFINE(SHARED_INFO_SIZE, sizeof (struct shared_info));
    1.13  
    1.14  	BLANK();
    1.15 +	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
    1.16 +
    1.17 +	BLANK();
    1.18  #ifdef   VTI_DEBUG
    1.19  	DEFINE(IVT_CUR_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_current));
    1.20  	DEFINE(IVT_DBG_OFS, offsetof(struct vcpu, arch.arch_vmx.ivt_debug));
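For context on the hunk above: each DEFINE() in asm-offsets.c is scraped from the file's compiled assembly into a generated header, so assembler sources such as mca_asm.S can refer to structure offsets like IA64_MCA_CPU_INIT_STACK_OFFSET symbolically. A minimal sketch of the idiom (the DEFINE macro follows the usual kernel pattern; the struct layout here is illustrative only):

    /* Each DEFINE() plants a "->SYM value" marker in the compiled
     * assembly; the build turns those markers into #define lines in
     * a generated header that .S files can #include. */
    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct example_mca_cpu {                /* illustrative layout */
            unsigned long mca_stack[1024];
            unsigned long init_stack[1024];
    };

    void foo(void)
    {
            DEFINE(EXAMPLE_INIT_STACK_OFFSET,
                   offsetof(struct example_mca_cpu, init_stack));
    }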
     2.1 --- a/xen/arch/ia64/linux-xen/Makefile	Fri Jul 07 10:36:31 2006 -0600
     2.2 +++ b/xen/arch/ia64/linux-xen/Makefile	Sun Jul 09 20:04:23 2006 -0600
     2.3 @@ -1,6 +1,8 @@
     2.4  obj-y += efi.o
     2.5  obj-y += entry.o
     2.6  obj-y += irq_ia64.o
     2.7 +obj-y += mca.o
     2.8 +obj-y += mca_asm.o
     2.9  obj-y += mm_contig.o
    2.10  obj-y += pal.o
    2.11  obj-y += process-linux-xen.o
     3.1 --- a/xen/arch/ia64/linux-xen/README.origin	Fri Jul 07 10:36:31 2006 -0600
     3.2 +++ b/xen/arch/ia64/linux-xen/README.origin	Sun Jul 09 20:04:23 2006 -0600
     3.3 @@ -11,6 +11,8 @@ entry.S			-> linux/arch/ia64/kernel/entr
     3.4  head.S			-> linux/arch/ia64/kernel/head.S
     3.5  hpsim_ssc.h		-> linux/arch/ia64/hp/sim/hpsim_ssc.h
     3.6  irq_ia64.c		-> linux/arch/ia64/kernel/irq_ia64.c
     3.7 +mca.c			-> linux/arch/ia64/kernel/mca.c
     3.8 +mca_asm.S		-> linux/arch/ia64/kernel/mca_asm.S
     3.9  minstate.h		-> linux/arch/ia64/kernel/minstate.h
    3.10  mm_contig.c		-> linux/arch/ia64/mm/contig.c
    3.11  pal.S			-> linux/arch/ia64/kernel/pal.S
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Sun Jul 09 20:04:23 2006 -0600
     4.3 @@ -0,0 +1,1600 @@
     4.4 +/*
     4.5 + * File:	mca.c
     4.6 + * Purpose:	Generic MCA handling layer
     4.7 + *
     4.8 + * Updated for latest kernel
     4.9 + * Copyright (C) 2003 Hewlett-Packard Co
    4.10 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    4.11 + *
    4.12 + * Copyright (C) 2002 Dell Inc.
    4.13 + * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
    4.14 + *
    4.15 + * Copyright (C) 2002 Intel
    4.16 + * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
    4.17 + *
    4.18 + * Copyright (C) 2001 Intel
    4.19 + * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
    4.20 + *
    4.21 + * Copyright (C) 2000 Intel
    4.22 + * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
    4.23 + *
    4.24 + * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
    4.25 + * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
    4.26 + *
    4.27 + * 03/04/15 D. Mosberger Added INIT backtrace support.
    4.28 + * 02/03/25 M. Domsch	GUID cleanups
    4.29 + *
    4.30 + * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU
    4.31 + *			error flag, set SAL default return values, changed
    4.32 + *			error record structure to linked list, added init call
    4.33 + *			to sal_get_state_info_size().
    4.34 + *
    4.35 + * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
    4.36 + *                      platform errors, completed code for logging of
    4.37 + *                      corrected & uncorrected machine check errors, and
    4.38 + *                      updated for conformance with Nov. 2000 revision of the
    4.39 + *                      SAL 3.0 spec.
    4.40 + * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
    4.41 + *                           added min save state dump, added INIT handler.
    4.42 + *
    4.43 + * 2003-12-08 Keith Owens <kaos@sgi.com>
    4.44 + *            smp_call_function() must not be called from interrupt context (can
    4.45 + *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
    4.46 + *
    4.47 + * 2004-02-01 Keith Owens <kaos@sgi.com>
    4.48 + *            Avoid deadlock when using printk() for MCA and INIT records.
    4.49 + *            Delete all record printing code, moved to salinfo_decode in user space.
    4.50 + *            Mark variables and functions static where possible.
    4.51 + *            Delete dead variables and functions.
    4.52 + *            Reorder to remove the need for forward declarations and to consolidate
    4.53 + *            related code.
    4.54 + */
    4.55 +#include <linux/config.h>
    4.56 +#include <linux/types.h>
    4.57 +#include <linux/init.h>
    4.58 +#include <linux/sched.h>
    4.59 +#include <linux/interrupt.h>
    4.60 +#include <linux/irq.h>
    4.61 +#include <linux/kallsyms.h>
    4.62 +#include <linux/smp_lock.h>
    4.63 +#include <linux/bootmem.h>
    4.64 +#include <linux/acpi.h>
    4.65 +#include <linux/timer.h>
    4.66 +#include <linux/module.h>
    4.67 +#include <linux/kernel.h>
    4.68 +#include <linux/smp.h>
    4.69 +#include <linux/workqueue.h>
    4.70 +
    4.71 +#include <asm/delay.h>
    4.72 +#include <asm/machvec.h>
    4.73 +#include <asm/meminit.h>
    4.74 +#include <asm/page.h>
    4.75 +#include <asm/ptrace.h>
    4.76 +#include <asm/system.h>
    4.77 +#include <asm/sal.h>
    4.78 +#include <asm/mca.h>
    4.79 +
    4.80 +#include <asm/irq.h>
    4.81 +#include <asm/hw_irq.h>
    4.82 +
    4.83 +#ifdef XEN
    4.84 +#include <xen/symbols.h>
    4.85 +#endif
    4.86 +
    4.87 +#if defined(IA64_MCA_DEBUG_INFO)
    4.88 +# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
    4.89 +#else
    4.90 +# define IA64_MCA_DEBUG(fmt...)
    4.91 +#endif
    4.92 +
    4.93 +/* Used by mca_asm.S */
    4.94 +#ifndef XEN
    4.95 +ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
    4.96 +#else
    4.97 +ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state[NR_CPUS];
    4.98 +DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
    4.99 +#endif
   4.100 +ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
   4.101 +u64				ia64_mca_serialize;
   4.102 +DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
   4.103 +DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
   4.104 +DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
   4.105 +DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
   4.106 +
   4.107 +unsigned long __per_cpu_mca[NR_CPUS];
   4.108 +
   4.109 +/* In mca_asm.S */
   4.110 +extern void			ia64_monarch_init_handler (void);
   4.111 +extern void			ia64_slave_init_handler (void);
   4.112 +
   4.113 +static ia64_mc_info_t		ia64_mc_info;
   4.114 +
   4.115 +#ifndef XEN
   4.116 +#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
   4.117 +#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
   4.118 +#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
   4.119 +#define CPE_HISTORY_LENGTH    5
   4.120 +#define CMC_HISTORY_LENGTH    5
   4.121 +
   4.122 +static struct timer_list cpe_poll_timer;
   4.123 +static struct timer_list cmc_poll_timer;
   4.124 +/*
   4.125 + * This variable tells whether we are currently in polling mode.
   4.126 + * Start with this in the wrong state so we won't play w/ timers
   4.127 + * before the system is ready.
   4.128 + */
   4.129 +static int cmc_polling_enabled = 1;
   4.130 +
   4.131 +/*
   4.132 + * Clearing this variable prevents CPE polling from getting activated
   4.133 + * in mca_late_init.  Use it if your system doesn't provide a CPEI,
   4.134 + * but encounters problems retrieving CPE logs.  This should only be
   4.135 + * necessary for debugging.
   4.136 + */
   4.137 +static int cpe_poll_enabled = 1;
   4.138 +
   4.139 +extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
   4.140 +#endif /* !XEN */
   4.141 +
   4.142 +static int mca_init;
   4.143 +
   4.144 +#ifndef XEN
   4.145 +/*
   4.146 + * IA64_MCA log support
   4.147 + */
   4.148 +#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
   4.149 +#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */
   4.150 +
   4.151 +typedef struct ia64_state_log_s
   4.152 +{
   4.153 +	spinlock_t	isl_lock;
   4.154 +	int		isl_index;
   4.155 +	unsigned long	isl_count;
   4.156 +	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
   4.157 +} ia64_state_log_t;
   4.158 +
   4.159 +static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
   4.160 +
   4.161 +#define IA64_LOG_ALLOCATE(it, size) \
   4.162 +	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
   4.163 +		(ia64_err_rec_t *)alloc_bootmem(size); \
   4.164 +	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
   4.165 +		(ia64_err_rec_t *)alloc_bootmem(size);}
   4.166 +#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
   4.167 +#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
   4.168 +#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
   4.169 +#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
   4.170 +#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
   4.171 +#define IA64_LOG_INDEX_INC(it) \
   4.172 +    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
   4.173 +    ia64_state_log[it].isl_count++;}
   4.174 +#define IA64_LOG_INDEX_DEC(it) \
   4.175 +    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
   4.176 +#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
   4.177 +#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
   4.178 +#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
   4.179 +
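The isl_index machinery above is a two-slot flip buffer: NEXT names the slot the next record will be read into, CURR is always the other slot (1 - index), so a record from a nested MCA can be captured while the previous one is still being consumed. The core pattern in isolation (an illustrative sketch, not code from this file):

    /* Two-slot flip buffer: claiming the "next" slot flips the
     * index, so the old "next" becomes "current" and the other
     * slot is free for a nested event. */
    struct flip_log {
            int   index;              /* slot to be written next */
            char *slot[2];
    };

    static char *claim_next(struct flip_log *l)
    {
            char *buf = l->slot[l->index];

            l->index = 1 - l->index;
            return buf;
    }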
   4.180 +/*
   4.181 + * ia64_log_init
   4.182 + *	Reset the OS ia64 log buffer
   4.183 + * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
   4.184 + * Outputs	:	None
   4.185 + */
   4.186 +static void
   4.187 +ia64_log_init(int sal_info_type)
   4.188 +{
   4.189 +	u64	max_size = 0;
   4.190 +
   4.191 +	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
   4.192 +	IA64_LOG_LOCK_INIT(sal_info_type);
   4.193 +
   4.194 +	// SAL will tell us the maximum size of any error record of this type
   4.195 +	max_size = ia64_sal_get_state_info_size(sal_info_type);
   4.196 +	if (!max_size)
   4.197 +		/* alloc_bootmem() doesn't like zero-sized allocations! */
   4.198 +		return;
   4.199 +
   4.200 +	// set up OS data structures to hold error info
   4.201 +	IA64_LOG_ALLOCATE(sal_info_type, max_size);
   4.202 +	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
   4.203 +	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
   4.204 +}
   4.205 +
   4.206 +/*
   4.207 + * ia64_log_get
   4.208 + *
   4.209 + *	Get the current MCA log from SAL and copy it into the OS log buffer.
   4.210 + *
   4.211 + *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
   4.212 + *              irq_safe    whether you can use printk at this point
   4.213 + *  Outputs :   size        (total record length)
   4.214 + *              *buffer     (ptr to error record)
   4.215 + *
   4.216 + */
   4.217 +static u64
   4.218 +ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
   4.219 +{
   4.220 +	sal_log_record_header_t     *log_buffer;
   4.221 +	u64                         total_len = 0;
   4.222 +	int                         s;
   4.223 +
   4.224 +	IA64_LOG_LOCK(sal_info_type);
   4.225 +
   4.226 +	/* Get the process state information */
   4.227 +	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
   4.228 +
   4.229 +	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
   4.230 +
   4.231 +	if (total_len) {
   4.232 +		IA64_LOG_INDEX_INC(sal_info_type);
   4.233 +		IA64_LOG_UNLOCK(sal_info_type);
   4.234 +		if (irq_safe) {
   4.235 +			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
   4.236 +				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
   4.237 +		}
   4.238 +		*buffer = (u8 *) log_buffer;
   4.239 +		return total_len;
   4.240 +	} else {
   4.241 +		IA64_LOG_UNLOCK(sal_info_type);
   4.242 +		return 0;
   4.243 +	}
   4.244 +}
   4.245 +
   4.246 +/*
   4.247 + *  ia64_mca_log_sal_error_record
   4.248 + *
   4.249 + *  This function retrieves a specified error record type from SAL
   4.250 + *  and wakes up any processes waiting for error records.
   4.251 + *
   4.252 + *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
   4.253 + */
   4.254 +static void
   4.255 +ia64_mca_log_sal_error_record(int sal_info_type)
   4.256 +{
   4.257 +	u8 *buffer;
   4.258 +	sal_log_record_header_t *rh;
   4.259 +	u64 size;
   4.260 +	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
   4.261 +#ifdef IA64_MCA_DEBUG_INFO
   4.262 +	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
   4.263 +#endif
   4.264 +
   4.265 +	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
   4.266 +	if (!size)
   4.267 +		return;
   4.268 +
   4.269 +	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
   4.270 +
   4.271 +	if (irq_safe)
   4.272 +		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
   4.273 +			smp_processor_id(),
   4.274 +			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
   4.275 +
   4.276 +	/* Clear logs from corrected errors in case there's no user-level logger */
   4.277 +	rh = (sal_log_record_header_t *)buffer;
   4.278 +	if (rh->severity == sal_log_severity_corrected)
   4.279 +		ia64_sal_clear_state_info(sal_info_type);
   4.280 +}
   4.281 +
   4.282 +/*
   4.283 + * platform dependent error handling
   4.284 + */
   4.285 +#endif /* !XEN */
   4.286 +#ifndef PLATFORM_MCA_HANDLERS
   4.287 +#ifndef XEN
   4.288 +
   4.289 +#ifdef CONFIG_ACPI
   4.290 +
   4.291 +int cpe_vector = -1;
   4.292 +
   4.293 +static irqreturn_t
   4.294 +ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
   4.295 +{
   4.296 +	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
   4.297 +	static int		index;
   4.298 +	static DEFINE_SPINLOCK(cpe_history_lock);
   4.299 +
   4.300 +	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
   4.301 +		       __FUNCTION__, cpe_irq, smp_processor_id());
   4.302 +
   4.303 +	/* SAL spec states this should run w/ interrupts enabled */
   4.304 +	local_irq_enable();
   4.305 +
   4.306 +	/* Get the CPE error record and log it */
   4.307 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
   4.308 +
   4.309 +	spin_lock(&cpe_history_lock);
   4.310 +	if (!cpe_poll_enabled && cpe_vector >= 0) {
   4.311 +
   4.312 +		int i, count = 1; /* we know 1 happened now */
   4.313 +		unsigned long now = jiffies;
   4.314 +
   4.315 +		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
   4.316 +			if (now - cpe_history[i] <= HZ)
   4.317 +				count++;
   4.318 +		}
   4.319 +
   4.320 +		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
   4.321 +		if (count >= CPE_HISTORY_LENGTH) {
   4.322 +
   4.323 +			cpe_poll_enabled = 1;
   4.324 +			spin_unlock(&cpe_history_lock);
   4.325 +			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
   4.326 +
   4.327 +			/*
   4.328 +			 * Corrected errors will still be corrected, but
   4.329 +			 * make sure there's a log somewhere that indicates
   4.330 +			 * something is generating more than we can handle.
   4.331 +			 */
   4.332 +			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
   4.333 +
   4.334 +			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
   4.335 +
   4.336 +			/* lock already released, get out now */
   4.337 +			return IRQ_HANDLED;
   4.338 +		} else {
   4.339 +			cpe_history[index++] = now;
   4.340 +			if (index == CPE_HISTORY_LENGTH)
   4.341 +				index = 0;
   4.342 +		}
   4.343 +	}
   4.344 +	spin_unlock(&cpe_history_lock);
   4.345 +	return IRQ_HANDLED;
   4.346 +}
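The cpe_history check above is an interrupt-storm guard: if CPE_HISTORY_LENGTH interrupts land within one second (HZ jiffies), the vector is disabled and the poll timer takes over. Reduced to its essentials (an illustrative sketch):

    /* Storm detection: keep a small ring of event timestamps; if
     * the ring fills within one second, report a storm so the
     * caller can fall back to polling. */
    #define HISTORY_LEN 5

    static int storming(unsigned long history[HISTORY_LEN], int *idx,
                        unsigned long now, unsigned long hz)
    {
            int i, count = 1;         /* the event that just arrived */

            for (i = 0; i < HISTORY_LEN; i++)
                    if (now - history[i] <= hz)
                            count++;
            if (count >= HISTORY_LEN)
                    return 1;         /* switch to polling */
            history[(*idx)++] = now;
            if (*idx == HISTORY_LEN)
                    *idx = 0;
            return 0;
    }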
   4.347 +
   4.348 +#endif /* CONFIG_ACPI */
   4.349 +#endif /* !XEN */
   4.350 +
   4.351 +static void
   4.352 +show_min_state (pal_min_state_area_t *minstate)
   4.353 +{
   4.354 +	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
   4.355 +	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
   4.356 +
   4.357 +	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
   4.358 +	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
   4.359 +	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
   4.360 +	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
   4.361 +	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
   4.362 +	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
   4.363 +	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
   4.364 +	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
   4.365 +	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
   4.366 +	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
   4.367 +	printk("b1\t\t%016lx ", minstate->pmsa_br1);
   4.368 +	print_symbol("%s\n", minstate->pmsa_br1);
   4.369 +
   4.370 +	printk("\nstatic registers r0-r15:\n");
   4.371 +	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
   4.372 +	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
   4.373 +	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
   4.374 +	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
   4.375 +	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
   4.376 +	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
   4.377 +	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
   4.378 +	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
   4.379 +	printk("r12-15 %016lx %016lx %016lx %016lx\n",
   4.380 +	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
   4.381 +	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
   4.382 +
   4.383 +	printk("\nbank 0:\n");
   4.384 +	printk("r16-19 %016lx %016lx %016lx %016lx\n",
   4.385 +	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
   4.386 +	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
   4.387 +	printk("r20-23 %016lx %016lx %016lx %016lx\n",
   4.388 +	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
   4.389 +	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
   4.390 +	printk("r24-27 %016lx %016lx %016lx %016lx\n",
   4.391 +	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
   4.392 +	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
   4.393 +	printk("r28-31 %016lx %016lx %016lx %016lx\n",
   4.394 +	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
   4.395 +	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
   4.396 +
   4.397 +	printk("\nbank 1:\n");
   4.398 +	printk("r16-19 %016lx %016lx %016lx %016lx\n",
   4.399 +	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
   4.400 +	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
   4.401 +	printk("r20-23 %016lx %016lx %016lx %016lx\n",
   4.402 +	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
   4.403 +	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
   4.404 +	printk("r24-27 %016lx %016lx %016lx %016lx\n",
   4.405 +	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
   4.406 +	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
   4.407 +	printk("r28-31 %016lx %016lx %016lx %016lx\n",
   4.408 +	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
   4.409 +	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
   4.410 +}
   4.411 +
   4.412 +static void
   4.413 +fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
   4.414 +{
   4.415 +	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
   4.416 +	int i;
   4.417 +
   4.418 +	/*
   4.419 +	 * First, update the pt-regs and switch-stack structures with the contents stored
   4.420 +	 * in the min-state area:
   4.421 +	 */
   4.422 +	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
   4.423 +		pt->cr_ipsr = ms->pmsa_xpsr;
   4.424 +		pt->cr_iip = ms->pmsa_xip;
   4.425 +		pt->cr_ifs = ms->pmsa_xfs;
   4.426 +	} else {
   4.427 +		pt->cr_ipsr = ms->pmsa_ipsr;
   4.428 +		pt->cr_iip = ms->pmsa_iip;
   4.429 +		pt->cr_ifs = ms->pmsa_ifs;
   4.430 +	}
   4.431 +	pt->ar_rsc = ms->pmsa_rsc;
   4.432 +	pt->pr = ms->pmsa_pr;
   4.433 +	pt->r1 = ms->pmsa_gr[0];
   4.434 +	pt->r2 = ms->pmsa_gr[1];
   4.435 +	pt->r3 = ms->pmsa_gr[2];
   4.436 +	sw->r4 = ms->pmsa_gr[3];
   4.437 +	sw->r5 = ms->pmsa_gr[4];
   4.438 +	sw->r6 = ms->pmsa_gr[5];
   4.439 +	sw->r7 = ms->pmsa_gr[6];
   4.440 +	pt->r8 = ms->pmsa_gr[7];
   4.441 +	pt->r9 = ms->pmsa_gr[8];
   4.442 +	pt->r10 = ms->pmsa_gr[9];
   4.443 +	pt->r11 = ms->pmsa_gr[10];
   4.444 +	pt->r12 = ms->pmsa_gr[11];
   4.445 +	pt->r13 = ms->pmsa_gr[12];
   4.446 +	pt->r14 = ms->pmsa_gr[13];
   4.447 +	pt->r15 = ms->pmsa_gr[14];
   4.448 +	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
   4.449 +	src_banked = ms->pmsa_bank1_gr;
   4.450 +	for (i = 0; i < 16; ++i)
   4.451 +		dst_banked[i] = src_banked[i];
   4.452 +	pt->b0 = ms->pmsa_br0;
   4.453 +	sw->b1 = ms->pmsa_br1;
   4.454 +
   4.455 +	/* construct the NaT bits for the pt-regs structure: */
   4.456 +#	define PUT_NAT_BIT(dst, addr)					\
   4.457 +	do {								\
   4.458 +		bit = nat_bits & 1; nat_bits >>= 1;			\
   4.459 +		shift = ((unsigned long) addr >> 3) & 0x3f;		\
   4.460 +		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
   4.461 +	} while (0)
   4.462 +
   4.463 +	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
   4.464 +	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
   4.465 +	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
   4.466 +
   4.467 +	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
   4.468 +	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
   4.469 +	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
   4.470 +	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
   4.471 +	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
   4.472 +	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
   4.473 +	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
   4.474 +	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
   4.475 +	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
   4.476 +	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
   4.477 +	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
   4.478 +	nat_bits >>= 16;	/* skip over bank0 NaT bits */
   4.479 +	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
   4.480 +	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
   4.481 +	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
   4.482 +	PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
   4.483 +	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
   4.484 +	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
   4.485 +	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
   4.486 +	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
   4.487 +}
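The rotate-and-PUT_NAT_BIT dance above relies on the ia64 UNAT convention: when a register is spilled to memory at address addr, its NaT bit lives at bit ((addr >> 3) & 0x3f) of the corresponding UNAT collection word. The per-bit update, shown standalone (sketch):

    /* Place one NaT bit into a UNAT word at the position dictated
     * by the spill address: bit index = (addr >> 3) & 0x3f. */
    static unsigned long
    put_nat_bit(unsigned long unat, unsigned long addr, unsigned long nat)
    {
            unsigned long shift = (addr >> 3) & 0x3f;

            return (unat & ~(1UL << shift)) | ((nat & 1) << shift);
    }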
   4.488 +
   4.489 +#ifdef XEN
   4.490 +static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
   4.491 +static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
   4.492 +
   4.493 +static void
   4.494 +save_ksp (struct unw_frame_info *info, void *arg)
   4.495 +{
   4.496 +	current->arch._thread.ksp = (__u64)(info->sw) - 16;
   4.497 +	wmb();
   4.498 +}
   4.499 +
   4.500 +/* FIXME */
   4.501 +int try_crashdump(struct pt_regs *a) { return 0; }
   4.502 +
   4.503 +#define CPU_FLUSH_RETRY_MAX 5
   4.504 +static void
   4.505 +init_cache_flush (void)
   4.506 +{
   4.507 +	unsigned long flags;
   4.508 +	int i;
   4.509 +	s64 rval = 0;
   4.510 +	u64 vector, progress = 0;
   4.511 +
   4.512 +	for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
   4.513 +		local_irq_save(flags);
   4.514 +		rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
   4.515 +		                            0, &progress, &vector);
   4.516 +		local_irq_restore(flags);
   4.517 +		if (rval == 0){
   4.518 +			printk("\nPAL cache flush success\n");
   4.519 +			return;
   4.520 +		}
   4.521 +	}
   4.522 +	printk("\nPAL cache flush failed. status=%ld\n",rval);
   4.523 +}
   4.524 +#endif /* XEN */
   4.525 +
   4.526 +static void
   4.527 +init_handler_platform (pal_min_state_area_t *ms,
   4.528 +		       struct pt_regs *pt, struct switch_stack *sw)
   4.529 +{
   4.530 +	struct unw_frame_info info;
   4.531 +
    4.532 +	/* if a kernel debugger is available, call it here; else just dump the registers */
   4.533 +
   4.534 +	/*
    4.535 +	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
   4.536 +	 * generated via the BMC's command-line interface, but since the console is on the
   4.537 +	 * same serial line, the user will need some time to switch out of the BMC before
   4.538 +	 * the dump begins.
   4.539 +	 */
   4.540 +	printk("Delaying for 5 seconds...\n");
   4.541 +	udelay(5*1000000);
   4.542 +#ifdef XEN
   4.543 +	fetch_min_state(ms, pt, sw);
   4.544 +	spin_lock(&show_stack_lock);
   4.545 +#endif
   4.546 +	show_min_state(ms);
   4.547 +
   4.548 +#ifdef XEN
   4.549 +	printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
   4.550 +#else
   4.551 +	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
   4.552 +	fetch_min_state(ms, pt, sw);
   4.553 +#endif
   4.554 +	unw_init_from_interruption(&info, current, pt, sw);
   4.555 +	ia64_do_show_stack(&info, NULL);
   4.556 +#ifdef XEN
   4.557 +	unw_init_running(save_ksp, NULL);
   4.558 +	spin_unlock(&show_stack_lock);
   4.559 +	wmb();
   4.560 +	init_cache_flush();
   4.561 +
   4.562 +	if (spin_trylock(&init_dump_lock)) {
   4.563 +#ifdef CONFIG_SMP
   4.564 +		udelay(5*1000000);
   4.565 +#endif
   4.566 +		if (try_crashdump(pt) == 0)
   4.567 +			printk("\nINIT dump complete.  Please reboot now.\n");
   4.568 +	}
   4.569 +	printk("%s: CPU%d init handler done\n",
   4.570 +	       __FUNCTION__, smp_processor_id());
   4.571 +#else /* XEN */
   4.572 +#ifdef CONFIG_SMP
   4.573 +	/* read_trylock() would be handy... */
   4.574 +	if (!tasklist_lock.write_lock)
   4.575 +		read_lock(&tasklist_lock);
   4.576 +#endif
   4.577 +	{
   4.578 +		struct task_struct *g, *t;
   4.579 +		do_each_thread (g, t) {
   4.580 +			if (t == current)
   4.581 +				continue;
   4.582 +
   4.583 +			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
   4.584 +			show_stack(t, NULL);
   4.585 +		} while_each_thread (g, t);
   4.586 +	}
   4.587 +#ifdef CONFIG_SMP
   4.588 +	if (!tasklist_lock.write_lock)
   4.589 +		read_unlock(&tasklist_lock);
   4.590 +#endif
   4.591 +
   4.592 +	printk("\nINIT dump complete.  Please reboot now.\n");
   4.593 +#endif /* XEN */
   4.594 +	while (1);			/* hang city if no debugger */
   4.595 +}
   4.596 +
   4.597 +#ifndef XEN
   4.598 +#ifdef CONFIG_ACPI
   4.599 +/*
   4.600 + * ia64_mca_register_cpev
   4.601 + *
   4.602 + *  Register the corrected platform error vector with SAL.
   4.603 + *
   4.604 + *  Inputs
   4.605 + *      cpev        Corrected Platform Error Vector number
   4.606 + *
   4.607 + *  Outputs
   4.608 + *      None
   4.609 + */
   4.610 +static void
   4.611 +ia64_mca_register_cpev (int cpev)
   4.612 +{
   4.613 +	/* Register the CPE interrupt vector with SAL */
   4.614 +	struct ia64_sal_retval isrv;
   4.615 +
   4.616 +	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
   4.617 +	if (isrv.status) {
   4.618 +		printk(KERN_ERR "Failed to register Corrected Platform "
   4.619 +		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
   4.620 +		return;
   4.621 +	}
   4.622 +
   4.623 +	IA64_MCA_DEBUG("%s: corrected platform error "
   4.624 +		       "vector %#x registered\n", __FUNCTION__, cpev);
   4.625 +}
   4.626 +#endif /* CONFIG_ACPI */
   4.627 +
   4.628 +#endif /* !XEN */
   4.629 +#endif /* PLATFORM_MCA_HANDLERS */
   4.630 +#ifndef XEN
   4.631 +
   4.632 +/*
   4.633 + * ia64_mca_cmc_vector_setup
   4.634 + *
    4.635 + *  Set up the corrected machine check vector register in the processor.
    4.636 + *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
   4.637 + *  This function is invoked on a per-processor basis.
   4.638 + *
   4.639 + * Inputs
   4.640 + *      None
   4.641 + *
   4.642 + * Outputs
   4.643 + *	None
   4.644 + */
   4.645 +void
   4.646 +ia64_mca_cmc_vector_setup (void)
   4.647 +{
   4.648 +	cmcv_reg_t	cmcv;
   4.649 +
   4.650 +	cmcv.cmcv_regval	= 0;
   4.651 +	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
   4.652 +	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
   4.653 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
   4.654 +
   4.655 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
   4.656 +		       "machine check vector %#x registered.\n",
   4.657 +		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
   4.658 +
   4.659 +	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
   4.660 +		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
   4.661 +}
   4.662 +
   4.663 +/*
   4.664 + * ia64_mca_cmc_vector_disable
   4.665 + *
   4.666 + *  Mask the corrected machine check vector register in the processor.
   4.667 + *  This function is invoked on a per-processor basis.
   4.668 + *
   4.669 + * Inputs
   4.670 + *      dummy(unused)
   4.671 + *
   4.672 + * Outputs
   4.673 + *	None
   4.674 + */
   4.675 +static void
   4.676 +ia64_mca_cmc_vector_disable (void *dummy)
   4.677 +{
   4.678 +	cmcv_reg_t	cmcv;
   4.679 +
   4.680 +	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
   4.681 +
   4.682 +	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
   4.683 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
   4.684 +
   4.685 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
   4.686 +		       "machine check vector %#x disabled.\n",
   4.687 +		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
   4.688 +}
   4.689 +
   4.690 +/*
   4.691 + * ia64_mca_cmc_vector_enable
   4.692 + *
   4.693 + *  Unmask the corrected machine check vector register in the processor.
   4.694 + *  This function is invoked on a per-processor basis.
   4.695 + *
   4.696 + * Inputs
   4.697 + *      dummy(unused)
   4.698 + *
   4.699 + * Outputs
   4.700 + *	None
   4.701 + */
   4.702 +static void
   4.703 +ia64_mca_cmc_vector_enable (void *dummy)
   4.704 +{
   4.705 +	cmcv_reg_t	cmcv;
   4.706 +
   4.707 +	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
   4.708 +
   4.709 +	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
   4.710 +	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
   4.711 +
   4.712 +	IA64_MCA_DEBUG("%s: CPU %d corrected "
   4.713 +		       "machine check vector %#x enabled.\n",
   4.714 +		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
   4.715 +}
   4.716 +
   4.717 +/*
   4.718 + * ia64_mca_cmc_vector_disable_keventd
   4.719 + *
   4.720 + * Called via keventd (smp_call_function() is not safe in interrupt context) to
   4.721 + * disable the cmc interrupt vector.
   4.722 + */
   4.723 +static void
   4.724 +ia64_mca_cmc_vector_disable_keventd(void *unused)
   4.725 +{
   4.726 +	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
   4.727 +}
   4.728 +
   4.729 +/*
   4.730 + * ia64_mca_cmc_vector_enable_keventd
   4.731 + *
   4.732 + * Called via keventd (smp_call_function() is not safe in interrupt context) to
   4.733 + * enable the cmc interrupt vector.
   4.734 + */
   4.735 +static void
   4.736 +ia64_mca_cmc_vector_enable_keventd(void *unused)
   4.737 +{
   4.738 +	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
   4.739 +}
   4.740 +
   4.741 +/*
   4.742 + * ia64_mca_wakeup_ipi_wait
   4.743 + *
   4.744 + *	Wait for the inter-cpu interrupt to be sent by the
   4.745 + *	monarch processor once it is done with handling the
   4.746 + *	MCA.
   4.747 + *
   4.748 + *  Inputs  :   None
   4.749 + *  Outputs :   None
   4.750 + */
   4.751 +static void
   4.752 +ia64_mca_wakeup_ipi_wait(void)
   4.753 +{
   4.754 +	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
   4.755 +	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
   4.756 +	u64	irr = 0;
   4.757 +
   4.758 +	do {
   4.759 +		switch(irr_num) {
   4.760 +		      case 0:
   4.761 +			irr = ia64_getreg(_IA64_REG_CR_IRR0);
   4.762 +			break;
   4.763 +		      case 1:
   4.764 +			irr = ia64_getreg(_IA64_REG_CR_IRR1);
   4.765 +			break;
   4.766 +		      case 2:
   4.767 +			irr = ia64_getreg(_IA64_REG_CR_IRR2);
   4.768 +			break;
   4.769 +		      case 3:
   4.770 +			irr = ia64_getreg(_IA64_REG_CR_IRR3);
   4.771 +			break;
   4.772 +		}
   4.773 +		cpu_relax();
   4.774 +	} while (!(irr & (1UL << irr_bit))) ;
   4.775 +}
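The irr_num/irr_bit split above follows from the IRR layout: 256 interrupt vectors are spread across four 64-bit registers, so vector >> 6 selects the register and vector & 0x3f the bit within it. The equivalent lookup in plain C (illustrative):

    /* 256 vectors map onto four 64-bit IRR words:
     * word = vector / 64, bit = vector % 64. */
    static int irr_bit_set(const unsigned long irr[4], unsigned int vector)
    {
            return (irr[vector >> 6] >> (vector & 0x3f)) & 1;
    }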
   4.776 +
   4.777 +/*
   4.778 + * ia64_mca_wakeup
   4.779 + *
    4.780 + *	Send an inter-cpu interrupt to wake up a particular cpu
   4.781 + *	and mark that cpu to be out of rendez.
   4.782 + *
   4.783 + *  Inputs  :   cpuid
   4.784 + *  Outputs :   None
   4.785 + */
   4.786 +static void
   4.787 +ia64_mca_wakeup(int cpu)
   4.788 +{
   4.789 +	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
   4.790 +	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
   4.791 +
   4.792 +}
   4.793 +
   4.794 +/*
   4.795 + * ia64_mca_wakeup_all
   4.796 + *
    4.797 + *	Wake up all the cpus which have rendez'ed previously.
   4.798 + *
   4.799 + *  Inputs  :   None
   4.800 + *  Outputs :   None
   4.801 + */
   4.802 +static void
   4.803 +ia64_mca_wakeup_all(void)
   4.804 +{
   4.805 +	int cpu;
   4.806 +
   4.807 +	/* Clear the Rendez checkin flag for all cpus */
   4.808 +	for(cpu = 0; cpu < NR_CPUS; cpu++) {
   4.809 +		if (!cpu_online(cpu))
   4.810 +			continue;
   4.811 +		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
   4.812 +			ia64_mca_wakeup(cpu);
   4.813 +	}
   4.814 +
   4.815 +}
   4.816 +
   4.817 +/*
   4.818 + * ia64_mca_rendez_interrupt_handler
   4.819 + *
    4.820 + *	This is the handler used to put slave processors into a spinloop
    4.821 + *	while the monarch processor does the MCA handling, and later
    4.822 + *	to wake each slave up once the monarch is done.
   4.823 + *
   4.824 + *  Inputs  :   None
   4.825 + *  Outputs :   None
   4.826 + */
   4.827 +static irqreturn_t
   4.828 +ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
   4.829 +{
   4.830 +	unsigned long flags;
   4.831 +	int cpu = smp_processor_id();
   4.832 +
   4.833 +	/* Mask all interrupts */
   4.834 +	local_irq_save(flags);
   4.835 +
   4.836 +	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
   4.837 +	/* Register with the SAL monarch that the slave has
   4.838 +	 * reached SAL
   4.839 +	 */
   4.840 +	ia64_sal_mc_rendez();
   4.841 +
   4.842 +	/* Wait for the wakeup IPI from the monarch
   4.843 +	 * This waiting is done by polling on the wakeup-interrupt
   4.844 +	 * vector bit in the processor's IRRs
   4.845 +	 */
   4.846 +	ia64_mca_wakeup_ipi_wait();
   4.847 +
   4.848 +	/* Enable all interrupts */
   4.849 +	local_irq_restore(flags);
   4.850 +	return IRQ_HANDLED;
   4.851 +}
   4.852 +
   4.853 +/*
   4.854 + * ia64_mca_wakeup_int_handler
   4.855 + *
   4.856 + *	The interrupt handler for processing the inter-cpu interrupt to the
   4.857 + *	slave cpu which was spinning in the rendez loop.
   4.858 + *	Since this spinning is done by turning off the interrupts and
   4.859 + *	polling on the wakeup-interrupt bit in the IRR, there is
   4.860 + *	nothing useful to be done in the handler.
   4.861 + *
   4.862 + *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
   4.863 + *	arg		(Interrupt handler specific argument)
   4.864 + *	ptregs		(Exception frame at the time of the interrupt)
   4.865 + *  Outputs :   None
   4.866 + *
   4.867 + */
   4.868 +static irqreturn_t
   4.869 +ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
   4.870 +{
   4.871 +	return IRQ_HANDLED;
   4.872 +}
   4.873 +
   4.874 +/*
   4.875 + * ia64_return_to_sal_check
   4.876 + *
    4.877 + *	This function is called before going back from the OS_MCA handler
    4.878 + *	to the OS_MCA dispatch code, which finally takes control back
    4.879 + *	to SAL.
    4.880 + *	The main purpose of this routine is to set up the OS_MCA-to-SAL
   4.881 + *	return state which can be used by the OS_MCA dispatch code
   4.882 + *	just before going back to SAL.
   4.883 + *
   4.884 + *  Inputs  :   None
   4.885 + *  Outputs :   None
   4.886 + */
   4.887 +
   4.888 +static void
   4.889 +ia64_return_to_sal_check(int recover)
   4.890 +{
   4.891 +
   4.892 +	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
   4.893 +	 * so that it can be used at the time of os_mca_to_sal_handoff
   4.894 +	 */
   4.895 +	ia64_os_to_sal_handoff_state.imots_sal_gp =
   4.896 +		ia64_sal_to_os_handoff_state.imsto_sal_gp;
   4.897 +
   4.898 +	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
   4.899 +		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
   4.900 +
   4.901 +	if (recover)
   4.902 +		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
   4.903 +	else
   4.904 +		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
   4.905 +
   4.906 +	/* Default = tell SAL to return to same context */
   4.907 +	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
   4.908 +
   4.909 +	ia64_os_to_sal_handoff_state.imots_new_min_state =
   4.910 +		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
   4.911 +
   4.912 +}
   4.913 +
   4.914 +/* Function pointer for extra MCA recovery */
   4.915 +int (*ia64_mca_ucmc_extension)
   4.916 +	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
   4.917 +	= NULL;
   4.918 +
   4.919 +int
   4.920 +ia64_reg_MCA_extension(void *fn)
   4.921 +{
   4.922 +	if (ia64_mca_ucmc_extension)
   4.923 +		return 1;
   4.924 +
   4.925 +	ia64_mca_ucmc_extension = fn;
   4.926 +	return 0;
   4.927 +}
   4.928 +
   4.929 +void
   4.930 +ia64_unreg_MCA_extension(void)
   4.931 +{
   4.932 +	if (ia64_mca_ucmc_extension)
   4.933 +		ia64_mca_ucmc_extension = NULL;
   4.934 +}
   4.935 +
   4.936 +EXPORT_SYMBOL(ia64_reg_MCA_extension);
   4.937 +EXPORT_SYMBOL(ia64_unreg_MCA_extension);
   4.938 +
   4.939 +/*
   4.940 + * ia64_mca_ucmc_handler
   4.941 + *
    4.942 + *	This is the uncorrectable machine check handler, called from the
    4.943 + *	OS_MCA dispatch code, which is in turn called from SAL_CHECK().
    4.944 + *	This is the place where the core of OS MCA handling is done.
    4.945 + *	Right now the logs are extracted and displayed in a well-defined
    4.946 + *	format. This handler code is supposed to be run only on the
    4.947 + *	monarch processor. Once the monarch is done with MCA handling,
    4.948 + *	further MCA logging is enabled by clearing the logs.
   4.949 + *	Monarch also has the duty of sending wakeup-IPIs to pull the
   4.950 + *	slave processors out of rendezvous spinloop.
   4.951 + *
   4.952 + *  Inputs  :   None
   4.953 + *  Outputs :   None
   4.954 + */
   4.955 +void
   4.956 +ia64_mca_ucmc_handler(void)
   4.957 +{
   4.958 +	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
   4.959 +		&ia64_sal_to_os_handoff_state.proc_state_param;
   4.960 +	int recover; 
   4.961 +
   4.962 +	/* Get the MCA error record and log it */
   4.963 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
   4.964 +
    4.965 +	/* Recover only if a TLB error is the sole error in this SAL error record */
   4.966 +	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
   4.967 +	/* other error recovery */
   4.968 +	   || (ia64_mca_ucmc_extension 
   4.969 +		&& ia64_mca_ucmc_extension(
   4.970 +			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
   4.971 +			&ia64_sal_to_os_handoff_state,
   4.972 +			&ia64_os_to_sal_handoff_state)); 
   4.973 +
   4.974 +	if (recover) {
   4.975 +		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
   4.976 +		rh->severity = sal_log_severity_corrected;
   4.977 +		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
   4.978 +	}
   4.979 +	/*
    4.980 +	 *  Wake up all the processors which are spinning in the rendezvous
   4.981 +	 *  loop.
   4.982 +	 */
   4.983 +	ia64_mca_wakeup_all();
   4.984 +
   4.985 +	/* Return to SAL */
   4.986 +	ia64_return_to_sal_check(recover);
   4.987 +}
   4.988 +
   4.989 +static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
   4.990 +static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
   4.991 +
   4.992 +/*
   4.993 + * ia64_mca_cmc_int_handler
   4.994 + *
    4.995 + *  This is the corrected machine check interrupt handler.
   4.996 + *	Right now the logs are extracted and displayed in a well-defined
   4.997 + *	format.
   4.998 + *
   4.999 + * Inputs
  4.1000 + *      interrupt number
  4.1001 + *      client data arg ptr
  4.1002 + *      saved registers ptr
  4.1003 + *
  4.1004 + * Outputs
  4.1005 + *	None
  4.1006 + */
  4.1007 +static irqreturn_t
  4.1008 +ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
  4.1009 +{
  4.1010 +	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
  4.1011 +	static int		index;
  4.1012 +	static DEFINE_SPINLOCK(cmc_history_lock);
  4.1013 +
  4.1014 +	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
  4.1015 +		       __FUNCTION__, cmc_irq, smp_processor_id());
  4.1016 +
  4.1017 +	/* SAL spec states this should run w/ interrupts enabled */
  4.1018 +	local_irq_enable();
  4.1019 +
  4.1020 +	/* Get the CMC error record and log it */
  4.1021 +	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
  4.1022 +
  4.1023 +	spin_lock(&cmc_history_lock);
  4.1024 +	if (!cmc_polling_enabled) {
  4.1025 +		int i, count = 1; /* we know 1 happened now */
  4.1026 +		unsigned long now = jiffies;
  4.1027 +
  4.1028 +		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
  4.1029 +			if (now - cmc_history[i] <= HZ)
  4.1030 +				count++;
  4.1031 +		}
  4.1032 +
  4.1033 +		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
  4.1034 +		if (count >= CMC_HISTORY_LENGTH) {
  4.1035 +
  4.1036 +			cmc_polling_enabled = 1;
  4.1037 +			spin_unlock(&cmc_history_lock);
  4.1038 +			schedule_work(&cmc_disable_work);
  4.1039 +
  4.1040 +			/*
  4.1041 +			 * Corrected errors will still be corrected, but
  4.1042 +			 * make sure there's a log somewhere that indicates
  4.1043 +			 * something is generating more than we can handle.
  4.1044 +			 */
  4.1045 +			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
  4.1046 +
  4.1047 +			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
  4.1048 +
  4.1049 +			/* lock already released, get out now */
  4.1050 +			return IRQ_HANDLED;
  4.1051 +		} else {
  4.1052 +			cmc_history[index++] = now;
  4.1053 +			if (index == CMC_HISTORY_LENGTH)
  4.1054 +				index = 0;
  4.1055 +		}
  4.1056 +	}
  4.1057 +	spin_unlock(&cmc_history_lock);
  4.1058 +	return IRQ_HANDLED;
  4.1059 +}
  4.1060 +
  4.1061 +/*
  4.1062 + *  ia64_mca_cmc_int_caller
  4.1063 + *
   4.1064 + * 	Triggered by a software interrupt from the CMC polling routine.
   4.1065 + * 	Calls the real interrupt handler and either triggers a software
   4.1066 + * 	interrupt on the next cpu or does cleanup at the end.
  4.1067 + *
  4.1068 + * Inputs
  4.1069 + *	interrupt number
  4.1070 + *	client data arg ptr
  4.1071 + *	saved registers ptr
  4.1072 + * Outputs
  4.1073 + * 	handled
  4.1074 + */
  4.1075 +static irqreturn_t
  4.1076 +ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
  4.1077 +{
  4.1078 +	static int start_count = -1;
  4.1079 +	unsigned int cpuid;
  4.1080 +
  4.1081 +	cpuid = smp_processor_id();
  4.1082 +
  4.1083 +	/* If first cpu, update count */
  4.1084 +	if (start_count == -1)
  4.1085 +		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
  4.1086 +
  4.1087 +	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
  4.1088 +
  4.1089 +	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
  4.1090 +
  4.1091 +	if (cpuid < NR_CPUS) {
  4.1092 +		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
  4.1093 +	} else {
  4.1094 +		/* If no log record, switch out of polling mode */
  4.1095 +		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
  4.1096 +
  4.1097 +			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
  4.1098 +			schedule_work(&cmc_enable_work);
  4.1099 +			cmc_polling_enabled = 0;
  4.1100 +
  4.1101 +		} else {
  4.1102 +
  4.1103 +			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
  4.1104 +		}
  4.1105 +
  4.1106 +		start_count = -1;
  4.1107 +	}
  4.1108 +
  4.1109 +	return IRQ_HANDLED;
  4.1110 +}
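The cpuid loop above implements the polling cascade: each CPU logs its own records, then forwards the soft interrupt to the next online CPU; only the last CPU in the cascade decides whether to re-arm the poll timer or return to interrupt mode. The traversal step alone (a sketch; the online() predicate is illustrative):

    /* Cascade step: find the next online CPU after this one; a
     * return value of nr_cpus means the cascade is complete and
     * the caller should do the end-of-cascade cleanup. */
    static int next_online_cpu(int cpu, int nr_cpus, int (*online)(int))
    {
            for (++cpu; cpu < nr_cpus && !online(cpu); cpu++)
                    ;
            return cpu;
    }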
  4.1111 +
  4.1112 +/*
  4.1113 + *  ia64_mca_cmc_poll
  4.1114 + *
  4.1115 + *	Poll for Corrected Machine Checks (CMCs)
  4.1116 + *
  4.1117 + * Inputs   :   dummy(unused)
  4.1118 + * Outputs  :   None
  4.1119 + *
  4.1120 + */
  4.1121 +static void
  4.1122 +ia64_mca_cmc_poll (unsigned long dummy)
  4.1123 +{
  4.1124 +	/* Trigger a CMC interrupt cascade  */
  4.1125 +	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
  4.1126 +}
  4.1127 +
  4.1128 +/*
  4.1129 + *  ia64_mca_cpe_int_caller
  4.1130 + *
   4.1131 + * 	Triggered by a software interrupt from the CPE polling routine.
   4.1132 + * 	Calls the real interrupt handler and either triggers a software
   4.1133 + * 	interrupt on the next cpu or does cleanup at the end.
  4.1134 + *
  4.1135 + * Inputs
  4.1136 + *	interrupt number
  4.1137 + *	client data arg ptr
  4.1138 + *	saved registers ptr
  4.1139 + * Outputs
  4.1140 + * 	handled
  4.1141 + */
  4.1142 +#ifdef CONFIG_ACPI
  4.1143 +
  4.1144 +static irqreturn_t
  4.1145 +ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
  4.1146 +{
  4.1147 +	static int start_count = -1;
  4.1148 +	static int poll_time = MIN_CPE_POLL_INTERVAL;
  4.1149 +	unsigned int cpuid;
  4.1150 +
  4.1151 +	cpuid = smp_processor_id();
  4.1152 +
  4.1153 +	/* If first cpu, update count */
  4.1154 +	if (start_count == -1)
  4.1155 +		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
  4.1156 +
  4.1157 +	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
  4.1158 +
  4.1159 +	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
  4.1160 +
  4.1161 +	if (cpuid < NR_CPUS) {
  4.1162 +		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
  4.1163 +	} else {
  4.1164 +		/*
  4.1165 +		 * If a log was recorded, increase our polling frequency,
   4.1166 +		 * otherwise, back off or return to interrupt mode.
  4.1167 +		 */
  4.1168 +		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
  4.1169 +			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
  4.1170 +		} else if (cpe_vector < 0) {
  4.1171 +			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
  4.1172 +		} else {
  4.1173 +			poll_time = MIN_CPE_POLL_INTERVAL;
  4.1174 +
  4.1175 +			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
  4.1176 +			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
  4.1177 +			cpe_poll_enabled = 0;
  4.1178 +		}
  4.1179 +
  4.1180 +		if (cpe_poll_enabled)
  4.1181 +			mod_timer(&cpe_poll_timer, jiffies + poll_time);
  4.1182 +		start_count = -1;
  4.1183 +	}
  4.1184 +
  4.1185 +	return IRQ_HANDLED;
  4.1186 +}
  4.1187 +
  4.1188 +/*
  4.1189 + *  ia64_mca_cpe_poll
  4.1190 + *
  4.1191 + *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
  4.1192 + *	on first cpu, from there it will trickle through all the cpus.
  4.1193 + *
  4.1194 + * Inputs   :   dummy(unused)
  4.1195 + * Outputs  :   None
  4.1196 + *
  4.1197 + */
  4.1198 +static void
  4.1199 +ia64_mca_cpe_poll (unsigned long dummy)
  4.1200 +{
  4.1201 +	/* Trigger a CPE interrupt cascade  */
  4.1202 +	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
  4.1203 +}
  4.1204 +
  4.1205 +#endif /* CONFIG_ACPI */
  4.1206 +#endif /* !XEN */
  4.1207 +
  4.1208 +/*
  4.1209 + * C portion of the OS INIT handler
  4.1210 + *
  4.1211 + * Called from ia64_monarch_init_handler
  4.1212 + *
  4.1213 + * Inputs: pointer to pt_regs where processor info was saved.
  4.1214 + *
  4.1215 + * Returns:
  4.1216 + *   0 if SAL must warm boot the System
  4.1217 + *   1 if SAL must return to interrupted context using PAL_MC_RESUME
  4.1218 + *
  4.1219 + */
  4.1220 +void
  4.1221 +ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
  4.1222 +{
  4.1223 +	pal_min_state_area_t *ms;
  4.1224 +#ifdef XEN
  4.1225 +	int cpu = smp_processor_id();
  4.1226 +
  4.1227 +	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
  4.1228 +	       ia64_sal_to_os_handoff_state[cpu].proc_state_param);
  4.1229 +#endif
  4.1230 +
  4.1231 +#ifndef XEN
  4.1232 +	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
  4.1233 +	console_loglevel = 15;	/* make sure printks make it to console */
  4.1234 +
  4.1235 +	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
  4.1236 +		ia64_sal_to_os_handoff_state.proc_state_param);
  4.1237 +
  4.1238 +	/*
  4.1239 +	 * Address of minstate area provided by PAL is physical,
  4.1240 +	 * uncacheable (bit 63 set). Convert to Linux virtual
  4.1241 +	 * address in region 6.
  4.1242 +	 */
  4.1243 +	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
  4.1244 +#else
  4.1245 +	/* Xen virtual address in region 7. */
  4.1246 +	ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
  4.1247 +#endif
  4.1248 +
  4.1249 +	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
  4.1250 +}
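The two branches above differ only in how the physical pal_min_state pointer is made addressable: the Linux path ORs in region 6 (the uncached identity region, address bits 63..61 = 6), while the Xen path uses __va(), i.e. the cached identity mapping in region 7. A sketch of the region arithmetic (assuming the standard ia64 region layout):

    /* ia64 virtual addresses: bits 63..61 select the region.
     * Region 6 is the uncached identity mapping; region 7 is the
     * cached one that __va() returns. */
    #define EXAMPLE_REGION_ADDR(pa, region) \
            ((pa) | ((unsigned long)(region) << 61))

    /* 0x12345000 physical -> 0xc000000012345000 uncached */
    unsigned long uncached = EXAMPLE_REGION_ADDR(0x12345000UL, 6);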
  4.1251 +
  4.1252 +#ifndef XEN
  4.1253 +static int __init
  4.1254 +ia64_mca_disable_cpe_polling(char *str)
  4.1255 +{
  4.1256 +	cpe_poll_enabled = 0;
  4.1257 +	return 1;
  4.1258 +}
  4.1259 +
  4.1260 +__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
  4.1261 +
  4.1262 +static struct irqaction cmci_irqaction = {
  4.1263 +	.handler =	ia64_mca_cmc_int_handler,
  4.1264 +	.flags =	SA_INTERRUPT,
  4.1265 +	.name =		"cmc_hndlr"
  4.1266 +};
  4.1267 +
  4.1268 +static struct irqaction cmcp_irqaction = {
  4.1269 +	.handler =	ia64_mca_cmc_int_caller,
  4.1270 +	.flags =	SA_INTERRUPT,
  4.1271 +	.name =		"cmc_poll"
  4.1272 +};
  4.1273 +
  4.1274 +static struct irqaction mca_rdzv_irqaction = {
  4.1275 +	.handler =	ia64_mca_rendez_int_handler,
  4.1276 +	.flags =	SA_INTERRUPT,
  4.1277 +	.name =		"mca_rdzv"
  4.1278 +};
  4.1279 +
  4.1280 +static struct irqaction mca_wkup_irqaction = {
  4.1281 +	.handler =	ia64_mca_wakeup_int_handler,
  4.1282 +	.flags =	SA_INTERRUPT,
  4.1283 +	.name =		"mca_wkup"
  4.1284 +};
  4.1285 +
  4.1286 +#ifdef CONFIG_ACPI
  4.1287 +static struct irqaction mca_cpe_irqaction = {
  4.1288 +	.handler =	ia64_mca_cpe_int_handler,
  4.1289 +	.flags =	SA_INTERRUPT,
  4.1290 +	.name =		"cpe_hndlr"
  4.1291 +};
  4.1292 +
  4.1293 +static struct irqaction mca_cpep_irqaction = {
  4.1294 +	.handler =	ia64_mca_cpe_int_caller,
  4.1295 +	.flags =	SA_INTERRUPT,
  4.1296 +	.name =		"cpe_poll"
  4.1297 +};
  4.1298 +#endif /* CONFIG_ACPI */
  4.1299 +#endif /* !XEN */
  4.1300 +
  4.1301 +/* Do per-CPU MCA-related initialization.  */
  4.1302 +
  4.1303 +void __devinit
  4.1304 +ia64_mca_cpu_init(void *cpu_data)
  4.1305 +{
  4.1306 +	void *pal_vaddr;
  4.1307 +
  4.1308 +	if (smp_processor_id() == 0) {
  4.1309 +		void *mca_data;
  4.1310 +		int cpu;
  4.1311 +
  4.1312 +#ifdef XEN
  4.1313 +		unsigned int pageorder;
  4.1314 +		pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
  4.1315 +#else
  4.1316 +		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
  4.1317 +					 * NR_CPUS);
  4.1318 +#endif
  4.1319 +		for (cpu = 0; cpu < NR_CPUS; cpu++) {
  4.1320 +#ifdef XEN
  4.1321 +			mca_data = alloc_xenheap_pages(pageorder);
  4.1322 +			__per_cpu_mca[cpu] = __pa(mca_data);
  4.1323 +			IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
  4.1324 +			               "(mca_data[%d]=%lx)\n",
  4.1325 +				       __FUNCTION__, cpu, __per_cpu_mca[cpu],
  4.1326 +				       cpu, (u64)mca_data);
  4.1327 +#else
  4.1328 +			__per_cpu_mca[cpu] = __pa(mca_data);
  4.1329 +			mca_data += sizeof(struct ia64_mca_cpu);
  4.1330 +#endif
  4.1331 +		}
  4.1332 +	}
  4.1333 +
  4.1334 +        /*
  4.1335 +         * The MCA info structure was allocated earlier and its
  4.1336 +         * physical address saved in __per_cpu_mca[cpu].  Copy that
   4.1337 +         * address to ia64_mca_data so we can access it as a per-CPU
  4.1338 +         * variable.
  4.1339 +         */
  4.1340 +	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
  4.1341 +#ifdef XEN
  4.1342 +	IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
  4.1343 +	               smp_processor_id(), __get_cpu_var(ia64_mca_data));
  4.1344 +
  4.1345 +	/* sal_to_os_handoff for smp support */
  4.1346 +	__get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
  4.1347 +	              __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
  4.1348 +	IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
  4.1349 +	               smp_processor_id(),
  4.1350 +		       __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
  4.1351 +#endif
  4.1352 +
  4.1353 +	/*
  4.1354 +	 * Stash away a copy of the PTE needed to map the per-CPU page.
  4.1355 +	 * We may need it during MCA recovery.
  4.1356 +	 */
  4.1357 +	__get_cpu_var(ia64_mca_per_cpu_pte) =
  4.1358 +		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
  4.1359 +
  4.1360 +        /*
  4.1361 +         * Also, stash away a copy of the PAL address and the PTE
  4.1362 +         * needed to map it.
  4.1363 +         */
  4.1364 +        pal_vaddr = efi_get_pal_addr();
  4.1365 +	if (!pal_vaddr)
  4.1366 +		return;
  4.1367 +	__get_cpu_var(ia64_mca_pal_base) =
  4.1368 +		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
  4.1369 +	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
  4.1370 +							      PAGE_KERNEL));
  4.1371 +}
  4.1372 +
  4.1373 +/*
  4.1374 + * ia64_mca_init
  4.1375 + *
   4.1376 + *  Do all the system-level MCA-specific initialization.
  4.1377 + *
  4.1378 + *	1. Register spinloop and wakeup request interrupt vectors
  4.1379 + *
  4.1380 + *	2. Register OS_MCA handler entry point
  4.1381 + *
  4.1382 + *	3. Register OS_INIT handler entry point
  4.1383 + *
  4.1384 + *  4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
  4.1385 + *
  4.1386 + *  Note that this initialization is done very early before some kernel
  4.1387 + *  services are available.
  4.1388 + *
  4.1389 + *  Inputs  :   None
  4.1390 + *
  4.1391 + *  Outputs :   None
  4.1392 + */
  4.1393 +void __init
  4.1394 +ia64_mca_init(void)
  4.1395 +{
  4.1396 +	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
  4.1397 +	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
  4.1398 +	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
  4.1399 +#ifdef XEN
  4.1400 +	s64 rc;
  4.1401 +
  4.1402 +	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
  4.1403 +
  4.1404 +	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
  4.1405 +#else
  4.1406 +	int i;
  4.1407 +	s64 rc;
  4.1408 +	struct ia64_sal_retval isrv;
  4.1409 +	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
  4.1410 +
  4.1411 +	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
  4.1412 +
  4.1413 +	/* Clear the Rendez checkin flag for all cpus */
  4.1414 +	for(i = 0 ; i < NR_CPUS; i++)
  4.1415 +		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
  4.1416 +
  4.1417 +	/*
  4.1418 +	 * Register the rendezvous spinloop and wakeup mechanism with SAL
  4.1419 +	 */
  4.1420 +
  4.1421 +	/* Register the rendezvous interrupt vector with SAL */
  4.1422 +	while (1) {
  4.1423 +		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
  4.1424 +					      SAL_MC_PARAM_MECHANISM_INT,
  4.1425 +					      IA64_MCA_RENDEZ_VECTOR,
  4.1426 +					      timeout,
  4.1427 +					      SAL_MC_PARAM_RZ_ALWAYS);
  4.1428 +		rc = isrv.status;
  4.1429 +		if (rc == 0)
  4.1430 +			break;
  4.1431 +		if (rc == -2) {
  4.1432 +			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
  4.1433 +				"%ld to %ld milliseconds\n", timeout, isrv.v0);
  4.1434 +			timeout = isrv.v0;
  4.1435 +			continue;
  4.1436 +		}
  4.1437 +		printk(KERN_ERR "Failed to register rendezvous interrupt "
  4.1438 +		       "with SAL (status %ld)\n", rc);
  4.1439 +		return;
  4.1440 +	}
  4.1441 +
  4.1442 +	/* Register the wakeup interrupt vector with SAL */
  4.1443 +	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
  4.1444 +				      SAL_MC_PARAM_MECHANISM_INT,
  4.1445 +				      IA64_MCA_WAKEUP_VECTOR,
  4.1446 +				      0, 0);
  4.1447 +	rc = isrv.status;
  4.1448 +	if (rc) {
  4.1449 +		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
  4.1450 +		       "(status %ld)\n", rc);
  4.1451 +		return;
  4.1452 +	}
  4.1453 +
  4.1454 +	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
  4.1455 +#endif /* !XEN */
  4.1456 +
  4.1457 +	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
  4.1458 +	/*
  4.1459 +	 * XXX - disable SAL checksum by setting size to 0; should be
  4.1460 +	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
  4.1461 +	 */
  4.1462 +	ia64_mc_info.imi_mca_handler_size	= 0;
  4.1463 +
  4.1464 +	/* Register the os mca handler with SAL */
  4.1465 +	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
  4.1466 +				       ia64_mc_info.imi_mca_handler,
  4.1467 +				       ia64_tpa(mca_hldlr_ptr->gp),
  4.1468 +				       ia64_mc_info.imi_mca_handler_size,
  4.1469 +				       0, 0, 0)))
  4.1470 +	{
  4.1471 +		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
  4.1472 +		       "(status %ld)\n", rc);
  4.1473 +		return;
  4.1474 +	}
  4.1475 +
  4.1476 +	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
  4.1477 +		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
  4.1478 +
  4.1479 +	/*
  4.1480 +	 * XXX - disable SAL checksum by setting size to 0, should be
  4.1481 +	 * size of the actual init handler in mca_asm.S.
  4.1482 +	 */
  4.1483 +	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
  4.1484 +	ia64_mc_info.imi_monarch_init_handler_size	= 0;
  4.1485 +	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
  4.1486 +	ia64_mc_info.imi_slave_init_handler_size	= 0;
  4.1487 +
  4.1488 +	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
  4.1489 +		       ia64_mc_info.imi_monarch_init_handler);
  4.1490 +
  4.1491 +	/* Register the os init handler with SAL */
  4.1492 +	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
  4.1493 +				       ia64_mc_info.imi_monarch_init_handler,
  4.1494 +				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
  4.1495 +				       ia64_mc_info.imi_monarch_init_handler_size,
  4.1496 +				       ia64_mc_info.imi_slave_init_handler,
  4.1497 +				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
  4.1498 +				       ia64_mc_info.imi_slave_init_handler_size)))
  4.1499 +	{
  4.1500 +		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
  4.1501 +		       "(status %ld)\n", rc);
  4.1502 +		return;
  4.1503 +	}
  4.1504 +
  4.1505 +	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
  4.1506 +
  4.1507 +#ifndef XEN
  4.1508 +	/*
  4.1509 +	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
  4.1510 +	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
   4.1511 +	 *  per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
  4.1512 +	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
  4.1513 +	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
  4.1514 +	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
  4.1515 +
  4.1516 +	/* Setup the MCA rendezvous interrupt vector */
  4.1517 +	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
  4.1518 +
  4.1519 +	/* Setup the MCA wakeup interrupt vector */
  4.1520 +	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
  4.1521 +
  4.1522 +#ifdef CONFIG_ACPI
  4.1523 +	/* Setup the CPEI/P handler */
  4.1524 +	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
  4.1525 +#endif
  4.1526 +
  4.1527 +	/* Initialize the areas set aside by the OS to buffer the
  4.1528 +	 * platform/processor error states for MCA/INIT/CMC
  4.1529 +	 * handling.
  4.1530 +	 */
  4.1531 +	ia64_log_init(SAL_INFO_TYPE_MCA);
  4.1532 +	ia64_log_init(SAL_INFO_TYPE_INIT);
  4.1533 +	ia64_log_init(SAL_INFO_TYPE_CMC);
  4.1534 +	ia64_log_init(SAL_INFO_TYPE_CPE);
  4.1535 +#endif /* !XEN */
  4.1536 +
  4.1537 +	mca_init = 1;
  4.1538 +	printk(KERN_INFO "MCA related initialization done\n");
  4.1539 +}
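[Editor's note] The ia64_fptr_t casts at the top of ia64_mca_init() above
rely on the ia64 function-descriptor convention: a function symbol names an
{entry point, gp} pair rather than raw code.  Shape-wise (illustrative
sketch; the field names mirror the ->fp/->gp uses above):

	typedef struct {
		unsigned long fp;	/* code entry point (virtual) */
		unsigned long gp;	/* global pointer the code expects */
	} fptr_sketch_t;

SAL needs physical addresses, hence the ia64_tpa() applied to both ->fp and
the gp arguments passed to ia64_sal_set_vectors() above.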
  4.1540 +
  4.1541 +#ifndef XEN
  4.1542 +/*
  4.1543 + * ia64_mca_late_init
  4.1544 + *
   4.1545 + *	Opportunity to set up things that require initialization later
   4.1546 + *	than ia64_mca_init.  Set up a timer to poll for CPEs if the
   4.1547 + *	platform doesn't support an interrupt-driven mechanism.
  4.1548 + *
  4.1549 + *  Inputs  :   None
  4.1550 + *  Outputs :   Status
  4.1551 + */
  4.1552 +static int __init
  4.1553 +ia64_mca_late_init(void)
  4.1554 +{
  4.1555 +	if (!mca_init)
  4.1556 +		return 0;
  4.1557 +
  4.1558 +	/* Setup the CMCI/P vector and handler */
  4.1559 +	init_timer(&cmc_poll_timer);
  4.1560 +	cmc_poll_timer.function = ia64_mca_cmc_poll;
  4.1561 +
  4.1562 +	/* Unmask/enable the vector */
  4.1563 +	cmc_polling_enabled = 0;
  4.1564 +	schedule_work(&cmc_enable_work);
  4.1565 +
  4.1566 +	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
  4.1567 +
  4.1568 +#ifdef CONFIG_ACPI
  4.1569 +	/* Setup the CPEI/P vector and handler */
  4.1570 +	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
  4.1571 +	init_timer(&cpe_poll_timer);
  4.1572 +	cpe_poll_timer.function = ia64_mca_cpe_poll;
  4.1573 +
  4.1574 +	{
  4.1575 +		irq_desc_t *desc;
  4.1576 +		unsigned int irq;
  4.1577 +
  4.1578 +		if (cpe_vector >= 0) {
  4.1579 +			/* If platform supports CPEI, enable the irq. */
  4.1580 +			cpe_poll_enabled = 0;
  4.1581 +			for (irq = 0; irq < NR_IRQS; ++irq)
  4.1582 +				if (irq_to_vector(irq) == cpe_vector) {
  4.1583 +					desc = irq_descp(irq);
  4.1584 +					desc->status |= IRQ_PER_CPU;
  4.1585 +					setup_irq(irq, &mca_cpe_irqaction);
  4.1586 +				}
  4.1587 +			ia64_mca_register_cpev(cpe_vector);
  4.1588 +			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
  4.1589 +		} else {
  4.1590 +			/* If platform doesn't support CPEI, get the timer going. */
  4.1591 +			if (cpe_poll_enabled) {
  4.1592 +				ia64_mca_cpe_poll(0UL);
  4.1593 +				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
  4.1594 +			}
  4.1595 +		}
  4.1596 +	}
  4.1597 +#endif
  4.1598 +
  4.1599 +	return 0;
  4.1600 +}
  4.1601 +
  4.1602 +device_initcall(ia64_mca_late_init);
  4.1603 +#endif /* !XEN */
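[Editor's note] The CONFIG_ACPI block in ia64_mca_late_init() reduces to a
simple either/or; condensed C sketch (illustrative only):

	if (cpe_vector >= 0) {
		/* platform signals corrected platform errors by interrupt:
		 * mark the matching irq IRQ_PER_CPU, setup_irq() it, and
		 * program the vector via ia64_mca_register_cpev() */
		cpe_poll_enabled = 0;
	} else if (cpe_poll_enabled) {
		/* no CPEI support: fall back to the cpe_poll_timer */
		ia64_mca_cpe_poll(0UL);
	}

None of this runs under Xen; the whole function is compiled out by the
#ifndef XEN above.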
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/ia64/linux-xen/mca_asm.S	Sun Jul 09 20:04:23 2006 -0600
     5.3 @@ -0,0 +1,970 @@
     5.4 +//
     5.5 +// assembly portion of the IA64 MCA handling
     5.6 +//
     5.7 +// Mods by cfleck to integrate into kernel build
     5.8 +// 00/03/15 davidm Added various stop bits to get a clean compile
     5.9 +//
    5.10 +// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
    5.11 +//		   kstack, switch modes, jump to C INIT handler
    5.12 +//
    5.13 +// 02/01/04 J.Hall <jenna.s.hall@intel.com>
    5.14 +//		   Before entering virtual mode code:
    5.15 +//		   1. Check for TLB CPU error
    5.16 +//		   2. Restore current thread pointer to kr6
    5.17 +//		   3. Move stack ptr 16 bytes to conform to C calling convention
    5.18 +//
    5.19 +// 04/11/12 Russ Anderson <rja@sgi.com>
    5.20 +//		   Added per cpu MCA/INIT stack save areas.
    5.21 +//
    5.22 +#include <linux/config.h>
    5.23 +#include <linux/threads.h>
    5.24 +
    5.25 +#include <asm/asmmacro.h>
    5.26 +#include <asm/pgtable.h>
    5.27 +#include <asm/processor.h>
    5.28 +#include <asm/mca_asm.h>
    5.29 +#include <asm/mca.h>
    5.30 +
    5.31 +/*
    5.32 + * When we get a machine check, the kernel stack pointer is no longer
    5.33 + * valid, so we need to set a new stack pointer.
    5.34 + */
    5.35 +#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */
    5.36 +
    5.37 +/*
    5.38 + * Needed for return context to SAL
    5.39 + */
    5.40 +#define IA64_MCA_SAME_CONTEXT	0
    5.41 +#define IA64_MCA_COLD_BOOT	-2
    5.42 +
    5.43 +#include "minstate.h"
    5.44 +
    5.45 +/*
    5.46 + * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
    5.47 + *		1. GR1 = OS GP
    5.48 + *		2. GR8 = PAL_PROC physical address
    5.49 + *		3. GR9 = SAL_PROC physical address
    5.50 + *		4. GR10 = SAL GP (physical)
    5.51 + *		5. GR11 = Rendez state
    5.52 + *		6. GR12 = Return address to location within SAL_CHECK
    5.53 + */
    5.54 +#ifdef XEN
    5.55 +#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
    5.56 +	movl	_tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);;	\
    5.57 +	tpa	_tmp=_tmp;;				\
    5.58 +	ld8	_tmp=[_tmp];;				\
    5.59 +	st8	[_tmp]=r1,0x08;;			\
    5.60 +	st8	[_tmp]=r8,0x08;;			\
    5.61 +	st8	[_tmp]=r9,0x08;;			\
    5.62 +	st8	[_tmp]=r10,0x08;;			\
    5.63 +	st8	[_tmp]=r11,0x08;;			\
    5.64 +	st8	[_tmp]=r12,0x08;;			\
    5.65 +	st8	[_tmp]=r17,0x08;;			\
    5.66 +	st8	[_tmp]=r18,0x08
    5.67 +#else
    5.68 +#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
    5.69 +	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
    5.70 +	st8	[_tmp]=r1,0x08;;			\
    5.71 +	st8	[_tmp]=r8,0x08;;			\
    5.72 +	st8	[_tmp]=r9,0x08;;			\
    5.73 +	st8	[_tmp]=r10,0x08;;			\
    5.74 +	st8	[_tmp]=r11,0x08;;			\
    5.75 +	st8	[_tmp]=r12,0x08;;			\
    5.76 +	st8	[_tmp]=r17,0x08;;			\
    5.77 +	st8	[_tmp]=r18,0x08
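/*
 * [Editor's sketch] Both variants above store r1,r8,r9,r10,r11,r12,r17,r18
 * at consecutive 8-byte offsets; per the NOTE at the save site further
 * down, this order must match ia64_mca_sal_to_os_state_t in asm/mca.h.
 * Implied layout (field meanings from the SAL 3.0 comment; r17/r18 are
 * assumptions except where the code below confirms them):
 *
 *	0x00  r1   OS GP
 *	0x08  r8   PAL_PROC physical address
 *	0x10  r9   SAL_PROC physical address
 *	0x18  r10  SAL GP (physical)
 *	0x20  r11  rendezvous state
 *	0x28  r12  return address into SAL_CHECK
 *	0x30  r17  PAL min-state save area (assumed)
 *	0x38  r18  processor state parameter (cf. the +56 load below)
 */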
    5.78 +
    5.79 +/*
    5.80 + * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
    5.81 + * (p6) is executed if we never entered virtual mode (TLB error)
    5.82 + * (p7) is executed if we entered virtual mode as expected (normal case)
    5.83 + *	1. GR8 = OS_MCA return status
    5.84 + *	2. GR9 = SAL GP (physical)
    5.85 + *	3. GR10 = 0/1 returning same/new context
    5.86 + *	4. GR22 = New min state save area pointer
    5.87 + *	returns ptr to SAL rtn save loc in _tmp
    5.88 + */
    5.89 +#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
    5.90 +	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
    5.91 +	DATA_VA_TO_PA(_tmp);;				\
    5.92 +	ld8	r8=[_tmp],0x08;;			\
    5.93 +	ld8	r9=[_tmp],0x08;;			\
    5.94 +	ld8	r10=[_tmp],0x08;;			\
    5.95 +	ld8	r22=[_tmp],0x08;;
    5.96 +	// now _tmp is pointing to SAL rtn save location
    5.97 +
    5.98 +/*
    5.99 + * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
   5.100 + *	imots_os_status=IA64_MCA_COLD_BOOT
   5.101 + *	imots_sal_gp=SAL GP
   5.102 + *	imots_context=IA64_MCA_SAME_CONTEXT
   5.103 + *	imots_new_min_state=Min state save area pointer
   5.104 + *	imots_sal_check_ra=Return address to location within SAL_CHECK
   5.105 + *
   5.106 + */
   5.107 +#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
   5.108 +	movl	tmp=IA64_MCA_COLD_BOOT;					\
   5.109 +	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
   5.110 +	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
   5.111 +	st8	[os_to_sal_handoff]=tmp,8;;				\
   5.112 +	ld8	tmp=[sal_to_os_handoff],48;;				\
   5.113 +	st8	[os_to_sal_handoff]=tmp,8;;				\
   5.114 +	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
   5.115 +	st8	[os_to_sal_handoff]=tmp,8;;				\
   5.116 +	ld8	tmp=[sal_to_os_handoff],-8;;				\
   5.117 +	st8     [os_to_sal_handoff]=tmp,8;;				\
   5.118 +	ld8	tmp=[sal_to_os_handoff];;				\
   5.119 +	st8     [os_to_sal_handoff]=tmp;;
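/*
 * [Editor's sketch] COLD_BOOT_HANDOFF_STATE fills the structure whose
 * fields are listed in the comment above; in pseudo-C (types assumed):
 *
 *	os_to_sal->imots_os_status     = IA64_MCA_COLD_BOOT;
 *	os_to_sal->imots_sal_gp        = <taken from the SAL handoff area>;
 *	os_to_sal->imots_context       = IA64_MCA_SAME_CONTEXT;
 *	os_to_sal->imots_new_min_state = <min-state save area pointer>;
 *	os_to_sal->imots_sal_check_ra  = <return address into SAL_CHECK>;
 */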
   5.120 +
   5.121 +#define GET_IA64_MCA_DATA(reg)						\
   5.122 +	GET_THIS_PADDR(reg, ia64_mca_data)				\
   5.123 +	;;								\
   5.124 +	ld8 reg=[reg]
   5.125 +
   5.126 +#endif /* XEN */
   5.127 +	.global ia64_os_mca_dispatch
   5.128 +	.global ia64_os_mca_dispatch_end
   5.129 +#ifndef XEN
   5.130 +	.global ia64_sal_to_os_handoff_state
   5.131 +	.global	ia64_os_to_sal_handoff_state
   5.132 +	.global ia64_do_tlb_purge
   5.133 +#endif
   5.134 +
   5.135 +	.text
   5.136 +	.align 16
   5.137 +
   5.138 +#ifndef XEN
   5.139 +/*
   5.140 + * Just the TLB purge part is moved to a separate function
    5.141 + * so we can re-use the code on the cpu hotplug path as well.
    5.142 + * The caller should set up b1, so we can branch there once the
    5.143 + * TLB flush is complete.
   5.144 + */
   5.145 +
   5.146 +ia64_do_tlb_purge:
   5.147 +#define O(member)	IA64_CPUINFO_##member##_OFFSET
   5.148 +
   5.149 +	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
   5.150 +	;;
   5.151 +	addl r17=O(PTCE_STRIDE),r2
   5.152 +	addl r2=O(PTCE_BASE),r2
   5.153 +	;;
   5.154 +	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
   5.155 +	ld4 r19=[r2],4					// r19=ptce_count[0]
   5.156 +	ld4 r21=[r17],4					// r21=ptce_stride[0]
   5.157 +	;;
   5.158 +	ld4 r20=[r2]					// r20=ptce_count[1]
   5.159 +	ld4 r22=[r17]					// r22=ptce_stride[1]
   5.160 +	mov r24=0
   5.161 +	;;
   5.162 +	adds r20=-1,r20
   5.163 +	;;
   5.164 +#undef O
   5.165 +
   5.166 +2:
   5.167 +	cmp.ltu p6,p7=r24,r19
   5.168 +(p7)	br.cond.dpnt.few 4f
   5.169 +	mov ar.lc=r20
   5.170 +3:
   5.171 +	ptc.e r18
   5.172 +	;;
   5.173 +	add r18=r22,r18
   5.174 +	br.cloop.sptk.few 3b
   5.175 +	;;
   5.176 +	add r18=r21,r18
   5.177 +	add r24=1,r24
   5.178 +	;;
   5.179 +	br.sptk.few 2b
   5.180 +4:
   5.181 +	srlz.i 			// srlz.i implies srlz.d
   5.182 +	;;
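	// [Editor's sketch] The 2:/3: nest above is the usual PAL PTCE
	// walk, cf. local_flush_tlb_all() -- roughly, in C:
	//	addr = ptce_base;
	//	for (i = 0; i < ptce_count[0]; i++) {
	//		for (j = 0; j < ptce_count[1]; j++) {
	//			ptc_e(addr);
	//			addr += ptce_stride[1];
	//		}
	//		addr += ptce_stride[0];
	//	}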
   5.183 +
   5.184 +        // Now purge addresses formerly mapped by TR registers
   5.185 +	// 1. Purge ITR&DTR for kernel.
   5.186 +	movl r16=KERNEL_START
   5.187 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
   5.188 +	;;
   5.189 +	ptr.i r16, r18
   5.190 +	ptr.d r16, r18
   5.191 +	;;
   5.192 +	srlz.i
   5.193 +	;;
   5.194 +	srlz.d
   5.195 +	;;
   5.196 +	// 2. Purge DTR for PERCPU data.
   5.197 +	movl r16=PERCPU_ADDR
   5.198 +	mov r18=PERCPU_PAGE_SHIFT<<2
   5.199 +	;;
   5.200 +	ptr.d r16,r18
   5.201 +	;;
   5.202 +	srlz.d
   5.203 +	;;
   5.204 +	// 3. Purge ITR for PAL code.
   5.205 +	GET_THIS_PADDR(r2, ia64_mca_pal_base)
   5.206 +	;;
   5.207 +	ld8 r16=[r2]
   5.208 +	mov r18=IA64_GRANULE_SHIFT<<2
   5.209 +	;;
   5.210 +	ptr.i r16,r18
   5.211 +	;;
   5.212 +	srlz.i
   5.213 +	;;
   5.214 +	// 4. Purge DTR for stack.
   5.215 +	mov r16=IA64_KR(CURRENT_STACK)
   5.216 +	;;
   5.217 +	shl r16=r16,IA64_GRANULE_SHIFT
   5.218 +	movl r19=PAGE_OFFSET
   5.219 +	;;
   5.220 +	add r16=r19,r16
   5.221 +	mov r18=IA64_GRANULE_SHIFT<<2
   5.222 +	;;
   5.223 +	ptr.d r16,r18
   5.224 +	;;
   5.225 +	srlz.i
   5.226 +	;;
   5.227 +	// Now branch away to caller.
   5.228 +	br.sptk.many b1
   5.229 +	;;
   5.230 +
   5.231 +ia64_os_mca_dispatch:
   5.232 +
   5.233 +	// Serialize all MCA processing
   5.234 +	mov	r3=1;;
   5.235 +	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
   5.236 +ia64_os_mca_spin:
   5.237 +	xchg8	r4=[r2],r3;;
   5.238 +	cmp.ne	p6,p0=r4,r0
   5.239 +(p6)	br ia64_os_mca_spin
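	// [Editor's note] The spin loop above is a plain exchange lock;
	// roughly, in C:
	//	while (xchg(&ia64_mca_serialize, 1) != 0)
	//		;	/* spin: another CPU owns OS_MCA */
	// The matching release is the st8.rel of r0 in
	// ia64_os_mca_done_restore below.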
   5.240 +
   5.241 +	// Save the SAL to OS MCA handoff state as defined
   5.242 +	// by SAL SPEC 3.0
   5.243 +	// NOTE : The order in which the state gets saved
   5.244 +	//	  is dependent on the way the C-structure
   5.245 +	//	  for ia64_mca_sal_to_os_state_t has been
   5.246 +	//	  defined in include/asm/mca.h
   5.247 +	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
   5.248 +	;;
   5.249 +
   5.250 +	// LOG PROCESSOR STATE INFO FROM HERE ON..
   5.251 +begin_os_mca_dump:
   5.252 +	br	ia64_os_mca_proc_state_dump;;
   5.253 +
   5.254 +ia64_os_mca_done_dump:
   5.255 +
   5.256 +	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
   5.257 +	;;
    5.258 +	ld8 r18=[r16]		// Get processor state parameter on exiting PALE_CHECK.
   5.259 +	;;
   5.260 +	tbit.nz p6,p7=r18,60
   5.261 +(p7)	br.spnt done_tlb_purge_and_reload
   5.262 +
    5.263 +	// The following code purges TC and TR entries, then reloads the TR entries.
   5.264 +	// Purge percpu data TC entries.
   5.265 +begin_tlb_purge_and_reload:
   5.266 +	movl r18=ia64_reload_tr;;
   5.267 +	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
   5.268 +	mov b1=r18;;
   5.269 +	br.sptk.many ia64_do_tlb_purge;;
   5.270 +
   5.271 +ia64_reload_tr:
   5.272 +	// Finally reload the TR registers.
   5.273 +	// 1. Reload DTR/ITR registers for kernel.
   5.274 +	mov r18=KERNEL_TR_PAGE_SHIFT<<2
   5.275 +	movl r17=KERNEL_START
   5.276 +	;;
   5.277 +	mov cr.itir=r18
   5.278 +	mov cr.ifa=r17
   5.279 +        mov r16=IA64_TR_KERNEL
   5.280 +	mov r19=ip
   5.281 +	movl r18=PAGE_KERNEL
   5.282 +	;;
   5.283 +        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
   5.284 +	;;
   5.285 +	or r18=r17,r18
   5.286 +	;;
   5.287 +        itr.i itr[r16]=r18
   5.288 +	;;
   5.289 +        itr.d dtr[r16]=r18
   5.290 +        ;;
   5.291 +	srlz.i
   5.292 +	srlz.d
   5.293 +	;;
   5.294 +	// 2. Reload DTR register for PERCPU data.
   5.295 +	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
   5.296 +	;;
   5.297 +	movl r16=PERCPU_ADDR		// vaddr
   5.298 +	movl r18=PERCPU_PAGE_SHIFT<<2
   5.299 +	;;
   5.300 +	mov cr.itir=r18
   5.301 +	mov cr.ifa=r16
   5.302 +	;;
   5.303 +	ld8 r18=[r2]			// load per-CPU PTE
   5.304 +	mov r16=IA64_TR_PERCPU_DATA;
   5.305 +	;;
   5.306 +	itr.d dtr[r16]=r18
   5.307 +	;;
   5.308 +	srlz.d
   5.309 +	;;
   5.310 +	// 3. Reload ITR for PAL code.
   5.311 +	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
   5.312 +	;;
   5.313 +	ld8 r18=[r2]			// load PAL PTE
   5.314 +	;;
   5.315 +	GET_THIS_PADDR(r2, ia64_mca_pal_base)
   5.316 +	;;
   5.317 +	ld8 r16=[r2]			// load PAL vaddr
   5.318 +	mov r19=IA64_GRANULE_SHIFT<<2
   5.319 +	;;
   5.320 +	mov cr.itir=r19
   5.321 +	mov cr.ifa=r16
   5.322 +	mov r20=IA64_TR_PALCODE
   5.323 +	;;
   5.324 +	itr.i itr[r20]=r18
   5.325 +	;;
   5.326 +	srlz.i
   5.327 +	;;
   5.328 +	// 4. Reload DTR for stack.
   5.329 +	mov r16=IA64_KR(CURRENT_STACK)
   5.330 +	;;
   5.331 +	shl r16=r16,IA64_GRANULE_SHIFT
   5.332 +	movl r19=PAGE_OFFSET
   5.333 +	;;
   5.334 +	add r18=r19,r16
   5.335 +	movl r20=PAGE_KERNEL
   5.336 +	;;
   5.337 +	add r16=r20,r16
   5.338 +	mov r19=IA64_GRANULE_SHIFT<<2
   5.339 +	;;
   5.340 +	mov cr.itir=r19
   5.341 +	mov cr.ifa=r18
   5.342 +	mov r20=IA64_TR_CURRENT_STACK
   5.343 +	;;
   5.344 +	itr.d dtr[r20]=r16
   5.345 +	;;
   5.346 +	srlz.d
   5.347 +	;;
   5.348 +	br.sptk.many done_tlb_purge_and_reload
   5.349 +err:
   5.350 +	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
   5.351 +	br.sptk.many ia64_os_mca_done_restore
   5.352 +
   5.353 +done_tlb_purge_and_reload:
   5.354 +
   5.355 +	// Setup new stack frame for OS_MCA handling
   5.356 +	GET_IA64_MCA_DATA(r2)
   5.357 +	;;
   5.358 +	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
   5.359 +	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
   5.360 +	;;
   5.361 +	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
   5.362 +
   5.363 +	GET_IA64_MCA_DATA(r2)
   5.364 +	;;
   5.365 +	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
   5.366 +	;;
   5.367 +	mov r12=r2		// establish new stack-pointer
   5.368 +
   5.369 +        // Enter virtual mode from physical mode
   5.370 +	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
   5.371 +ia64_os_mca_virtual_begin:
   5.372 +
   5.373 +	// Call virtual mode handler
   5.374 +	movl		r2=ia64_mca_ucmc_handler;;
   5.375 +	mov		b6=r2;;
   5.376 +	br.call.sptk.many    b0=b6;;
   5.377 +.ret0:
   5.378 +	// Revert back to physical mode before going back to SAL
   5.379 +	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
   5.380 +ia64_os_mca_virtual_end:
   5.381 +
   5.382 +	// restore the original stack frame here
   5.383 +	GET_IA64_MCA_DATA(r2)
   5.384 +	;;
   5.385 +	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
   5.386 +	;;
   5.387 +	movl    r4=IA64_PSR_MC
   5.388 +	;;
   5.389 +	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
   5.390 +
   5.391 +	// let us restore all the registers from our PSI structure
   5.392 +	mov	r8=gp
   5.393 +	;;
   5.394 +begin_os_mca_restore:
   5.395 +	br	ia64_os_mca_proc_state_restore;;
   5.396 +
   5.397 +ia64_os_mca_done_restore:
   5.398 +	OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
   5.399 +	// branch back to SALE_CHECK
   5.400 +	ld8		r3=[r2];;
   5.401 +	mov		b0=r3;;		// SAL_CHECK return address
   5.402 +
   5.403 +	// release lock
   5.404 +	movl		r3=ia64_mca_serialize;;
   5.405 +	DATA_VA_TO_PA(r3);;
   5.406 +	st8.rel		[r3]=r0
   5.407 +
   5.408 +	br		b0
   5.409 +	;;
   5.410 +ia64_os_mca_dispatch_end:
   5.411 +//EndMain//////////////////////////////////////////////////////////////////////
   5.412 +
   5.413 +
   5.414 +//++
   5.415 +// Name:
   5.416 +//      ia64_os_mca_proc_state_dump()
   5.417 +//
   5.418 +// Stub Description:
   5.419 +//
   5.420 +//       This stub dumps the processor state during MCHK to a data area
   5.421 +//
   5.422 +//--
   5.423 +
   5.424 +ia64_os_mca_proc_state_dump:
   5.425 +// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
   5.426 +//  to virtual addressing mode.
   5.427 +	GET_IA64_MCA_DATA(r2)
   5.428 +	;;
   5.429 +	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
   5.430 +	;;
   5.431 +// save ar.NaT
   5.432 +	mov		r5=ar.unat                  // ar.unat
   5.433 +
   5.434 +// save banked GRs 16-31 along with NaT bits
   5.435 +	bsw.1;;
   5.436 +	st8.spill	[r2]=r16,8;;
   5.437 +	st8.spill	[r2]=r17,8;;
   5.438 +	st8.spill	[r2]=r18,8;;
   5.439 +	st8.spill	[r2]=r19,8;;
   5.440 +	st8.spill	[r2]=r20,8;;
   5.441 +	st8.spill	[r2]=r21,8;;
   5.442 +	st8.spill	[r2]=r22,8;;
   5.443 +	st8.spill	[r2]=r23,8;;
   5.444 +	st8.spill	[r2]=r24,8;;
   5.445 +	st8.spill	[r2]=r25,8;;
   5.446 +	st8.spill	[r2]=r26,8;;
   5.447 +	st8.spill	[r2]=r27,8;;
   5.448 +	st8.spill	[r2]=r28,8;;
   5.449 +	st8.spill	[r2]=r29,8;;
   5.450 +	st8.spill	[r2]=r30,8;;
   5.451 +	st8.spill	[r2]=r31,8;;
   5.452 +
   5.453 +	mov		r4=ar.unat;;
   5.454 +	st8		[r2]=r4,8                // save User NaT bits for r16-r31
   5.455 +	mov		ar.unat=r5                  // restore original unat
   5.456 +	bsw.0;;
   5.457 +
   5.458 +//save BRs
   5.459 +	add		r4=8,r2                  // duplicate r2 in r4
    5.460 +	add		r6=2*8,r2                // duplicate r2 in r6
   5.461 +
   5.462 +	mov		r3=b0
   5.463 +	mov		r5=b1
   5.464 +	mov		r7=b2;;
   5.465 +	st8		[r2]=r3,3*8
   5.466 +	st8		[r4]=r5,3*8
   5.467 +	st8		[r6]=r7,3*8;;
   5.468 +
   5.469 +	mov		r3=b3
   5.470 +	mov		r5=b4
   5.471 +	mov		r7=b5;;
   5.472 +	st8		[r2]=r3,3*8
   5.473 +	st8		[r4]=r5,3*8
   5.474 +	st8		[r6]=r7,3*8;;
   5.475 +
   5.476 +	mov		r3=b6
   5.477 +	mov		r5=b7;;
   5.478 +	st8		[r2]=r3,2*8
   5.479 +	st8		[r4]=r5,2*8;;
   5.480 +
   5.481 +cSaveCRs:
   5.482 +// save CRs
   5.483 +	add		r4=8,r2                  // duplicate r2 in r4
    5.484 +	add		r6=2*8,r2                // duplicate r2 in r6
   5.485 +
   5.486 +	mov		r3=cr.dcr
   5.487 +	mov		r5=cr.itm
   5.488 +	mov		r7=cr.iva;;
   5.489 +
   5.490 +	st8		[r2]=r3,8*8
   5.491 +	st8		[r4]=r5,3*8
    5.492 +	st8		[r6]=r7,3*8;;            // 48 byte increments
   5.493 +
   5.494 +	mov		r3=cr.pta;;
    5.495 +	st8		[r2]=r3,8*8;;            // 64 byte increments
   5.496 +
    5.497 +// if PSR.ic=1, reading interruption registers causes an illegal operation fault
   5.498 +	mov		r3=psr;;
   5.499 +	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
    5.500 +(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes
   5.501 +begin_skip_intr_regs:
   5.502 +(p6)	br		SkipIntrRegs;;
   5.503 +
   5.504 +	add		r4=8,r2                  // duplicate r2 in r4
   5.505 +	add		r6=2*8,r2                // duplicate r2 in r6
   5.506 +
   5.507 +	mov		r3=cr.ipsr
   5.508 +	mov		r5=cr.isr
   5.509 +	mov		r7=r0;;
   5.510 +	st8		[r2]=r3,3*8
   5.511 +	st8		[r4]=r5,3*8
   5.512 +	st8		[r6]=r7,3*8;;
   5.513 +
   5.514 +	mov		r3=cr.iip
   5.515 +	mov		r5=cr.ifa
   5.516 +	mov		r7=cr.itir;;
   5.517 +	st8		[r2]=r3,3*8
   5.518 +	st8		[r4]=r5,3*8
   5.519 +	st8		[r6]=r7,3*8;;
   5.520 +
   5.521 +	mov		r3=cr.iipa
   5.522 +	mov		r5=cr.ifs
   5.523 +	mov		r7=cr.iim;;
   5.524 +	st8		[r2]=r3,3*8
   5.525 +	st8		[r4]=r5,3*8
   5.526 +	st8		[r6]=r7,3*8;;
   5.527 +
   5.528 +	mov		r3=cr25;;                   // cr.iha
    5.529 +	st8		[r2]=r3,160;;               // 160 byte increment
   5.530 +
   5.531 +SkipIntrRegs:
    5.532 +	st8		[r2]=r0,152;;               // another 152 byte inc.
   5.533 +
   5.534 +	add		r4=8,r2                     // duplicate r2 in r4
   5.535 +	add		r6=2*8,r2                   // duplicate r2 in r6
   5.536 +
   5.537 +	mov		r3=cr.lid
   5.538 +//	mov		r5=cr.ivr                     // cr.ivr, don't read it
   5.539 +	mov		r7=cr.tpr;;
   5.540 +	st8		[r2]=r3,3*8
   5.541 +	st8		[r4]=r5,3*8
   5.542 +	st8		[r6]=r7,3*8;;
   5.543 +
   5.544 +	mov		r3=r0                       // cr.eoi => cr67
   5.545 +	mov		r5=r0                       // cr.irr0 => cr68
   5.546 +	mov		r7=r0;;                     // cr.irr1 => cr69
   5.547 +	st8		[r2]=r3,3*8
   5.548 +	st8		[r4]=r5,3*8
   5.549 +	st8		[r6]=r7,3*8;;
   5.550 +
   5.551 +	mov		r3=r0                       // cr.irr2 => cr70
   5.552 +	mov		r5=r0                       // cr.irr3 => cr71
   5.553 +	mov		r7=cr.itv;;
   5.554 +	st8		[r2]=r3,3*8
   5.555 +	st8		[r4]=r5,3*8
   5.556 +	st8		[r6]=r7,3*8;;
   5.557 +
   5.558 +	mov		r3=cr.pmv
   5.559 +	mov		r5=cr.cmcv;;
   5.560 +	st8		[r2]=r3,7*8
   5.561 +	st8		[r4]=r5,7*8;;
   5.562 +
   5.563 +	mov		r3=r0                       // cr.lrr0 => cr80
   5.564 +	mov		r5=r0;;                     // cr.lrr1 => cr81
   5.565 +	st8		[r2]=r3,23*8
   5.566 +	st8		[r4]=r5,23*8;;
   5.567 +
   5.568 +	adds		r2=25*8,r2;;
   5.569 +
   5.570 +cSaveARs:
   5.571 +// save ARs
   5.572 +	add		r4=8,r2                  // duplicate r2 in r4
   5.573 +	add		r6=2*8,r2                // duplicate r2 in r6
   5.574 +
   5.575 +	mov		r3=ar.k0
   5.576 +	mov		r5=ar.k1
   5.577 +	mov		r7=ar.k2;;
   5.578 +	st8		[r2]=r3,3*8
   5.579 +	st8		[r4]=r5,3*8
   5.580 +	st8		[r6]=r7,3*8;;
   5.581 +
   5.582 +	mov		r3=ar.k3
   5.583 +	mov		r5=ar.k4
   5.584 +	mov		r7=ar.k5;;
   5.585 +	st8		[r2]=r3,3*8
   5.586 +	st8		[r4]=r5,3*8
   5.587 +	st8		[r6]=r7,3*8;;
   5.588 +
   5.589 +	mov		r3=ar.k6
   5.590 +	mov		r5=ar.k7
   5.591 +	mov		r7=r0;;                     // ar.kr8
   5.592 +	st8		[r2]=r3,10*8
   5.593 +	st8		[r4]=r5,10*8
    5.594 +	st8		[r6]=r7,10*8;;           // increment by 72 bytes
   5.595 +
   5.596 +	mov		r3=ar.rsc
   5.597 +	mov		ar.rsc=r0			    // put RSE in enforced lazy mode
   5.598 +	mov		r5=ar.bsp
   5.599 +	;;
   5.600 +	mov		r7=ar.bspstore;;
   5.601 +	st8		[r2]=r3,3*8
   5.602 +	st8		[r4]=r5,3*8
   5.603 +	st8		[r6]=r7,3*8;;
   5.604 +
   5.605 +	mov		r3=ar.rnat;;
   5.606 +	st8		[r2]=r3,8*13             // increment by 13x8 bytes
   5.607 +
   5.608 +	mov		r3=ar.ccv;;
   5.609 +	st8		[r2]=r3,8*4
   5.610 +
   5.611 +	mov		r3=ar.unat;;
   5.612 +	st8		[r2]=r3,8*4
   5.613 +
   5.614 +	mov		r3=ar.fpsr;;
   5.615 +	st8		[r2]=r3,8*4
   5.616 +
   5.617 +	mov		r3=ar.itc;;
   5.618 +	st8		[r2]=r3,160                 // 160
   5.619 +
   5.620 +	mov		r3=ar.pfs;;
   5.621 +	st8		[r2]=r3,8
   5.622 +
   5.623 +	mov		r3=ar.lc;;
   5.624 +	st8		[r2]=r3,8
   5.625 +
   5.626 +	mov		r3=ar.ec;;
   5.627 +	st8		[r2]=r3
    5.628 +	add		r2=8*62,r2               // padding
   5.629 +
   5.630 +// save RRs
   5.631 +	mov		ar.lc=0x08-1
   5.632 +	movl		r4=0x00;;
   5.633 +
   5.634 +cStRR:
   5.635 +	dep.z		r5=r4,61,3;;
   5.636 +	mov		r3=rr[r5];;
   5.637 +	st8		[r2]=r3,8
   5.638 +	add		r4=1,r4
   5.639 +	br.cloop.sptk.few	cStRR
   5.640 +	;;
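	// [Editor's sketch] Region registers are indexed by the top three
	// bits of a virtual address, hence the dep.z of the loop counter
	// into bits 63:61.  Roughly, in C (ia64_get_rr as in gcc_intrin.h):
	//	for (i = 0; i < 8; i++)
	//		save[i] = ia64_get_rr((u64)i << 61);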
   5.641 +end_os_mca_dump:
   5.642 +	br	ia64_os_mca_done_dump;;
   5.643 +
   5.644 +//EndStub//////////////////////////////////////////////////////////////////////
   5.645 +
   5.646 +
   5.647 +//++
   5.648 +// Name:
   5.649 +//       ia64_os_mca_proc_state_restore()
   5.650 +//
   5.651 +// Stub Description:
   5.652 +//
   5.653 +//       This is a stub to restore the saved processor state during MCHK
   5.654 +//
   5.655 +//--
   5.656 +
   5.657 +ia64_os_mca_proc_state_restore:
   5.658 +
   5.659 +// Restore bank1 GR16-31
   5.660 +	GET_IA64_MCA_DATA(r2)
   5.661 +	;;
   5.662 +	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
   5.663 +
   5.664 +restore_GRs:                                    // restore bank-1 GRs 16-31
   5.665 +	bsw.1;;
   5.666 +	add		r3=16*8,r2;;                // to get to NaT of GR 16-31
   5.667 +	ld8		r3=[r3];;
   5.668 +	mov		ar.unat=r3;;                // first restore NaT
   5.669 +
   5.670 +	ld8.fill	r16=[r2],8;;
   5.671 +	ld8.fill	r17=[r2],8;;
   5.672 +	ld8.fill	r18=[r2],8;;
   5.673 +	ld8.fill	r19=[r2],8;;
   5.674 +	ld8.fill	r20=[r2],8;;
   5.675 +	ld8.fill	r21=[r2],8;;
   5.676 +	ld8.fill	r22=[r2],8;;
   5.677 +	ld8.fill	r23=[r2],8;;
   5.678 +	ld8.fill	r24=[r2],8;;
   5.679 +	ld8.fill	r25=[r2],8;;
   5.680 +	ld8.fill	r26=[r2],8;;
   5.681 +	ld8.fill	r27=[r2],8;;
   5.682 +	ld8.fill	r28=[r2],8;;
   5.683 +	ld8.fill	r29=[r2],8;;
   5.684 +	ld8.fill	r30=[r2],8;;
   5.685 +	ld8.fill	r31=[r2],8;;
   5.686 +
   5.687 +	ld8		r3=[r2],8;;              // increment to skip NaT
   5.688 +	bsw.0;;
   5.689 +
   5.690 +restore_BRs:
   5.691 +	add		r4=8,r2                  // duplicate r2 in r4
    5.692 +	add		r6=2*8,r2;;              // duplicate r2 in r6
   5.693 +
   5.694 +	ld8		r3=[r2],3*8
   5.695 +	ld8		r5=[r4],3*8
   5.696 +	ld8		r7=[r6],3*8;;
   5.697 +	mov		b0=r3
   5.698 +	mov		b1=r5
   5.699 +	mov		b2=r7;;
   5.700 +
   5.701 +	ld8		r3=[r2],3*8
   5.702 +	ld8		r5=[r4],3*8
   5.703 +	ld8		r7=[r6],3*8;;
   5.704 +	mov		b3=r3
   5.705 +	mov		b4=r5
   5.706 +	mov		b5=r7;;
   5.707 +
   5.708 +	ld8		r3=[r2],2*8
   5.709 +	ld8		r5=[r4],2*8;;
   5.710 +	mov		b6=r3
   5.711 +	mov		b7=r5;;
   5.712 +
   5.713 +restore_CRs:
   5.714 +	add		r4=8,r2                  // duplicate r2 in r4
    5.715 +	add		r6=2*8,r2;;              // duplicate r2 in r6
   5.716 +
   5.717 +	ld8		r3=[r2],8*8
   5.718 +	ld8		r5=[r4],3*8
   5.719 +	ld8		r7=[r6],3*8;;            // 48 byte increments
   5.720 +	mov		cr.dcr=r3
   5.721 +	mov		cr.itm=r5
   5.722 +	mov		cr.iva=r7;;
   5.723 +
   5.724 +	ld8		r3=[r2],8*8;;            // 64 byte increments
   5.725 +//      mov		cr.pta=r3
   5.726 +
   5.727 +
   5.728 +// if PSR.ic=1, reading interruption registers causes an illegal operation fault
   5.729 +	mov		r3=psr;;
   5.730 +	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
    5.731 +(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes
   5.732 +
   5.733 +begin_rskip_intr_regs:
   5.734 +(p6)	br		rSkipIntrRegs;;
   5.735 +
   5.736 +	add		r4=8,r2                  // duplicate r2 in r4
    5.737 +	add		r6=2*8,r2;;              // duplicate r2 in r6
   5.738 +
   5.739 +	ld8		r3=[r2],3*8
   5.740 +	ld8		r5=[r4],3*8
   5.741 +	ld8		r7=[r6],3*8;;
   5.742 +	mov		cr.ipsr=r3
   5.743 +//	mov		cr.isr=r5                   // cr.isr is read only
   5.744 +
   5.745 +	ld8		r3=[r2],3*8
   5.746 +	ld8		r5=[r4],3*8
   5.747 +	ld8		r7=[r6],3*8;;
   5.748 +	mov		cr.iip=r3
   5.749 +	mov		cr.ifa=r5
   5.750 +	mov		cr.itir=r7;;
   5.751 +
   5.752 +	ld8		r3=[r2],3*8
   5.753 +	ld8		r5=[r4],3*8
   5.754 +	ld8		r7=[r6],3*8;;
   5.755 +	mov		cr.iipa=r3
   5.756 +	mov		cr.ifs=r5
   5.757 +	mov		cr.iim=r7
   5.758 +
   5.759 +	ld8		r3=[r2],160;;               // 160 byte increment
   5.760 +	mov		cr.iha=r3
   5.761 +
   5.762 +rSkipIntrRegs:
   5.763 +	ld8		r3=[r2],152;;               // another 152 byte inc.
   5.764 +
   5.765 +	add		r4=8,r2                     // duplicate r2 in r4
   5.766 +	add		r6=2*8,r2;;                 // duplicate r2 in r6
   5.767 +
   5.768 +	ld8		r3=[r2],8*3
   5.769 +	ld8		r5=[r4],8*3
   5.770 +	ld8		r7=[r6],8*3;;
   5.771 +	mov		cr.lid=r3
   5.772 +//	mov		cr.ivr=r5                   // cr.ivr is read only
   5.773 +	mov		cr.tpr=r7;;
   5.774 +
   5.775 +	ld8		r3=[r2],8*3
   5.776 +	ld8		r5=[r4],8*3
   5.777 +	ld8		r7=[r6],8*3;;
   5.778 +//	mov		cr.eoi=r3
   5.779 +//	mov		cr.irr0=r5                  // cr.irr0 is read only
   5.780 +//	mov		cr.irr1=r7;;                // cr.irr1 is read only
   5.781 +
   5.782 +	ld8		r3=[r2],8*3
   5.783 +	ld8		r5=[r4],8*3
   5.784 +	ld8		r7=[r6],8*3;;
   5.785 +//	mov		cr.irr2=r3                  // cr.irr2 is read only
   5.786 +//	mov		cr.irr3=r5                  // cr.irr3 is read only
   5.787 +	mov		cr.itv=r7;;
   5.788 +
   5.789 +	ld8		r3=[r2],8*7
   5.790 +	ld8		r5=[r4],8*7;;
   5.791 +	mov		cr.pmv=r3
   5.792 +	mov		cr.cmcv=r5;;
   5.793 +
   5.794 +	ld8		r3=[r2],8*23
   5.795 +	ld8		r5=[r4],8*23;;
   5.796 +	adds		r2=8*23,r2
   5.797 +	adds		r4=8*23,r4;;
   5.798 +//	mov		cr.lrr0=r3
   5.799 +//	mov		cr.lrr1=r5
   5.800 +
   5.801 +	adds		r2=8*2,r2;;
   5.802 +
   5.803 +restore_ARs:
   5.804 +	add		r4=8,r2                  // duplicate r2 in r4
    5.805 +	add		r6=2*8,r2;;              // duplicate r2 in r6
   5.806 +
   5.807 +	ld8		r3=[r2],3*8
   5.808 +	ld8		r5=[r4],3*8
   5.809 +	ld8		r7=[r6],3*8;;
   5.810 +	mov		ar.k0=r3
   5.811 +	mov		ar.k1=r5
   5.812 +	mov		ar.k2=r7;;
   5.813 +
   5.814 +	ld8		r3=[r2],3*8
   5.815 +	ld8		r5=[r4],3*8
   5.816 +	ld8		r7=[r6],3*8;;
   5.817 +	mov		ar.k3=r3
   5.818 +	mov		ar.k4=r5
   5.819 +	mov		ar.k5=r7;;
   5.820 +
   5.821 +	ld8		r3=[r2],10*8
   5.822 +	ld8		r5=[r4],10*8
   5.823 +	ld8		r7=[r6],10*8;;
   5.824 +	mov		ar.k6=r3
   5.825 +	mov		ar.k7=r5
   5.826 +	;;
   5.827 +
   5.828 +	ld8		r3=[r2],3*8
   5.829 +	ld8		r5=[r4],3*8
   5.830 +	ld8		r7=[r6],3*8;;
   5.831 +//	mov		ar.rsc=r3
   5.832 +//	mov		ar.bsp=r5                   // ar.bsp is read only
   5.833 +	mov		ar.rsc=r0			    // make sure that RSE is in enforced lazy mode
   5.834 +	;;
   5.835 +	mov		ar.bspstore=r7;;
   5.836 +
   5.837 +	ld8		r9=[r2],8*13;;
   5.838 +	mov		ar.rnat=r9
   5.839 +
   5.840 +	mov		ar.rsc=r3
   5.841 +	ld8		r3=[r2],8*4;;
   5.842 +	mov		ar.ccv=r3
   5.843 +
   5.844 +	ld8		r3=[r2],8*4;;
   5.845 +	mov		ar.unat=r3
   5.846 +
   5.847 +	ld8		r3=[r2],8*4;;
   5.848 +	mov		ar.fpsr=r3
   5.849 +
   5.850 +	ld8		r3=[r2],160;;               // 160
   5.851 +//      mov		ar.itc=r3
   5.852 +
   5.853 +	ld8		r3=[r2],8;;
   5.854 +	mov		ar.pfs=r3
   5.855 +
   5.856 +	ld8		r3=[r2],8;;
   5.857 +	mov		ar.lc=r3
   5.858 +
   5.859 +	ld8		r3=[r2];;
   5.860 +	mov		ar.ec=r3
   5.861 +	add		r2=8*62,r2;;             // padding
   5.862 +
   5.863 +restore_RRs:
   5.864 +	mov		r5=ar.lc
   5.865 +	mov		ar.lc=0x08-1
   5.866 +	movl		r4=0x00;;
   5.867 +cStRRr:
   5.868 +	dep.z		r7=r4,61,3
   5.869 +	ld8		r3=[r2],8;;
    5.870 +	mov		rr[r7]=r3                   // what are its access privileges?
   5.871 +	add		r4=1,r4
   5.872 +	br.cloop.sptk.few	cStRRr
   5.873 +	;;
   5.874 +	mov		ar.lc=r5
   5.875 +	;;
   5.876 +end_os_mca_restore:
   5.877 +	br	ia64_os_mca_done_restore;;
   5.878 +
   5.879 +//EndStub//////////////////////////////////////////////////////////////////////
    5.880 +#else	/* XEN: OS_MCA handling is not wired up yet; just park the CPU */
   5.881 +ia64_os_mca_dispatch:
   5.882 +1:
   5.883 +	br.sptk 1b
   5.884 +ia64_os_mca_dispatch_end:
   5.885 +#endif /* !XEN */
   5.886 +
   5.887 +
    5.888 +// ok, the issue here is that we need to save state information so
    5.889 +// it can be usable by the kernel debugger and show_regs routines.
    5.890 +// In order to do this, our best bet is to save the current state (plus
    5.891 +// the state information obtained from the MIN_STATE_AREA) into a pt_regs
    5.892 +// format.  This way we can pass it on in a usable format.
   5.893 +//
   5.894 +
   5.895 +//
   5.896 +// SAL to OS entry point for INIT on the monarch processor
   5.897 +// This has been defined for registration purposes with SAL
   5.898 +// as a part of ia64_mca_init.
   5.899 +//
   5.900 +// When we get here, the following registers have been
   5.901 +// set by the SAL for our use
   5.902 +//
   5.903 +//		1. GR1 = OS INIT GP
   5.904 +//		2. GR8 = PAL_PROC physical address
   5.905 +//		3. GR9 = SAL_PROC physical address
   5.906 +//		4. GR10 = SAL GP (physical)
   5.907 +//		5. GR11 = Init Reason
   5.908 +//			0 = Received INIT for event other than crash dump switch
   5.909 +//			1 = Received wakeup at the end of an OS_MCA corrected machine check
    5.910 +//			2 = Received INIT due to CrashDump switch assertion
   5.911 +//
   5.912 +//		6. GR12 = Return address to location within SAL_INIT procedure
   5.913 +
   5.914 +
   5.915 +GLOBAL_ENTRY(ia64_monarch_init_handler)
   5.916 +	.prologue
   5.917 +	// stash the information the SAL passed to os
   5.918 +	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
   5.919 +	;;
   5.920 +	SAVE_MIN_WITH_COVER
   5.921 +	;;
   5.922 +	mov r8=cr.ifa
   5.923 +	mov r9=cr.isr
   5.924 +	adds r3=8,r2				// set up second base pointer
   5.925 +	;;
   5.926 +	SAVE_REST
   5.927 +
   5.928 +// ok, enough should be saved at this point to be dangerous, and supply
    5.929 +// information for a dump.
   5.930 +// We need to switch to Virtual mode before hitting the C functions.
   5.931 +
   5.932 +	movl	r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
   5.933 +	mov	r3=psr	// get the current psr, minimum enabled at this point
   5.934 +	;;
   5.935 +	or	r2=r2,r3
   5.936 +	;;
   5.937 +	movl	r3=IVirtual_Switch
   5.938 +	;;
   5.939 +	mov	cr.iip=r3	// short return to set the appropriate bits
   5.940 +	mov	cr.ipsr=r2	// need to do an rfi to set appropriate bits
   5.941 +	;;
   5.942 +	rfi
   5.943 +	;;
   5.944 +IVirtual_Switch:
   5.945 +	//
   5.946 +	// We should now be running virtual
   5.947 +	//
   5.948 +	// Let's call the C handler to get the rest of the state info
   5.949 +	//
   5.950 +	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
   5.951 +	;;
   5.952 +	adds out0=16,sp				// out0 = pointer to pt_regs
   5.953 +	;;
   5.954 +	DO_SAVE_SWITCH_STACK
   5.955 +	.body
    5.956 +	adds out1=16,sp				// out1 = pointer to switch_stack
   5.957 +
   5.958 +	br.call.sptk.many rp=ia64_init_handler
   5.959 +.ret1:
   5.960 +
   5.961 +return_from_init:
   5.962 +	br.sptk return_from_init
   5.963 +END(ia64_monarch_init_handler)
   5.964 +
   5.965 +//
   5.966 +// SAL to OS entry point for INIT on the slave processor
   5.967 +// This has been defined for registration purposes with SAL
   5.968 +// as a part of ia64_mca_init.
   5.969 +//
   5.970 +
   5.971 +GLOBAL_ENTRY(ia64_slave_init_handler)
   5.972 +1:	br.sptk 1b
   5.973 +END(ia64_slave_init_handler)
     6.1 --- a/xen/arch/ia64/linux-xen/minstate.h	Fri Jul 07 10:36:31 2006 -0600
     6.2 +++ b/xen/arch/ia64/linux-xen/minstate.h	Sun Jul 09 20:04:23 2006 -0600
     6.3 @@ -36,7 +36,31 @@
     6.4   * For mca_asm.S we want to access the stack physically since the state is saved before we
     6.5   * go virtual and don't want to destroy the iip or ipsr.
     6.6   */
     6.7 -#define MINSTATE_START_SAVE_MIN_PHYS								\
     6.8 +#ifdef XEN
     6.9 +# define MINSTATE_START_SAVE_MIN_PHYS								\
    6.10 +(pKStk)	movl r3=THIS_CPU(ia64_mca_data);;							\
    6.11 +(pKStk)	tpa r3 = r3;;										\
    6.12 +(pKStk)	ld8 r3 = [r3];;										\
    6.13 +(pKStk)	addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
    6.14 +(pKStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
    6.15 +(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
    6.16 +(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
    6.17 +	;;											\
    6.18 +(pUStk)	mov r24=ar.rnat;									\
    6.19 +(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */	\
    6.20 +(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
    6.21 +(pUStk)	dep r22=-1,r22,60,4;			/* compute Xen virtual addr of RBS */	\
    6.22 +	;;											\
    6.23 +(pUStk)	mov ar.bspstore=r22;			/* switch to Xen RBS */			\
    6.24 +	;;											\
    6.25 +(pUStk)	mov r18=ar.bsp;										\
    6.26 +(pUStk)	mov ar.rsc=0x3;	 /* set eager mode, pl 0, little-endian, loadrs=0 */			\
    6.27 +
    6.28 +# define MINSTATE_END_SAVE_MIN_PHYS								\
    6.29 +	dep r12=-1,r12,60,4;	    /* make sp a Xen virtual address */			\
    6.30 +	;;
    6.31 +#else
    6.32 +# define MINSTATE_START_SAVE_MIN_PHYS								\
    6.33  (pKStk) mov r3=IA64_KR(PER_CPU_DATA);;								\
    6.34  (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
    6.35  (pKStk) ld8 r3 = [r3];;										\
    6.36 @@ -55,15 +79,17 @@
    6.37  (pUStk)	mov r18=ar.bsp;										\
    6.38  (pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
    6.39  
    6.40 -#define MINSTATE_END_SAVE_MIN_PHYS								\
    6.41 +# define MINSTATE_END_SAVE_MIN_PHYS								\
    6.42  	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */			\
    6.43  	;;
    6.44 +#endif /* XEN */
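[Editor's note] The substantive difference between the two
MINSTATE_END_SAVE_MIN_PHYS variants is which region bits are forced back
into sp; illustrative arithmetic (phys_sp is hypothetical):

	/* Xen:   dep r12=-1,r12,60,4  =>  sp |= 0xfUL << 60, i.e. the
	 *        0xf000... space where KERNEL_START lives (see the
	 *        xensystem.h hunk at the end of this changeset);
	 * Linux: dep r12=-1,r12,61,3  =>  sp |= 0x7UL << 61, i.e. the
	 *        region-7 (0xe000...) kernel mapping. */
	unsigned long xen_sp   = phys_sp | (0xfUL << 60);
	unsigned long linux_sp = phys_sp | (0x7UL << 61);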
    6.45  
    6.46  #ifdef MINSTATE_VIRT
    6.47  #ifdef XEN
    6.48  # define MINSTATE_GET_CURRENT(reg)					\
    6.49                 movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;	\
    6.50                 ld8 reg=[reg]
    6.51 +# define MINSTATE_GET_CURRENT_VIRT(reg)	MINSTATE_GET_CURRENT(reg)
    6.52  #else
    6.53  # define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
    6.54  #endif
    6.55 @@ -72,7 +98,19 @@
    6.56  #endif
    6.57  
    6.58  #ifdef MINSTATE_PHYS
    6.59 +# ifdef XEN
    6.60 +# define MINSTATE_GET_CURRENT(reg)					\
    6.61 +	movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;		\
    6.62 +	tpa reg=reg;;							\
    6.63 +	ld8 reg=[reg];;							\
    6.64 +	tpa reg=reg;;
    6.65 +# define MINSTATE_GET_CURRENT_VIRT(reg)					\
    6.66 +	movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;		\
    6.67 +	tpa reg=reg;;							\
    6.68 +	ld8 reg=[reg];;
    6.69 +#else
    6.70  # define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
    6.71 +#endif /* XEN */
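[Editor's note] The double tpa in the Xen PHYS variant is there because two
different virtual addresses are in play: first the address of the per-CPU
cpu_kr slot itself (so the ld8 works in physical mode), then the `current'
pointer loaded from that slot.  The _VIRT flavour stops after the load since
its callers want the virtual task pointer while still executing physically.
Sketch, with tpa() standing in for the instruction (illustrative only):

	slot_p    = tpa(THIS_CPU(cpu_kr) + IA64_KR_CURRENT_OFFSET);
	current_v = *slot_p;		/* virtual task pointer */
	current_p = tpa(current_v);	/* the PHYS variant's extra step */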
    6.72  # define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
    6.73  # define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
    6.74  #endif
    6.75 @@ -175,8 +213,8 @@
    6.76  	;;											\
    6.77  .mem.offset 0,0; st8.spill [r16]=r13,16;							\
    6.78  .mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
    6.79 -	/* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */				\
    6.80 -	MINSTATE_GET_CURRENT(r13);		/* XEN establish `current' */				\
    6.81 +	/* XEN mov r13=IA64_KR(CURRENT);*/	/* establish `current' */			\
    6.82 +	MINSTATE_GET_CURRENT_VIRT(r13);		/* XEN establish `current' */			\
    6.83  	;;											\
    6.84  .mem.offset 0,0; st8.spill [r16]=r15,16;							\
    6.85  .mem.offset 8,0; st8.spill [r17]=r14,16;							\
     7.1 --- a/xen/arch/ia64/linux-xen/unwind.c	Fri Jul 07 10:36:31 2006 -0600
     7.2 +++ b/xen/arch/ia64/linux-xen/unwind.c	Sun Jul 09 20:04:23 2006 -0600
     7.3 @@ -2056,6 +2056,28 @@ init_frame_info (struct unw_frame_info *
     7.4  }
     7.5  
     7.6  void
     7.7 +unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
     7.8 +			    struct pt_regs *pt, struct switch_stack *sw)
     7.9 +{
    7.10 +	unsigned long sof;
    7.11 +
    7.12 +	init_frame_info(info, t, sw, pt->r12);
    7.13 +	info->cfm_loc = &pt->cr_ifs;
    7.14 +	info->unat_loc = &pt->ar_unat;
    7.15 +	info->pfs_loc = &pt->ar_pfs;
    7.16 +	sof = *info->cfm_loc & 0x7f;
    7.17 +	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
    7.18 +	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
    7.19 +	info->pt = (unsigned long) pt;
    7.20 +	UNW_DPRINT(3, "unwind.%s:\n"
    7.21 +		   "  bsp    0x%lx\n"
    7.22 +		   "  sof    0x%lx\n"
    7.23 +		   "  ip     0x%lx\n",
    7.24 +		   __FUNCTION__, info->bsp, sof, info->ip);
    7.25 +	find_save_locs(info);
    7.26 +}
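[Editor's note] The frame recovery above deserves a gloss: the low 7 bits of
CFM (here cr.ifs) give the size of the interrupted frame (sof), and bsp is
rewound by that many RSE slots to reach the frame's base.  The rewind cannot
be plain pointer arithmetic because the RSE interleaves a NaT collection
every 64 slots, which is what ia64_rse_skip_regs() accounts for.  Roughly
(illustrative; `top' is info->regstk.top as above):

	unsigned long sof  = pt->cr_ifs & 0x7f;		/* size of frame */
	unsigned long *bsp = ia64_rse_skip_regs((unsigned long *) top,
						-(long) sof);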
    7.27 +
    7.28 +void
    7.29  unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
    7.30  {
    7.31  	unsigned long sol;
     8.1 --- a/xen/arch/ia64/xen/xenmisc.c	Fri Jul 07 10:36:31 2006 -0600
     8.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Sun Jul 09 20:04:23 2006 -0600
     8.3 @@ -28,8 +28,6 @@ unsigned long loops_per_jiffy = (1<<12);
     8.4  /* FIXME: where these declarations should be there ? */
     8.5  extern void show_registers(struct pt_regs *regs);
     8.6  
     8.7 -void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
     8.8 -void ia64_mca_cpu_init(void *x) { }
     8.9  void hpsim_setup(char **x)
    8.10  {
    8.11  #ifdef CONFIG_SMP
     9.1 --- a/xen/include/asm-ia64/linux-xen/asm/README.origin	Fri Jul 07 10:36:31 2006 -0600
     9.2 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin	Sun Jul 09 20:04:23 2006 -0600
     9.3 @@ -5,6 +5,7 @@
     9.4  # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
     9.5  # easily updated to future versions of the corresponding Linux files.
     9.6  
     9.7 +asmmacro.h		-> linux/include/asm-ia64/asmmacro.h
     9.8  cache.h			-> linux/include/asm-ia64/cache.h
     9.9  gcc_intrin.h		-> linux/include/asm-ia64/gcc_intrin.h
    9.10  ia64regs.h		-> linux/include/asm-ia64/ia64regs.h
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h	Sun Jul 09 20:04:23 2006 -0600
    10.3 @@ -0,0 +1,119 @@
    10.4 +#ifndef _ASM_IA64_ASMMACRO_H
    10.5 +#define _ASM_IA64_ASMMACRO_H
    10.6 +
    10.7 +/*
    10.8 + * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
    10.9 + *	David Mosberger-Tang <davidm@hpl.hp.com>
   10.10 + */
   10.11 +
   10.12 +#include <linux/config.h>
   10.13 +
   10.14 +#define ENTRY(name)				\
   10.15 +	.align 32;				\
   10.16 +	.proc name;				\
   10.17 +name:
   10.18 +
   10.19 +#define ENTRY_MIN_ALIGN(name)			\
   10.20 +	.align 16;				\
   10.21 +	.proc name;				\
   10.22 +name:
   10.23 +
   10.24 +#define GLOBAL_ENTRY(name)			\
   10.25 +	.global name;				\
   10.26 +	ENTRY(name)
   10.27 +
   10.28 +#define END(name)				\
   10.29 +	.endp name
   10.30 +
   10.31 +/*
   10.32 + * Helper macros to make unwind directives more readable:
   10.33 + */
   10.34 +
   10.35 +/* prologue_gr: */
   10.36 +#define ASM_UNW_PRLG_RP			0x8
   10.37 +#define ASM_UNW_PRLG_PFS		0x4
   10.38 +#define ASM_UNW_PRLG_PSP		0x2
   10.39 +#define ASM_UNW_PRLG_PR			0x1
   10.40 +#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
   10.41 +
   10.42 +/*
   10.43 + * Helper macros for accessing user memory.
   10.44 + */
   10.45 +
   10.46 +	.section "__ex_table", "a"		// declare section & section attributes
   10.47 +	.previous
   10.48 +
   10.49 +# define EX(y,x...)				\
   10.50 +	.xdata4 "__ex_table", 99f-., y-.;	\
   10.51 +  [99:]	x
   10.52 +# define EXCLR(y,x...)				\
   10.53 +	.xdata4 "__ex_table", 99f-., y-.+4;	\
   10.54 +  [99:]	x
   10.55 +
   10.56 +/*
   10.57 + * Mark instructions that need a load of a virtual address patched to be
   10.58 + * a load of a physical address.  We use this either in critical performance
   10.59 + * path (ivt.S - TLB miss processing) or in places where it might not be
   10.60 + * safe to use a "tpa" instruction (mca_asm.S - error recovery).
   10.61 + */
   10.62 +	.section ".data.patch.vtop", "a"	// declare section & section attributes
   10.63 +	.previous
   10.64 +
   10.65 +#ifdef XEN
   10.66 +#define	LOAD_PHYSICAL(pr, reg, obj)		\
   10.67 +[1:](pr)movl reg = obj;;			\
   10.68 +	shl reg = reg,4;;			\
   10.69 +	shr.u reg = reg,4;;			\
   10.70 +	.xdata4 ".data.patch.vtop", 1b-.
   10.71 +#else
   10.72 +#define	LOAD_PHYSICAL(pr, reg, obj)		\
   10.73 +[1:](pr)movl reg = obj;				\
   10.74 +	.xdata4 ".data.patch.vtop", 1b-.
   10.75 +#endif
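[Editor's note] The Xen variant's extra shl/shr.u pair clears bits 63:60.
Since Xen links at 0xf000... (see the xensystem.h hunk below), that strips
the region nibble and leaves a physical offset whether or not the
.data.patch.vtop fixup has run yet.  As C (illustrative):

	/* shl reg,4 then shr.u reg,4 == drop the top nibble */
	unsigned long paddr = vaddr & ((1UL << 60) - 1);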
   10.76 +
   10.77 +/*
   10.78 + * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
   10.79 + * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
   10.80 + */
   10.81 +#define DO_MCKINLEY_E9_WORKAROUND
   10.82 +
   10.83 +#ifdef DO_MCKINLEY_E9_WORKAROUND
   10.84 +	.section ".data.patch.mckinley_e9", "a"
   10.85 +	.previous
   10.86 +/* workaround for Itanium 2 Errata 9: */
   10.87 +# define FSYS_RETURN					\
   10.88 +	.xdata4 ".data.patch.mckinley_e9", 1f-.;	\
   10.89 +1:{ .mib;						\
   10.90 +	nop.m 0;					\
   10.91 +	mov r16=ar.pfs;					\
   10.92 +	br.call.sptk.many b7=2f;;			\
   10.93 +  };							\
   10.94 +2:{ .mib;						\
   10.95 +	nop.m 0;					\
   10.96 +	mov ar.pfs=r16;					\
   10.97 +	br.ret.sptk.many b6;;				\
   10.98 +  }
   10.99 +#else
  10.100 +# define FSYS_RETURN	br.ret.sptk.many b6
  10.101 +#endif
  10.102 +
  10.103 +/*
  10.104 + * Up until early 2004, use of .align within a function caused bad unwind info.
  10.105 + * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
  10.106 + * otherwise.
  10.107 + */
  10.108 +#ifdef HAVE_WORKING_TEXT_ALIGN
  10.109 +# define TEXT_ALIGN(n)	.align n
  10.110 +#else
  10.111 +# define TEXT_ALIGN(n)
  10.112 +#endif
  10.113 +
  10.114 +#ifdef HAVE_SERIALIZE_DIRECTIVE
  10.115 +# define dv_serialize_data		.serialize.data
  10.116 +# define dv_serialize_instruction	.serialize.instruction
  10.117 +#else
  10.118 +# define dv_serialize_data
  10.119 +# define dv_serialize_instruction
  10.120 +#endif
  10.121 +
  10.122 +#endif /* _ASM_IA64_ASMMACRO_H */
    11.1 --- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h	Fri Jul 07 10:36:31 2006 -0600
    11.2 +++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h	Sun Jul 09 20:04:23 2006 -0600
    11.3 @@ -58,7 +58,9 @@
    11.4  #endif
    11.5  
    11.6  #ifdef XEN
    11.7 -//FIXME LATER
     11.8 +#define GET_THIS_PADDR(reg, var)		\
     11.9 +	movl	reg = THIS_CPU(var);;		\
    11.10 +	tpa	reg = reg
   11.11  #else
   11.12  #define GET_THIS_PADDR(reg, var)		\
   11.13  	mov	reg = IA64_KR(PER_CPU_DATA);;	\
    12.1 --- a/xen/include/asm-ia64/linux-xen/asm/system.h	Fri Jul 07 10:36:31 2006 -0600
    12.2 +++ b/xen/include/asm-ia64/linux-xen/asm/system.h	Sun Jul 09 20:04:23 2006 -0600
    12.3 @@ -19,8 +19,8 @@
    12.4  #include <asm/pal.h>
    12.5  #include <asm/percpu.h>
    12.6  
    12.7 +#ifndef XEN
    12.8  #define GATE_ADDR		__IA64_UL_CONST(0xa000000000000000)
    12.9 -#ifndef XEN
   12.10  /*
   12.11   * 0xa000000000000000+2*PERCPU_PAGE_SIZE
   12.12   * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
    13.1 --- a/xen/include/asm-ia64/linux/asm/README.origin	Fri Jul 07 10:36:31 2006 -0600
    13.2 +++ b/xen/include/asm-ia64/linux/asm/README.origin	Sun Jul 09 20:04:23 2006 -0600
    13.3 @@ -5,7 +5,6 @@
    13.4  # the instructions in the README there.
    13.5  
    13.6  acpi.h			-> linux/include/asm-ia64/acpi.h
    13.7 -asmmacro.h		-> linux/include/asm-ia64/asmmacro.h
    13.8  atomic.h		-> linux/include/asm-ia64/atomic.h
    13.9  bitops.h		-> linux/include/asm-ia64/bitops.h
   13.10  break.h			-> linux/include/asm-ia64/break.h
    14.1 --- a/xen/include/asm-ia64/linux/asm/asmmacro.h	Fri Jul 07 10:36:31 2006 -0600
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,111 +0,0 @@
    14.4 -#ifndef _ASM_IA64_ASMMACRO_H
    14.5 -#define _ASM_IA64_ASMMACRO_H
    14.6 -
    14.7 -/*
    14.8 - * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
    14.9 - *	David Mosberger-Tang <davidm@hpl.hp.com>
   14.10 - */
   14.11 -
   14.12 -#include <linux/config.h>
   14.13 -
   14.14 -#define ENTRY(name)				\
   14.15 -	.align 32;				\
   14.16 -	.proc name;				\
   14.17 -name:
   14.18 -
   14.19 -#define ENTRY_MIN_ALIGN(name)			\
   14.20 -	.align 16;				\
   14.21 -	.proc name;				\
   14.22 -name:
   14.23 -
   14.24 -#define GLOBAL_ENTRY(name)			\
   14.25 -	.global name;				\
   14.26 -	ENTRY(name)
   14.27 -
   14.28 -#define END(name)				\
   14.29 -	.endp name
   14.30 -
   14.31 -/*
   14.32 - * Helper macros to make unwind directives more readable:
   14.33 - */
   14.34 -
   14.35 -/* prologue_gr: */
   14.36 -#define ASM_UNW_PRLG_RP			0x8
   14.37 -#define ASM_UNW_PRLG_PFS		0x4
   14.38 -#define ASM_UNW_PRLG_PSP		0x2
   14.39 -#define ASM_UNW_PRLG_PR			0x1
   14.40 -#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
   14.41 -
   14.42 -/*
   14.43 - * Helper macros for accessing user memory.
   14.44 - */
   14.45 -
   14.46 -	.section "__ex_table", "a"		// declare section & section attributes
   14.47 -	.previous
   14.48 -
   14.49 -# define EX(y,x...)				\
   14.50 -	.xdata4 "__ex_table", 99f-., y-.;	\
   14.51 -  [99:]	x
   14.52 -# define EXCLR(y,x...)				\
   14.53 -	.xdata4 "__ex_table", 99f-., y-.+4;	\
   14.54 -  [99:]	x
   14.55 -
   14.56 -/*
   14.57 - * Mark instructions that need a load of a virtual address patched to be
   14.58 - * a load of a physical address.  We use this either in critical performance
   14.59 - * path (ivt.S - TLB miss processing) or in places where it might not be
   14.60 - * safe to use a "tpa" instruction (mca_asm.S - error recovery).
   14.61 - */
   14.62 -	.section ".data.patch.vtop", "a"	// declare section & section attributes
   14.63 -	.previous
   14.64 -
   14.65 -#define	LOAD_PHYSICAL(pr, reg, obj)		\
   14.66 -[1:](pr)movl reg = obj;				\
   14.67 -	.xdata4 ".data.patch.vtop", 1b-.
   14.68 -
   14.69 -/*
   14.70 - * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
   14.71 - * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
   14.72 - */
   14.73 -#define DO_MCKINLEY_E9_WORKAROUND
   14.74 -
   14.75 -#ifdef DO_MCKINLEY_E9_WORKAROUND
   14.76 -	.section ".data.patch.mckinley_e9", "a"
   14.77 -	.previous
   14.78 -/* workaround for Itanium 2 Errata 9: */
   14.79 -# define FSYS_RETURN					\
   14.80 -	.xdata4 ".data.patch.mckinley_e9", 1f-.;	\
   14.81 -1:{ .mib;						\
   14.82 -	nop.m 0;					\
   14.83 -	mov r16=ar.pfs;					\
   14.84 -	br.call.sptk.many b7=2f;;			\
   14.85 -  };							\
   14.86 -2:{ .mib;						\
   14.87 -	nop.m 0;					\
   14.88 -	mov ar.pfs=r16;					\
   14.89 -	br.ret.sptk.many b6;;				\
   14.90 -  }
   14.91 -#else
   14.92 -# define FSYS_RETURN	br.ret.sptk.many b6
   14.93 -#endif
   14.94 -
   14.95 -/*
   14.96 - * Up until early 2004, use of .align within a function caused bad unwind info.
   14.97 - * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
   14.98 - * otherwise.
   14.99 - */
  14.100 -#ifdef HAVE_WORKING_TEXT_ALIGN
  14.101 -# define TEXT_ALIGN(n)	.align n
  14.102 -#else
  14.103 -# define TEXT_ALIGN(n)
  14.104 -#endif
  14.105 -
  14.106 -#ifdef HAVE_SERIALIZE_DIRECTIVE
  14.107 -# define dv_serialize_data		.serialize.data
  14.108 -# define dv_serialize_instruction	.serialize.instruction
  14.109 -#else
  14.110 -# define dv_serialize_data
  14.111 -# define dv_serialize_instruction
  14.112 -#endif
  14.113 -
  14.114 -#endif /* _ASM_IA64_ASMMACRO_H */
    15.1 --- a/xen/include/asm-ia64/xensystem.h	Fri Jul 07 10:36:31 2006 -0600
    15.2 +++ b/xen/include/asm-ia64/xensystem.h	Sun Jul 09 20:04:23 2006 -0600
    15.3 @@ -19,6 +19,7 @@
    15.4  
    15.5  #define HYPERVISOR_VIRT_START	 0xe800000000000000
    15.6  #define KERNEL_START		 0xf000000004000000
    15.7 +#define GATE_ADDR		KERNEL_START
    15.8  #define DEFAULT_SHAREDINFO_ADDR	 0xf100000000000000
    15.9  #define PERCPU_ADDR		 (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
   15.10  #define VHPT_ADDR		 0xf200000000000000