ia64/xen-unstable

changeset 12445:8a4c9ff5fa42

[IA64] MCA support - patch for MCA handler

Signed-off-by: Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
Signed-off-by: Kazuhiro Suzuki <kaz@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Sun Oct 29 09:27:09 2006 -0700 (2006-10-29)
parents cf05aabe6e65
children dc4a352d2143
files xen/arch/ia64/linux-xen/mca.c
line diff
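In outline: instead of logging SAL error records through salinfo as native
Linux does, the Xen paths below queue each record and raise a virtual IRQ so
that dom0 can collect it.  A minimal sketch of the corrected-error path, using
names from the diff (the dom0-side consumer is not part of this changeset):

	/* Sketch only -- not part of the patch.  ia64_log_queue() and
	 * sal_queue[] are added below; send_guest_vcpu_virq() is Xen's
	 * event-channel interface (xen/event.h); VIRQ_MCA_CMC is defined
	 * elsewhere in this patch series. */

	/* 1. Pull the record from SAL and append a sal_queue_entry_t
	 *    describing it to the per-type queue: */
	ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);

	/* 2. Notify dom0's vcpu0; dom0 is expected to drain sal_queue[]
	 *    and fetch the record through a separate interface: */
	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);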
     1.1 --- a/xen/arch/ia64/linux-xen/mca.c	Wed Oct 25 15:29:00 2006 -0600
     1.2 +++ b/xen/arch/ia64/linux-xen/mca.c	Sun Oct 29 09:27:09 2006 -0700
     1.3 @@ -81,6 +81,9 @@
     1.4  #include <xen/symbols.h>
     1.5  #include <xen/mm.h>
     1.6  #include <xen/console.h>
     1.7 +#include <xen/event.h>
     1.8 +#include <xen/softirq.h>
     1.9 +#include <asm/xenmca.h>
    1.10  #endif
    1.11  
    1.12  #if defined(IA64_MCA_DEBUG_INFO)
    1.13 @@ -108,18 +111,27 @@ unsigned long __per_cpu_mca[NR_CPUS];
    1.14  /* In mca_asm.S */
    1.15  extern void			ia64_monarch_init_handler (void);
    1.16  extern void			ia64_slave_init_handler (void);
    1.17 +#ifdef XEN
    1.18 +extern void setup_vector (unsigned int vec, struct irqaction *action);
    1.19 +#define setup_irq(irq, action)	setup_vector(irq, action)
    1.20 +#endif
    1.21  
    1.22  static ia64_mc_info_t		ia64_mc_info;
    1.23  
    1.24 -#ifndef XEN
    1.25  #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
    1.26  #define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
    1.27  #define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
    1.28  #define CPE_HISTORY_LENGTH    5
    1.29  #define CMC_HISTORY_LENGTH    5
    1.30  
     1.31 +#ifndef XEN
    1.32  static struct timer_list cpe_poll_timer;
    1.33  static struct timer_list cmc_poll_timer;
    1.34 +#else
    1.35 +#define mod_timer(timer, expires)	set_timer(timer, expires)
    1.36 +static struct timer cpe_poll_timer;
    1.37 +static struct timer cmc_poll_timer;
    1.38 +#endif
    1.39  /*
    1.40   * This variable tells whether we are currently in polling mode.
    1.41   * Start with this in the wrong state so we won't play w/ timers
    1.42 @@ -136,11 +148,9 @@ static int cmc_polling_enabled = 1;
    1.43  static int cpe_poll_enabled = 1;
    1.44  
    1.45  extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
    1.46 -#endif /* !XEN */
    1.47  
    1.48  static int mca_init;
    1.49  
    1.50 -#ifndef XEN
    1.51  /*
    1.52   * IA64_MCA log support
    1.53   */
    1.54 @@ -157,11 +167,24 @@ typedef struct ia64_state_log_s
    1.55  
    1.56  static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
    1.57  
    1.58 +#ifndef XEN
    1.59  #define IA64_LOG_ALLOCATE(it, size) \
    1.60  	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
    1.61  		(ia64_err_rec_t *)alloc_bootmem(size); \
    1.62  	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
    1.63  		(ia64_err_rec_t *)alloc_bootmem(size);}
    1.64 +#else
    1.65 +#define IA64_LOG_ALLOCATE(it, size) \
    1.66 +	do { \
    1.67 +		unsigned int pageorder; \
    1.68 +		pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu)); \
    1.69 +		ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
    1.70 +		  (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
    1.71 +		ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
    1.72 +		  (ia64_err_rec_t *)alloc_xenheap_pages(pageorder); \
    1.73 +	} while(0)
    1.74 +#endif
    1.75 +
    1.76  #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
    1.77  #define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
    1.78  #define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
    1.79 @@ -176,6 +199,12 @@ static ia64_state_log_t ia64_state_log[I
    1.80  #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
    1.81  #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
    1.82  
    1.83 +#ifdef XEN
    1.84 +struct list_head sal_queue[IA64_MAX_LOG_TYPES];
    1.85 +sal_log_record_header_t *sal_record = NULL;
    1.86 +DEFINE_SPINLOCK(sal_queue_lock);
    1.87 +#endif
    1.88 +
    1.89  /*
    1.90   * ia64_log_init
    1.91   *	Reset the OS ia64 log buffer
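The queue entries carry enough metadata for dom0 to retrieve the right record.
sal_queue_entry_t itself lives in asm/xenmca.h, which this changeset does not
touch; inferred from the fields initialized in ia64_log_queue() below, its
shape is roughly the following (field order and exact types are guesses):

	/* Hypothetical reconstruction -- see asm/xenmca.h for the real one. */
	typedef struct sal_queue_entry {
		struct list_head list;      /* linkage on sal_queue[type]   */
		int cpuid;                  /* CPU that produced the record */
		int sal_info_type;          /* SAL_INFO_TYPE_{MCA,...}      */
		unsigned int vector;        /* h/w vector (IA64_CMC_VECTOR) */
		unsigned int virq;          /* VIRQ delivered to dom0       */
		unsigned int length;        /* total record length in bytes */
	} sal_queue_entry_t;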
    1.92 @@ -200,8 +229,19 @@ ia64_log_init(int sal_info_type)
    1.93  	IA64_LOG_ALLOCATE(sal_info_type, max_size);
    1.94  	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
    1.95  	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
    1.96 +
    1.97 +#ifdef XEN
    1.98 +	if (sal_record == NULL) {
    1.99 +		unsigned int pageorder;
   1.100 +		pageorder  = get_order_from_bytes(max_size);
   1.101 +		sal_record = (sal_log_record_header_t *)
   1.102 +		             alloc_xenheap_pages(pageorder);
   1.103 +		BUG_ON(sal_record == NULL);
   1.104 +	}
   1.105 +#endif
   1.106  }
   1.107  
   1.108 +#ifndef XEN
   1.109  /*
   1.110   * ia64_log_get
   1.111   *
   1.112 @@ -277,16 +317,160 @@ ia64_mca_log_sal_error_record(int sal_in
   1.113  	if (rh->severity == sal_log_severity_corrected)
   1.114  		ia64_sal_clear_state_info(sal_info_type);
   1.115  }
   1.116 +#else /* !XEN */
   1.117 +/*
   1.118 + * ia64_log_queue
   1.119 + *
    1.120 + *	Get the current MCA log from SAL, copy it into the OS log buffer,
    1.121 + *	and queue an entry so that the record can be reported to dom0.
    1.122 + *  Inputs  :   sal_info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
    1.123 + *              virq            (virtual IRQ used to notify dom0)
    1.124 + *  Outputs :   total record length (0 if no record was pending)
   1.125 + *
   1.126 + */
   1.127 +static u64
   1.128 +ia64_log_queue(int sal_info_type, int virq)
   1.129 +{
   1.130 +	sal_log_record_header_t     *log_buffer;
   1.131 +	u64                         total_len = 0;
   1.132 +	int                         s;
   1.133 +	sal_queue_entry_t	    *e;
   1.134 +	unsigned long		    flags;
   1.135 +
   1.136 +	IA64_LOG_LOCK(sal_info_type);
   1.137 +
   1.138 +	/* Get the process state information */
   1.139 +	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
   1.140 +
   1.141 +	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
   1.142 +
   1.143 +	if (total_len) {
   1.144 +		int queue_type;
   1.145 +
   1.146 +		spin_lock_irqsave(&sal_queue_lock, flags);
   1.147 +
   1.148 +		if (sal_info_type == SAL_INFO_TYPE_MCA && virq == VIRQ_MCA_CMC)
   1.149 +			queue_type = SAL_INFO_TYPE_CMC;
   1.150 +		else
   1.151 +			queue_type = sal_info_type;
   1.152 +
   1.153 +		e = xmalloc(sal_queue_entry_t);
   1.154 +		BUG_ON(e == NULL);
   1.155 +		e->cpuid = smp_processor_id();
   1.156 +		e->sal_info_type = sal_info_type;
   1.157 +		e->vector = IA64_CMC_VECTOR;
   1.158 +		e->virq = virq;
   1.159 +		e->length = total_len;
   1.160 +
   1.161 +		list_add_tail(&e->list, &sal_queue[queue_type]);
   1.162 +		spin_unlock_irqrestore(&sal_queue_lock, flags);
   1.163 +
   1.164 +		IA64_LOG_INDEX_INC(sal_info_type);
   1.165 +		IA64_LOG_UNLOCK(sal_info_type);
   1.166 +		if (sal_info_type != SAL_INFO_TYPE_MCA &&
   1.167 +		    sal_info_type != SAL_INFO_TYPE_INIT) {
   1.168 +			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
   1.169 +				       "Record length = %ld\n", __FUNCTION__,
   1.170 +			               sal_info_type, total_len);
   1.171 +		}
   1.172 +		return total_len;
   1.173 +	} else {
   1.174 +		IA64_LOG_UNLOCK(sal_info_type);
   1.175 +		return 0;
   1.176 +	}
   1.177 +}
   1.178 +#endif /* !XEN */
   1.179  
   1.180  /*
   1.181   * platform dependent error handling
   1.182   */
   1.183 -#endif /* !XEN */
   1.184  #ifndef PLATFORM_MCA_HANDLERS
   1.185 -#ifndef XEN
   1.186  
   1.187  #ifdef CONFIG_ACPI
   1.188  
   1.189 +#ifdef XEN
   1.190 +/**
    1.191 + *	Copied from linux/include/asm-generic/bug.h
   1.192 + */
   1.193 +#define WARN_ON(condition) do { \
   1.194 +	if (unlikely((condition)!=0)) { \
   1.195 +		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
   1.196 +		dump_stack(); \
   1.197 +	} \
   1.198 +} while (0)
   1.199 +
   1.200 +/**
    1.201 + *	Copied from linux/kernel/irq/manage.c
   1.202 + *
   1.203 + *	disable_irq_nosync - disable an irq without waiting
   1.204 + *	@irq: Interrupt to disable
   1.205 + *
   1.206 + *	Disable the selected interrupt line.  Disables and Enables are
   1.207 + *	nested.
   1.208 + *	Unlike disable_irq(), this function does not ensure existing
   1.209 + *	instances of the IRQ handler have completed before returning.
   1.210 + *
   1.211 + *	This function may be called from IRQ context.
   1.212 + */
   1.213 +void disable_irq_nosync(unsigned int irq)
   1.214 +{
   1.215 +	irq_desc_t *desc = irq_desc + irq;
   1.216 +	unsigned long flags;
   1.217 +
   1.218 +	if (irq >= NR_IRQS)
   1.219 +		return;
   1.220 +
   1.221 +	spin_lock_irqsave(&desc->lock, flags);
   1.222 +	if (!desc->depth++) {
   1.223 +		desc->status |= IRQ_DISABLED;
   1.224 +		desc->handler->disable(irq);
   1.225 +	}
   1.226 +	spin_unlock_irqrestore(&desc->lock, flags);
   1.227 +}
   1.228 +
   1.229 +/**
    1.230 + *	Copied from linux/kernel/irq/manage.c
   1.231 + *
   1.232 + *	enable_irq - enable handling of an irq
   1.233 + *	@irq: Interrupt to enable
   1.234 + *
   1.235 + *	Undoes the effect of one call to disable_irq().  If this
   1.236 + *	matches the last disable, processing of interrupts on this
   1.237 + *	IRQ line is re-enabled.
   1.238 + *
   1.239 + *	This function may be called from IRQ context.
   1.240 + */
   1.241 +void enable_irq(unsigned int irq)
   1.242 +{
   1.243 +	irq_desc_t *desc = irq_desc + irq;
   1.244 +	unsigned long flags;
   1.245 +
   1.246 +	if (irq >= NR_IRQS)
   1.247 +		return;
   1.248 +
   1.249 +	spin_lock_irqsave(&desc->lock, flags);
   1.250 +	switch (desc->depth) {
   1.251 +	case 0:
   1.252 +		WARN_ON(1);
   1.253 +		break;
   1.254 +	case 1: {
   1.255 +		unsigned int status = desc->status & ~IRQ_DISABLED;
   1.256 +
   1.257 +		desc->status = status;
   1.258 +		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
   1.259 +			desc->status = status | IRQ_REPLAY;
   1.260 +			hw_resend_irq(desc->handler,irq);
   1.261 +		}
   1.262 +		desc->handler->enable(irq);
   1.263 +		/* fall-through */
   1.264 +	}
   1.265 +	default:
   1.266 +		desc->depth--;
   1.267 +	}
   1.268 +	spin_unlock_irqrestore(&desc->lock, flags);
   1.269 +}
   1.270 +#endif	/* XEN */
   1.271 +
   1.272  int cpe_vector = -1;
   1.273  
   1.274  static irqreturn_t
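The two routines above preserve Linux's nesting contract: disables stack via
desc->depth, and only the enable that balances the outermost disable actually
unmasks the line, replaying any interrupt that arrived while it was masked.
A hypothetical call sequence to illustrate the depth semantics:

	disable_irq_nosync(irq);  /* depth 0 -> 1: line masked             */
	disable_irq_nosync(irq);  /* depth 1 -> 2: still masked            */
	enable_irq(irq);          /* depth 2 -> 1: still masked            */
	enable_irq(irq);          /* depth 1 -> 0: unmasked; a pending     */
	                          /* interrupt is resent via hw_resend_irq */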
   1.275 @@ -302,8 +486,15 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
   1.276  	/* SAL spec states this should run w/ interrupts enabled */
   1.277  	local_irq_enable();
   1.278  
   1.279 +#ifndef XEN
   1.280  	/* Get the CPE error record and log it */
   1.281  	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
   1.282 +#else
    1.283 +	/* CPE errors are not reported to dom0 yet; the code below is
    1.284 +	   reserved for a future implementation. */
   1.285 +/* 	ia64_log_queue(SAL_INFO_TYPE_CPE, VIRQ_MCA_CPE); */
   1.286 +/* 	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CPE); */
   1.287 +#endif
   1.288  
   1.289  	spin_lock(&cpe_history_lock);
   1.290  	if (!cpe_poll_enabled && cpe_vector >= 0) {
   1.291 @@ -345,7 +536,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, v
   1.292  }
   1.293  
   1.294  #endif /* CONFIG_ACPI */
   1.295 -#endif /* !XEN */
   1.296  
   1.297  static void
   1.298  show_min_state (pal_min_state_area_t *minstate)
   1.299 @@ -593,7 +783,6 @@ init_handler_platform (pal_min_state_are
   1.300  	while (1);			/* hang city if no debugger */
   1.301  }
   1.302  
   1.303 -#ifndef XEN
   1.304  #ifdef CONFIG_ACPI
   1.305  /*
   1.306   * ia64_mca_register_cpev
   1.307 @@ -624,9 +813,7 @@ ia64_mca_register_cpev (int cpev)
   1.308  }
   1.309  #endif /* CONFIG_ACPI */
   1.310  
   1.311 -#endif /* !XEN */
   1.312  #endif /* PLATFORM_MCA_HANDLERS */
   1.313 -#ifndef XEN
   1.314  
   1.315  /*
   1.316   * ia64_mca_cmc_vector_setup
   1.317 @@ -713,6 +900,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
   1.318  		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
   1.319  }
   1.320  
   1.321 +#ifndef XEN
   1.322  /*
   1.323   * ia64_mca_cmc_vector_disable_keventd
   1.324   *
   1.325 @@ -736,6 +924,7 @@ ia64_mca_cmc_vector_enable_keventd(void 
   1.326  {
   1.327  	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
   1.328  }
    1.329 +#endif /* !XEN */
   1.330  
   1.331  /*
   1.332   * ia64_mca_wakeup_ipi_wait
   1.333 @@ -887,15 +1076,26 @@ ia64_mca_wakeup_int_handler(int wakeup_i
   1.334  static void
   1.335  ia64_return_to_sal_check(int recover)
   1.336  {
   1.337 +#ifdef XEN
   1.338 +	int cpu = smp_processor_id();
   1.339 +#endif
   1.340  
   1.341  	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
   1.342  	 * so that it can be used at the time of os_mca_to_sal_handoff
   1.343  	 */
   1.344 +#ifdef XEN
   1.345 +	ia64_os_to_sal_handoff_state.imots_sal_gp =
   1.346 +		ia64_sal_to_os_handoff_state[cpu].imsto_sal_gp;
   1.347 +
   1.348 +	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
   1.349 +		ia64_sal_to_os_handoff_state[cpu].imsto_sal_check_ra;
   1.350 +#else
   1.351  	ia64_os_to_sal_handoff_state.imots_sal_gp =
   1.352  		ia64_sal_to_os_handoff_state.imsto_sal_gp;
   1.353  
   1.354  	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
   1.355  		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
   1.356 +#endif
   1.357  
   1.358  	if (recover)
   1.359  		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
   1.360 @@ -905,8 +1105,13 @@ ia64_return_to_sal_check(int recover)
   1.361  	/* Default = tell SAL to return to same context */
   1.362  	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
   1.363  
   1.364 +#ifdef XEN
   1.365 +	ia64_os_to_sal_handoff_state.imots_new_min_state =
   1.366 +		(u64 *)ia64_sal_to_os_handoff_state[cpu].pal_min_state;
   1.367 +#else
   1.368  	ia64_os_to_sal_handoff_state.imots_new_min_state =
   1.369  		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
   1.370 +#endif
   1.371  
   1.372  }
   1.373  
   1.374 @@ -954,27 +1159,44 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
   1.375  void
   1.376  ia64_mca_ucmc_handler(void)
   1.377  {
   1.378 +#ifdef XEN
   1.379 +	int cpu = smp_processor_id();
   1.380 +	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
   1.381 +		&ia64_sal_to_os_handoff_state[cpu].proc_state_param;
   1.382 +#else
   1.383  	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
   1.384  		&ia64_sal_to_os_handoff_state.proc_state_param;
   1.385 +#endif
   1.386  	int recover; 
   1.387  
   1.388 +#ifndef XEN
   1.389  	/* Get the MCA error record and log it */
   1.390  	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
   1.391 +#else
   1.392 +	ia64_log_queue(SAL_INFO_TYPE_MCA, VIRQ_MCA_CMC);
   1.393 +	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
   1.394 +#endif
   1.395  
    1.396  	/* Recover if a TLB error is the only error in this SAL error record */
   1.397  	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
   1.398  	/* other error recovery */
   1.399 +#ifndef XEN
   1.400  	   || (ia64_mca_ucmc_extension 
   1.401  		&& ia64_mca_ucmc_extension(
   1.402  			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
   1.403  			&ia64_sal_to_os_handoff_state,
   1.404  			&ia64_os_to_sal_handoff_state)); 
   1.405 +#else
   1.406 +	;
   1.407 +#endif
   1.408  
   1.409 +#ifndef XEN
   1.410  	if (recover) {
   1.411  		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
   1.412  		rh->severity = sal_log_severity_corrected;
   1.413  		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
   1.414  	}
   1.415 +#endif
   1.416  	/*
   1.417  	 *  Wakeup all the processors which are spinning in the rendezvous
   1.418  	 *  loop.
   1.419 @@ -985,8 +1207,10 @@ ia64_mca_ucmc_handler(void)
   1.420  	ia64_return_to_sal_check(recover);
   1.421  }
   1.422  
   1.423 +#ifndef XEN
   1.424  static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
   1.425  static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
   1.426 +#endif
   1.427  
   1.428  /*
   1.429   * ia64_mca_cmc_int_handler
   1.430 @@ -1016,8 +1240,13 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
   1.431  	/* SAL spec states this should run w/ interrupts enabled */
   1.432  	local_irq_enable();
   1.433  
    1.434 +#ifndef XEN
   1.435  	/* Get the CMC error record and log it */
   1.436  	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
   1.437 +#else
   1.438 +	ia64_log_queue(SAL_INFO_TYPE_CMC, VIRQ_MCA_CMC);
   1.439 +	send_guest_vcpu_virq(dom0->vcpu[0], VIRQ_MCA_CMC);
   1.440 +#endif
   1.441  
   1.442  	spin_lock(&cmc_history_lock);
   1.443  	if (!cmc_polling_enabled) {
   1.444 @@ -1034,7 +1263,12 @@ ia64_mca_cmc_int_handler(int cmc_irq, vo
   1.445  
   1.446  			cmc_polling_enabled = 1;
   1.447  			spin_unlock(&cmc_history_lock);
   1.448 +#ifndef XEN	/* XXX FIXME */
   1.449  			schedule_work(&cmc_disable_work);
   1.450 +#else
   1.451 +			cpumask_raise_softirq(cpu_online_map,
   1.452 +			                      CMC_DISABLE_SOFTIRQ);
   1.453 +#endif
   1.454  
   1.455  			/*
   1.456  			 * Corrected errors will still be corrected, but
   1.457 @@ -1083,7 +1317,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
   1.458  	if (start_count == -1)
   1.459  		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
   1.460  
   1.461 +#ifndef XEN
   1.462  	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
   1.463 +#endif
   1.464  
   1.465  	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
   1.466  
   1.467 @@ -1094,7 +1330,12 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
   1.468  		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
   1.469  
   1.470  			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
   1.471 +#ifndef XEN	/* XXX FIXME */
   1.472  			schedule_work(&cmc_enable_work);
   1.473 +#else
   1.474 +			cpumask_raise_softirq(cpu_online_map,
   1.475 +			                      CMC_ENABLE_SOFTIRQ);
   1.476 +#endif
   1.477  			cmc_polling_enabled = 0;
   1.478  
   1.479  		} else {
   1.480 @@ -1104,7 +1345,6 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
   1.481  
   1.482  		start_count = -1;
   1.483  	}
   1.484 -
   1.485  	return IRQ_HANDLED;
   1.486  }
   1.487  
   1.488 @@ -1118,7 +1358,11 @@ ia64_mca_cmc_int_caller(int cmc_irq, voi
   1.489   *
   1.490   */
   1.491  static void
   1.492 +#ifndef XEN
   1.493  ia64_mca_cmc_poll (unsigned long dummy)
   1.494 +#else
   1.495 +ia64_mca_cmc_poll (void *dummy)
   1.496 +#endif
   1.497  {
   1.498  	/* Trigger a CMC interrupt cascade  */
   1.499  	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
   1.500 @@ -1153,7 +1397,9 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
   1.501  	if (start_count == -1)
   1.502  		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
   1.503  
   1.504 +#ifndef XEN
   1.505  	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
   1.506 +#endif
   1.507  
   1.508  	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
   1.509  
   1.510 @@ -1180,7 +1426,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
   1.511  			mod_timer(&cpe_poll_timer, jiffies + poll_time);
   1.512  		start_count = -1;
   1.513  	}
   1.514 -
   1.515  	return IRQ_HANDLED;
   1.516  }
   1.517  
   1.518 @@ -1195,14 +1440,17 @@ ia64_mca_cpe_int_caller(int cpe_irq, voi
   1.519   *
   1.520   */
   1.521  static void
   1.522 +#ifndef XEN
   1.523  ia64_mca_cpe_poll (unsigned long dummy)
   1.524 +#else
   1.525 +ia64_mca_cpe_poll (void *dummy)
   1.526 +#endif
   1.527  {
   1.528  	/* Trigger a CPE interrupt cascade  */
   1.529  	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
   1.530  }
   1.531  
   1.532  #endif /* CONFIG_ACPI */
   1.533 -#endif /* !XEN */
   1.534  
   1.535  /*
   1.536   * C portion of the OS INIT handler
   1.537 @@ -1248,7 +1496,6 @@ ia64_init_handler (struct pt_regs *pt, s
   1.538  	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
   1.539  }
   1.540  
   1.541 -#ifndef XEN
   1.542  static int __init
   1.543  ia64_mca_disable_cpe_polling(char *str)
   1.544  {
   1.545 @@ -1260,42 +1507,53 @@ ia64_mca_disable_cpe_polling(char *str)
   1.546  
   1.547  static struct irqaction cmci_irqaction = {
   1.548  	.handler =	ia64_mca_cmc_int_handler,
   1.549 +#ifndef XEN
   1.550  	.flags =	SA_INTERRUPT,
   1.551 +#endif
   1.552  	.name =		"cmc_hndlr"
   1.553  };
   1.554  
   1.555  static struct irqaction cmcp_irqaction = {
   1.556  	.handler =	ia64_mca_cmc_int_caller,
   1.557 +#ifndef XEN
   1.558  	.flags =	SA_INTERRUPT,
   1.559 +#endif
   1.560  	.name =		"cmc_poll"
   1.561  };
   1.562  
   1.563  static struct irqaction mca_rdzv_irqaction = {
   1.564  	.handler =	ia64_mca_rendez_int_handler,
   1.565 +#ifndef XEN
   1.566  	.flags =	SA_INTERRUPT,
   1.567 +#endif
   1.568  	.name =		"mca_rdzv"
   1.569  };
   1.570  
   1.571  static struct irqaction mca_wkup_irqaction = {
   1.572  	.handler =	ia64_mca_wakeup_int_handler,
   1.573 +#ifndef XEN
   1.574  	.flags =	SA_INTERRUPT,
   1.575 +#endif
   1.576  	.name =		"mca_wkup"
   1.577  };
   1.578  
   1.579  #ifdef CONFIG_ACPI
   1.580  static struct irqaction mca_cpe_irqaction = {
   1.581  	.handler =	ia64_mca_cpe_int_handler,
   1.582 +#ifndef XEN
   1.583  	.flags =	SA_INTERRUPT,
   1.584 +#endif
   1.585  	.name =		"cpe_hndlr"
   1.586  };
   1.587  
   1.588  static struct irqaction mca_cpep_irqaction = {
   1.589  	.handler =	ia64_mca_cpe_int_caller,
   1.590 +#ifndef XEN
   1.591  	.flags =	SA_INTERRUPT,
   1.592 +#endif
   1.593  	.name =		"cpe_poll"
   1.594  };
   1.595  #endif /* CONFIG_ACPI */
   1.596 -#endif /* !XEN */
   1.597  
   1.598  /* Do per-CPU MCA-related initialization.  */
   1.599  
   1.600 @@ -1329,6 +1587,13 @@ ia64_mca_cpu_init(void *cpu_data)
   1.601  #endif
   1.602  		}
   1.603  	}
   1.604 +#ifdef XEN
   1.605 +	else {
   1.606 +		int i;
   1.607 +		for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
   1.608 +			ia64_log_queue(i, 0);
   1.609 +	}
   1.610 +#endif
   1.611  
   1.612          /*
   1.613           * The MCA info structure was allocated earlier and its
   1.614 @@ -1395,18 +1660,15 @@ ia64_mca_init(void)
   1.615  	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
   1.616  	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
   1.617  	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
   1.618 -#ifdef XEN
   1.619 -	s64 rc;
   1.620 -
   1.621 -	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
   1.622 -
   1.623 -	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
   1.624 -#else
   1.625  	int i;
   1.626  	s64 rc;
   1.627  	struct ia64_sal_retval isrv;
   1.628  	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
   1.629  
   1.630 +#ifdef XEN
   1.631 +	slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
   1.632 +#endif
   1.633 +
   1.634  	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
   1.635  
   1.636  	/* Clear the Rendez checkin flag for all cpus */
   1.637 @@ -1451,7 +1713,6 @@ ia64_mca_init(void)
   1.638  	}
   1.639  
   1.640  	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
   1.641 -#endif /* !XEN */
   1.642  
   1.643  	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
   1.644  	/*
   1.645 @@ -1503,7 +1764,6 @@ ia64_mca_init(void)
   1.646  
   1.647  	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
   1.648  
   1.649 -#ifndef XEN
   1.650  	/*
   1.651  	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
   1.652  	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
   1.653 @@ -1531,13 +1791,26 @@ ia64_mca_init(void)
   1.654  	ia64_log_init(SAL_INFO_TYPE_INIT);
   1.655  	ia64_log_init(SAL_INFO_TYPE_CMC);
   1.656  	ia64_log_init(SAL_INFO_TYPE_CPE);
   1.657 -#endif /* !XEN */
   1.658 +
   1.659 +#ifdef XEN
   1.660 +	INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_MCA]);
   1.661 +	INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_INIT]);
   1.662 +	INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CMC]);
   1.663 +	INIT_LIST_HEAD(&sal_queue[SAL_INFO_TYPE_CPE]);
   1.664 +
   1.665 +	open_softirq(CMC_DISABLE_SOFTIRQ,
   1.666 +	             (softirq_handler)ia64_mca_cmc_vector_disable);
   1.667 +	open_softirq(CMC_ENABLE_SOFTIRQ,
   1.668 +	             (softirq_handler)ia64_mca_cmc_vector_enable);
   1.669 +
   1.670 +	for (i = 0; i < IA64_MAX_LOG_TYPES; i++)
   1.671 +		ia64_log_queue(i, 0);
   1.672 +#endif
   1.673  
   1.674  	mca_init = 1;
   1.675  	printk(KERN_INFO "MCA related initialization done\n");
   1.676  }
   1.677  
   1.678 -#ifndef XEN
   1.679  /*
   1.680   * ia64_mca_late_init
   1.681   *
   1.682 @@ -1555,20 +1828,34 @@ ia64_mca_late_init(void)
   1.683  		return 0;
   1.684  
   1.685  	/* Setup the CMCI/P vector and handler */
   1.686 +#ifndef XEN
   1.687  	init_timer(&cmc_poll_timer);
   1.688  	cmc_poll_timer.function = ia64_mca_cmc_poll;
   1.689 +#else
   1.690 +	init_timer(&cmc_poll_timer, ia64_mca_cmc_poll, NULL, smp_processor_id());
   1.691 +	printk("INIT_TIMER(cmc_poll_timer): on cpu%d\n", smp_processor_id());
   1.692 +#endif
   1.693  
   1.694  	/* Unmask/enable the vector */
   1.695  	cmc_polling_enabled = 0;
   1.696 +#ifndef XEN	/* XXX FIXME */
   1.697  	schedule_work(&cmc_enable_work);
   1.698 +#else
   1.699 +	cpumask_raise_softirq(cpu_online_map, CMC_ENABLE_SOFTIRQ);
   1.700 +#endif
   1.701  
   1.702  	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
   1.703  
   1.704  #ifdef CONFIG_ACPI
   1.705  	/* Setup the CPEI/P vector and handler */
   1.706  	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
    1.707 +#ifndef XEN
   1.708  	init_timer(&cpe_poll_timer);
   1.709  	cpe_poll_timer.function = ia64_mca_cpe_poll;
   1.710 +#else
   1.711 +	init_timer(&cpe_poll_timer, ia64_mca_cpe_poll, NULL, smp_processor_id());
   1.712 +	printk("INIT_TIMER(cpe_poll_timer): on cpu%d\n", smp_processor_id());
   1.713 +#endif
   1.714  
   1.715  	{
   1.716  		irq_desc_t *desc;
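The timer conversion follows the same pattern as the mod_timer() shim near
the top of the patch: Xen binds the handler, its void * payload, and a CPU
when the timer is initialized, and set_timer() arms it.  A hedged sketch of
the interface assumed here (simplified; the real declarations are in Xen's
timer headers of this era):

	void init_timer(struct timer *t, void (*fn)(void *),
	                void *data, unsigned int cpu);
	void set_timer(struct timer *t, s_time_t expires);

	/* ...which is why the Linux call sites can stay almost unchanged: */
	#define mod_timer(timer, expires)	set_timer(timer, expires)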
   1.717 @@ -1598,5 +1885,8 @@ ia64_mca_late_init(void)
   1.718  	return 0;
   1.719  }
   1.720  
   1.721 +#ifndef XEN
   1.722  device_initcall(ia64_mca_late_init);
   1.723 -#endif /* !XEN */
   1.724 +#else
   1.725 +__initcall(ia64_mca_late_init);
   1.726 +#endif