ia64/xen-unstable

changeset 19373:372ec886ad0c

x86 mcheck: Provide MCA "injection" hypervisor services.

Signed-off-by: Gavin Maltby <gavin.maltby@sun.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 17 14:24:08 2009 +0000 (2009-03-17)
parents 9c1be8f2013b
children 821a4a8911f0
files xen/arch/x86/cpu/mcheck/amd_f10.c xen/arch/x86/cpu/mcheck/amd_nonfatal.c xen/arch/x86/cpu/mcheck/mce.c xen/arch/x86/cpu/mcheck/mce.h xen/include/public/arch-x86/xen-mca.h xen/include/xen/lib.h
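In summary, this changeset adds an MSR interposition table to the hypervisor MCA code and two new privileged sub-operations of the mca hypercall: XEN_MC_msrinject stages (or, where safe, directly writes) MCA MSR values for a chosen physical CPU, and XEN_MC_mceinject raises a simulated #MC (int $0x12) on that CPU. Hypervisor reads and writes of MCA MSRs now go through mca_rdmsrl()/mca_wrmsrl(), which return an interposed value in preference to hardware state and invalidate it again on write. On AMD the hypervisor sets K8_HWCR_MCi_STATUS_WREN in HWCR as needed so that real writes to bank status MSRs do not #GP; on other vendors the injector must request interposition via MC_MSRINJ_F_INTERPOSE. Both sub-ops set the new TAINT_ERROR_INJECT taint flag. Below is a minimal sketch of how a dom0 injection tool might drive the new interface; the struct xen_mc wrapper fields (cmd, interface_version, u), XEN_MCA_INTERFACE_VERSION, the MSR constants and HYPERVISOR_mca() are assumptions for illustration and are not defined by this diff.

    /* Illustrative sketch only (headers and hypercall plumbing omitted).
     * Assumes the usual struct xen_mc wrapper fields (cmd,
     * interface_version, u) and a HYPERVISOR_mca() wrapper in dom0;
     * neither is part of this changeset. */
    static int inject_fake_mce(unsigned int cpu)
    {
        struct xen_mc mc;

        /* Stage fake telemetry for MC bank 4 on the target CPU.  With
         * MC_MSRINJ_F_INTERPOSE the values live in the interposition
         * table instead of being written to hardware. */
        memset(&mc, 0, sizeof(mc));
        mc.cmd = XEN_MC_msrinject;
        mc.interface_version = XEN_MCA_INTERFACE_VERSION;
        mc.u.mc_msrinject.mcinj_cpunr = cpu;
        mc.u.mc_msrinject.mcinj_flags = MC_MSRINJ_F_INTERPOSE;
        mc.u.mc_msrinject.mcinj_count = 2;
        mc.u.mc_msrinject.mcinj_msr[0].reg   = MSR_IA32_MC0_STATUS + 4 * 4;
        mc.u.mc_msrinject.mcinj_msr[0].value = 0xb200000000000800ULL; /* VAL|UC|EN|PCC, example error code */
        mc.u.mc_msrinject.mcinj_msr[1].reg   = MSR_IA32_MCG_STATUS;
        mc.u.mc_msrinject.mcinj_msr[1].value = 0x5;                   /* RIPV | MCIP */
        if (HYPERVISOR_mca(&mc) != 0)
            return -1;

        /* Now raise a simulated #MC (int $0x12) on the same CPU so the
         * hypervisor's handler consumes the staged values. */
        memset(&mc, 0, sizeof(mc));
        mc.cmd = XEN_MC_mceinject;
        mc.interface_version = XEN_MCA_INTERFACE_VERSION;
        mc.u.mc_mceinject.mceinj_cpunr = cpu;
        return HYPERVISOR_mca(&mc);
    }

With MC_MSRINJ_F_INTERPOSE set, the staged values never touch hardware: mcheck_mca_logout() picks them up through mca_rdmsrl() when the simulated #MC is handled, and they are dropped again when the handler clears the bank status with mca_wrmsrl().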
line diff
     1.1 --- a/xen/arch/x86/cpu/mcheck/amd_f10.c	Tue Mar 17 14:22:50 2009 +0000
     1.2 +++ b/xen/arch/x86/cpu/mcheck/amd_f10.c	Tue Mar 17 14:24:08 2009 +0000
     1.3 @@ -74,9 +74,9 @@ amd_f10_handler(struct mc_info *mi, uint
     1.4  	mc_ext.mc_msr[1].reg = MSR_F10_MC4_MISC2;
     1.5  	mc_ext.mc_msr[2].reg = MSR_F10_MC4_MISC3;
     1.6  
     1.7 -	rdmsrl(MSR_F10_MC4_MISC1, mc_ext.mc_msr[0].value);
     1.8 -	rdmsrl(MSR_F10_MC4_MISC2, mc_ext.mc_msr[1].value);
     1.9 -	rdmsrl(MSR_F10_MC4_MISC3, mc_ext.mc_msr[2].value);
    1.10 +	mca_rdmsrl(MSR_F10_MC4_MISC1, mc_ext.mc_msr[0].value);
    1.11 +	mca_rdmsrl(MSR_F10_MC4_MISC2, mc_ext.mc_msr[1].value);
    1.12 +	mca_rdmsrl(MSR_F10_MC4_MISC3, mc_ext.mc_msr[2].value);
    1.13  	
    1.14  	x86_mcinfo_add(mi, &mc_ext);
    1.15  	return MCA_EXTINFO_LOCAL;
     2.1 --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c	Tue Mar 17 14:22:50 2009 +0000
     2.2 +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c	Tue Mar 17 14:24:08 2009 +0000
     2.3 @@ -147,7 +147,7 @@ static void mce_amd_work_fn(void *data)
     2.4  		uint64_t value;
     2.5  		uint32_t counter;
     2.6  
     2.7 -		rdmsrl(MSR_IA32_MC4_MISC, value);
     2.8 +		mca_rdmsrl(MSR_IA32_MC4_MISC, value);
     2.9  		/* Only the error counter field is of interest
    2.10  		 * Bit field is described in AMD K8 BKDG chapter 6.4.5.5
    2.11  		 */
    2.12 @@ -172,7 +172,7 @@ static void mce_amd_work_fn(void *data)
    2.13  			value &= ~(0x60FFF00000000ULL);
    2.14  			/* Counter enable */
    2.15  			value |= (1ULL << 51);
    2.16 -			wrmsrl(MSR_IA32_MC4_MISC, value);
    2.17 +			mca_wrmsrl(MSR_IA32_MC4_MISC, value);
    2.18  			wmb();
    2.19  		}
    2.20  	}
     3.1 --- a/xen/arch/x86/cpu/mcheck/mce.c	Tue Mar 17 14:22:50 2009 +0000
     3.2 +++ b/xen/arch/x86/cpu/mcheck/mce.c	Tue Mar 17 14:24:08 2009 +0000
     3.3 @@ -27,9 +27,11 @@ unsigned int nr_mce_banks;
     3.4  
     3.5  EXPORT_SYMBOL_GPL(nr_mce_banks);	/* non-fatal.o */
     3.6  
     3.7 +static void intpose_init(void);
     3.8  static void mcinfo_clear(struct mc_info *);
     3.9  
    3.10 -#define	SEG_PL(segsel) ((segsel) & 0x3)
    3.11 +#define	SEG_PL(segsel)			((segsel) & 0x3)
    3.12 +#define _MC_MSRINJ_F_REQ_HWCR_WREN	(1 << 16)
    3.13  
    3.14  #if 1	/* XXFM switch to 0 for putback */
    3.15  
    3.16 @@ -109,7 +111,7 @@ mctelem_cookie_t mcheck_mca_logout(enum 
    3.17  	cpu_nr = smp_processor_id();
    3.18  	BUG_ON(cpu_nr != v->processor);
    3.19  
    3.20 -	rdmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.21 +	mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.22  
    3.23  	memset(&mcg, 0, sizeof (mcg));
    3.24  	mcg.common.type = MC_TYPE_GLOBAL;
    3.25 @@ -156,7 +158,7 @@ mctelem_cookie_t mcheck_mca_logout(enum 
    3.26  		if (!test_bit(i, bankmask))
    3.27  			continue;
    3.28  
    3.29 -		rdmsrl(MSR_IA32_MC0_STATUS + i * 4, status);
    3.30 +		mca_rdmsrl(MSR_IA32_MC0_STATUS + i * 4, status);
    3.31  		if (!(status & MCi_STATUS_VAL))
    3.32  			continue;	/* this bank has no valid telemetry */
    3.33  
    3.34 @@ -189,7 +191,7 @@ mctelem_cookie_t mcheck_mca_logout(enum 
    3.35  		addr = misc = 0;
    3.36  
    3.37  		if (status & MCi_STATUS_ADDRV) {
    3.38 -			rdmsrl(MSR_IA32_MC0_ADDR + 4 * i, addr);
    3.39 +			mca_rdmsrl(MSR_IA32_MC0_ADDR + 4 * i, addr);
    3.40  			d = maddr_get_owner(addr);
    3.41  			if (d != NULL && (who == MCA_POLLER ||
    3.42  			    who == MCA_CMCI_HANDLER))
    3.43 @@ -197,13 +199,13 @@ mctelem_cookie_t mcheck_mca_logout(enum 
    3.44  		}
    3.45  
    3.46  		if (status & MCi_STATUS_MISCV)
    3.47 -			rdmsrl(MSR_IA32_MC0_MISC + 4 * i, misc);
    3.48 +			mca_rdmsrl(MSR_IA32_MC0_MISC + 4 * i, misc);
    3.49  
    3.50  		mcb.mc_addr = addr;
    3.51  		mcb.mc_misc = misc;
    3.52  
    3.53  		if (who == MCA_CMCI_HANDLER) {
    3.54 -			rdmsrl(MSR_IA32_MC0_CTL2 + i, mcb.mc_ctrl2);
    3.55 +			mca_rdmsrl(MSR_IA32_MC0_CTL2 + i, mcb.mc_ctrl2);
    3.56  			rdtscll(mcb.mc_tsc);
    3.57  		}
    3.58  
    3.59 @@ -221,7 +223,7 @@ mctelem_cookie_t mcheck_mca_logout(enum 
    3.60  		}
    3.61  
    3.62  		/* Clear status */
    3.63 -		wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL);
    3.64 +		mca_wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL);
    3.65  		wmb();
    3.66  	}
    3.67  
    3.68 @@ -281,7 +283,7 @@ void mcheck_cmn_handler(struct cpu_user_
    3.69  
    3.70  	/* Read global status;  if it does not indicate machine check
    3.71  	 * in progress then bail as long as we have a valid ip to return to. */
    3.72 -	rdmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.73 +	mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.74  	ripv = ((gstatus & MCG_STATUS_RIPV) != 0);
    3.75  	if (!(gstatus & MCG_STATUS_MCIP) && ripv) {
    3.76  		add_taint(TAINT_MACHINE_CHECK); /* questionable */
    3.77 @@ -300,7 +302,7 @@ void mcheck_cmn_handler(struct cpu_user_
    3.78  
    3.79  	/* Clear MCIP or another #MC will enter shutdown state */
    3.80  	gstatus &= ~MCG_STATUS_MCIP;
    3.81 -	wrmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.82 +	mca_wrmsrl(MSR_IA32_MCG_STATUS, gstatus);
    3.83  	wmb();
    3.84  
    3.85  	/* If no valid errors and our stack is intact, we're done */
    3.86 @@ -540,6 +542,7 @@ void mcheck_init(struct cpuinfo_x86 *c)
    3.87  		return;
    3.88  	}
    3.89  
    3.90 +	intpose_init();
    3.91  	mctelem_init(sizeof (struct mc_info));
    3.92  
    3.93  	switch (c->x86_vendor) {
    3.94 @@ -768,6 +771,203 @@ void x86_mc_get_cpu_info(unsigned cpu, u
    3.95  	}
    3.96  }
    3.97  
    3.98 +#define	INTPOSE_NENT	50
    3.99 +
   3.100 +static struct intpose_ent {
   3.101 +	unsigned  int cpu_nr;
   3.102 +	uint64_t msr;
   3.103 +	uint64_t val;
   3.104 +} intpose_arr[INTPOSE_NENT];
   3.105 +
   3.106 +static void intpose_init(void)
   3.107 +{
   3.108 +	static int done;
   3.109 +	int i;
   3.110 +
   3.111 +	if (done++ > 0)
   3.112 +		return;
   3.113 +
   3.114 +	for (i = 0; i < INTPOSE_NENT; i++) {
   3.115 +		intpose_arr[i].cpu_nr = -1;
   3.116 +	}
   3.117 +
   3.118 +}
   3.119 +
   3.120 +struct intpose_ent *intpose_lookup(unsigned int cpu_nr, uint64_t msr,
   3.121 +    uint64_t *valp)
   3.122 +{
   3.123 +	int i;
   3.124 +
   3.125 +	for (i = 0; i < INTPOSE_NENT; i++) {
   3.126 +		if (intpose_arr[i].cpu_nr == cpu_nr &&
   3.127 +		    intpose_arr[i].msr == msr) {
   3.128 +			if (valp != NULL)
   3.129 +				*valp = intpose_arr[i].val;
   3.130 +			return &intpose_arr[i];
   3.131 +		}
   3.132 +	}
   3.133 +
   3.134 +	return NULL;
   3.135 +}
   3.136 +
   3.137 +static void intpose_add(unsigned int cpu_nr, uint64_t msr, uint64_t val)
   3.138 +{
   3.139 +	struct intpose_ent *ent;
   3.140 +	int i;
   3.141 +
   3.142 +	if ((ent = intpose_lookup(cpu_nr, msr, NULL)) != NULL) {
   3.143 +		ent->val = val;
   3.144 +		return;
   3.145 +	}
   3.146 +
   3.147 +	for (i = 0, ent = &intpose_arr[0]; i < INTPOSE_NENT; i++, ent++) {
   3.148 +		if (ent->cpu_nr == -1) {
   3.149 +			ent->cpu_nr = cpu_nr;
   3.150 +			ent->msr = msr;
   3.151 +			ent->val = val;
   3.152 +			return;
   3.153 +		}
   3.154 +	}
   3.155 +
   3.156 +	printk("intpose_add: interpose array full - request dropped\n");
   3.157 +}
   3.158 +
   3.159 +void intpose_inval(unsigned int cpu_nr, uint64_t msr)
   3.160 +{
   3.161 +	struct intpose_ent *ent;
   3.162 +
   3.163 +	if ((ent = intpose_lookup(cpu_nr, msr, NULL)) != NULL) {
   3.164 +		ent->cpu_nr = -1;
   3.165 +	}
   3.166 +}
   3.167 +
   3.168 +#define	IS_MCA_BANKREG(r) \
   3.169 +    ((r) >= MSR_IA32_MC0_CTL && \
   3.170 +    (r) <= MSR_IA32_MC0_MISC + (nr_mce_banks - 1) * 4 && \
   3.171 +    ((r) - MSR_IA32_MC0_CTL) % 4 != 0)	/* excludes MCi_CTL */
   3.172 +
   3.173 +static int x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
   3.174 +{
   3.175 +	struct cpuinfo_x86 *c;
   3.176 +	int i, errs = 0;
   3.177 +
   3.178 +	c = &cpu_data[smp_processor_id()];
   3.179 +
   3.180 +	for (i = 0; i < mci->mcinj_count; i++) {
   3.181 +		uint64_t reg = mci->mcinj_msr[i].reg;
   3.182 +		const char *reason = NULL;
   3.183 +
   3.184 +		if (IS_MCA_BANKREG(reg)) {
   3.185 +			if (c->x86_vendor == X86_VENDOR_AMD) {
   3.186 +				/* On AMD we can set MCi_STATUS_WREN in the
    3.187 +				 * HWCR MSR so that non-zero writes to bank
    3.188 +				 * MSRs do not #GP.  The injector in dom0
   3.189 +				 * should set that bit, but we detect when it
   3.190 +				 * is necessary and set it as a courtesy to
   3.191 +				 * avoid #GP in the hypervisor. */
   3.192 +				mci->mcinj_flags |=
   3.193 +				    _MC_MSRINJ_F_REQ_HWCR_WREN;
   3.194 +				continue;
   3.195 +			} else {
   3.196 +				/* No alternative but to interpose, so require
    3.197 +				 * that the injector request interposition explicitly. */
   3.198 +				if (!(mci->mcinj_flags &
   3.199 +				    MC_MSRINJ_F_INTERPOSE)) {
   3.200 +					reason = "must specify interposition";
   3.201 +				}
   3.202 +			}
   3.203 +		} else {
   3.204 +			switch (reg) {
   3.205 +			/* MSRs acceptable on all x86 cpus */
   3.206 +			case MSR_IA32_MCG_STATUS:
   3.207 +				break;
   3.208 +
   3.209 +			/* MSRs that the HV will take care of */
   3.210 +			case MSR_K8_HWCR:
   3.211 +				if (c->x86_vendor == X86_VENDOR_AMD)
   3.212 +					reason = "HV will operate HWCR";
   3.213 +				else
    3.214 +					reason = "only supported on AMD";
   3.215 +				break;
   3.216 +
   3.217 +			default:
   3.218 +				reason = "not a recognized MCA MSR";
   3.219 +				break;
   3.220 +			}
   3.221 +		}
   3.222 +
   3.223 +		if (reason != NULL) {
   3.224 +			printk("HV MSR INJECT ERROR: MSR 0x%llx %s\n",
   3.225 +			    (unsigned long long)mci->mcinj_msr[i].reg, reason);
   3.226 +			errs++;
   3.227 +		}
   3.228 +	}
   3.229 +
   3.230 +	return !errs;
   3.231 +}
   3.232 +
   3.233 +static uint64_t x86_mc_hwcr_wren(void)
   3.234 +{
   3.235 +	uint64_t old;
   3.236 +
   3.237 +	rdmsrl(MSR_K8_HWCR, old);
   3.238 +
   3.239 +	if (!(old & K8_HWCR_MCi_STATUS_WREN)) {
   3.240 +		uint64_t new = old | K8_HWCR_MCi_STATUS_WREN;
   3.241 +		wrmsrl(MSR_K8_HWCR, new);
   3.242 +	}
   3.243 +
   3.244 +	return old;
   3.245 +}
   3.246 +
   3.247 +static void x86_mc_hwcr_wren_restore(uint64_t hwcr)
   3.248 +{
   3.249 +	if (!(hwcr & K8_HWCR_MCi_STATUS_WREN))
   3.250 +		wrmsrl(MSR_K8_HWCR, hwcr);
   3.251 +}
   3.252 +
   3.253 +static void x86_mc_msrinject(void *data)
   3.254 +{
   3.255 +	struct xen_mc_msrinject *mci = data;
   3.256 +	struct mcinfo_msr *msr;
   3.257 +	struct cpuinfo_x86 *c;
   3.258 +	uint64_t hwcr = 0;
   3.259 +	int intpose;
   3.260 +	int i;
   3.261 +
   3.262 +	c = &cpu_data[smp_processor_id()];
   3.263 +
   3.264 +	if (mci->mcinj_flags & _MC_MSRINJ_F_REQ_HWCR_WREN)
   3.265 +		hwcr = x86_mc_hwcr_wren();
   3.266 +
   3.267 +	intpose = (mci->mcinj_flags & MC_MSRINJ_F_INTERPOSE) != 0;
   3.268 +
   3.269 +	for (i = 0, msr = &mci->mcinj_msr[0];
   3.270 +	    i < mci->mcinj_count; i++, msr++) {
   3.271 +		printk("HV MSR INJECT (%s) target %u actual %u MSR 0x%llx "
   3.272 +		    "<-- 0x%llx\n",
   3.273 +		    intpose ?  "interpose" : "hardware",
   3.274 +		    mci->mcinj_cpunr, smp_processor_id(),
   3.275 +		    (unsigned long long)msr->reg,
   3.276 +		    (unsigned long long)msr->value);
   3.277 +
   3.278 +		if (intpose)
   3.279 +			intpose_add(mci->mcinj_cpunr, msr->reg, msr->value);
   3.280 +		else
   3.281 +			wrmsrl(msr->reg, msr->value);
   3.282 +	}
   3.283 +
   3.284 +	if (mci->mcinj_flags & _MC_MSRINJ_F_REQ_HWCR_WREN)
   3.285 +		x86_mc_hwcr_wren_restore(hwcr);
   3.286 +}
   3.287 +
   3.288 +/*ARGSUSED*/
   3.289 +static void x86_mc_mceinject(void *data)
   3.290 +{
   3.291 +	printk("Simulating #MC on cpu %d\n", smp_processor_id());
   3.292 +	__asm__ __volatile__("int $0x12");
   3.293 +}
   3.294 +
   3.295  #if BITS_PER_LONG == 64
   3.296  
   3.297  #define	ID2COOKIE(id)	((mctelem_cookie_t)(id))
   3.298 @@ -797,6 +997,9 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
   3.299  	xen_mc_logical_cpu_t *log_cpus = NULL;
   3.300  	mctelem_cookie_t mctc;
   3.301  	mctelem_class_t which;
   3.302 +	unsigned int target;
   3.303 +	struct xen_mc_msrinject *mc_msrinject;
   3.304 +	struct xen_mc_mceinject *mc_mceinject;
   3.305  
   3.306  	if ( copy_from_guest(op, u_xen_mc, 1) )
   3.307  		return x86_mcerr("do_mca: failed copyin of xen_mc_t", -EFAULT);
   3.308 @@ -901,6 +1104,59 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
   3.309  		}
   3.310  		break;
   3.311  
   3.312 +	case XEN_MC_msrinject:
   3.313 +		if ( !IS_PRIV(v->domain) )
   3.314 +			return x86_mcerr("do_mca inject", -EPERM);
   3.315 +
   3.316 +		if (nr_mce_banks == 0)
   3.317 +			return x86_mcerr("do_mca inject", -ENODEV);
   3.318 +
   3.319 +		mc_msrinject = &op->u.mc_msrinject;
   3.320 +		target = mc_msrinject->mcinj_cpunr;
   3.321 +
   3.322 +		if (target >= NR_CPUS)
   3.323 +			return x86_mcerr("do_mca inject: bad target", -EINVAL);
   3.324 +
   3.325 +		if (!cpu_isset(target, cpu_online_map))
   3.326 +			return x86_mcerr("do_mca inject: target offline",
   3.327 +			    -EINVAL);
   3.328 +
   3.329 +		if (mc_msrinject->mcinj_count == 0)
   3.330 +			return 0;
   3.331 +
   3.332 +		if (!x86_mc_msrinject_verify(mc_msrinject))
   3.333 +			return x86_mcerr("do_mca inject: illegal MSR", -EINVAL);
   3.334 +
   3.335 +		add_taint(TAINT_ERROR_INJECT);
   3.336 +
   3.337 +		on_selected_cpus(cpumask_of_cpu(target),
   3.338 +		    x86_mc_msrinject, mc_msrinject, 1, 1);
   3.339 +
   3.340 +		break;
   3.341 +
   3.342 +	case XEN_MC_mceinject:
   3.343 +		if ( !IS_PRIV(v->domain) )
   3.344 +			return x86_mcerr("do_mca #MC", -EPERM);
   3.345 +
   3.346 +		if (nr_mce_banks == 0)
   3.347 +			return x86_mcerr("do_mca #MC", -ENODEV);
   3.348 +
   3.349 +		mc_mceinject = &op->u.mc_mceinject;
   3.350 +		target = mc_mceinject->mceinj_cpunr;
   3.351 +
   3.352 +		if (target >= NR_CPUS)
   3.353 +			return x86_mcerr("do_mca #MC: bad target", -EINVAL);
    3.354 +
   3.355 +		if (!cpu_isset(target, cpu_online_map))
   3.356 +			return x86_mcerr("do_mca #MC: target offline", -EINVAL);
   3.357 +
   3.358 +		add_taint(TAINT_ERROR_INJECT);
   3.359 +
   3.360 +		on_selected_cpus(cpumask_of_cpu(target),
   3.361 +		    x86_mc_mceinject, mc_mceinject, 1, 1);
   3.362 +
   3.363 +		break;
   3.364 +
   3.365  	default:
   3.366  		return x86_mcerr("do_mca: bad command", -EINVAL);
   3.367  	}
     4.1 --- a/xen/arch/x86/cpu/mcheck/mce.h	Tue Mar 17 14:22:50 2009 +0000
     4.2 +++ b/xen/arch/x86/cpu/mcheck/mce.h	Tue Mar 17 14:24:08 2009 +0000
     4.3 @@ -42,6 +42,23 @@ extern void x86_mce_vector_register(x86_
     4.4   * via x86_mce_vector_register. */
     4.5  extern void mcheck_cmn_handler(struct cpu_user_regs *, long, cpu_banks_t);
     4.6  
     4.7 +/* Read an MSR, checking for an interposed value first */
     4.8 +extern struct intpose_ent *intpose_lookup(unsigned int, uint64_t,
     4.9 +    uint64_t *);
    4.10 +extern void intpose_inval(unsigned int, uint64_t);
    4.11 +
    4.12 +#define mca_rdmsrl(msr, var) do { \
    4.13 +       if (intpose_lookup(smp_processor_id(), msr, &var) == NULL) \
    4.14 +               rdmsrl(msr, var); \
    4.15 +} while (0)
    4.16 +
    4.17 +/* Write an MSR, invalidating any interposed value */
     4.18 +#define mca_wrmsrl(msr, val) do { \
    4.19 +       intpose_inval(smp_processor_id(), msr); \
    4.20 +       wrmsrl(msr, val); \
    4.21 +} while (0)
    4.22 +
    4.23 +
    4.24  /* Utility function to "logout" all architectural MCA telemetry from the MCA
    4.25   * banks of the current processor.  A cookie is returned which may be
     4.26  * used to reference the data so logged (the cookie can be NULL if
     5.1 --- a/xen/include/public/arch-x86/xen-mca.h	Tue Mar 17 14:22:50 2009 +0000
     5.2 +++ b/xen/include/public/arch-x86/xen-mca.h	Tue Mar 17 14:24:08 2009 +0000
     5.3 @@ -324,10 +324,31 @@ struct xen_mc_physcpuinfo {
     5.4  	XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info;
     5.5  };
     5.6  
     5.7 +#define XEN_MC_msrinject    4
     5.8 +#define MC_MSRINJ_MAXMSRS       8
     5.9 +struct xen_mc_msrinject {
    5.10 +       /* IN */
    5.11 +	unsigned int mcinj_cpunr;       /* target processor id */
    5.12 +	uint32_t mcinj_flags;           /* see MC_MSRINJ_F_* below */
    5.13 +	uint32_t mcinj_count;           /* 0 .. count-1 in array are valid */
    5.14 +	uint32_t mcinj_pad0;
    5.15 +	struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
    5.16 +};
    5.17 +
    5.18 +/* Flags for mcinj_flags above; bits 16-31 are reserved */
    5.19 +#define MC_MSRINJ_F_INTERPOSE   0x1
    5.20 +
    5.21 +#define XEN_MC_mceinject    5
    5.22 +struct xen_mc_mceinject {
    5.23 +	unsigned int mceinj_cpunr;      /* target processor id */
    5.24 +};
    5.25 +
    5.26  typedef union {
    5.27      struct xen_mc_fetch        mc_fetch;
    5.28      struct xen_mc_notifydomain mc_notifydomain;
    5.29      struct xen_mc_physcpuinfo  mc_physcpuinfo;
    5.30 +    struct xen_mc_msrinject    mc_msrinject;
    5.31 +    struct xen_mc_mceinject    mc_mceinject;
    5.32  } xen_mc_arg_t;
    5.33  
    5.34  struct xen_mc {
     6.1 --- a/xen/include/xen/lib.h	Tue Mar 17 14:22:50 2009 +0000
     6.2 +++ b/xen/include/xen/lib.h	Tue Mar 17 14:24:08 2009 +0000
     6.3 @@ -95,6 +95,7 @@ unsigned long long parse_size_and_unit(c
     6.4  #define TAINT_MACHINE_CHECK             (1<<1)
     6.5  #define TAINT_BAD_PAGE                  (1<<2)
     6.6  #define TAINT_SYNC_CONSOLE              (1<<3)
     6.7 +#define TAINT_ERROR_INJECT              (1<<4)
     6.8  extern int tainted;
     6.9  #define TAINT_STRING_MAX_LEN            20
    6.10  extern char *print_tainted(char *str);
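For reference, here is a standalone model of the interposition lifecycle introduced above (illustrative only; every name other than the behaviour being modelled is made up). A value staged for a (cpu, MSR) pair by intpose_add() is returned by mca_rdmsrl() in place of the hardware value, and the mca_wrmsrl() that later clears the bank status invalidates the entry via intpose_inval(), so an injected value is consumed at most once.

    /* Standalone model (not hypervisor code): stage() mimics intpose_add(),
     * read_msr() mimics mca_rdmsrl(), write_msr() mimics mca_wrmsrl()
     * together with intpose_inval(). */
    #include <stdint.h>
    #include <stdio.h>

    #define NENT 4

    struct ent { int cpu; uint64_t msr; uint64_t val; };
    static struct ent tab[NENT];
    static uint64_t hw_value;                  /* stand-in for the real MSR */

    static struct ent *lookup(int cpu, uint64_t msr)
    {
        int i;
        for (i = 0; i < NENT; i++)
            if (tab[i].cpu == cpu && tab[i].msr == msr)
                return &tab[i];
        return NULL;
    }

    static void stage(int cpu, uint64_t msr, uint64_t val)    /* ~intpose_add() */
    {
        int i;
        struct ent *e = lookup(cpu, msr);
        for (i = 0; e == NULL && i < NENT; i++)
            if (tab[i].cpu == -1)
                e = &tab[i];
        if (e == NULL)
            return;                                           /* table full: dropped */
        e->cpu = cpu; e->msr = msr; e->val = val;
    }

    static uint64_t read_msr(int cpu, uint64_t msr)           /* ~mca_rdmsrl() */
    {
        struct ent *e = lookup(cpu, msr);
        return e != NULL ? e->val : hw_value;
    }

    static void write_msr(int cpu, uint64_t msr, uint64_t val) /* ~mca_wrmsrl() */
    {
        struct ent *e = lookup(cpu, msr);
        if (e != NULL)
            e->cpu = -1;                                      /* ~intpose_inval() */
        hw_value = val;
    }

    int main(void)
    {
        const uint64_t MC4_STATUS = 0x411;                    /* bank 4 status MSR */
        int i;

        for (i = 0; i < NENT; i++)
            tab[i].cpu = -1;                                  /* ~intpose_init() */

        stage(1, MC4_STATUS, 0xb200000000000800ULL);          /* injected telemetry */
        printf("logout sees    0x%llx\n",
            (unsigned long long)read_msr(1, MC4_STATUS));
        write_msr(1, MC4_STATUS, 0);                          /* handler clears the bank */
        printf("next poll sees 0x%llx\n",
            (unsigned long long)read_msr(1, MC4_STATUS));
        return 0;
    }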