bool_t __read_mostly mce_broadcast = 0;
bool_t is_mc_panic;
unsigned int __read_mostly nr_mce_banks;
-int __read_mostly firstbank;
+unsigned int __read_mostly firstbank;
static void intpose_init(void);
static void mcinfo_clear(struct mc_info *);
/*
 * Check if bank 0 is usable for MCE. It isn't for AMD K7,
* and Intel P6 family before model 0x1a.
*/
-int mce_firstbank(struct cpuinfo_x86 *c)
+unsigned int mce_firstbank(struct cpuinfo_x86 *c)
{
if (c->x86 == 6) {
if (c->x86_vendor == X86_VENDOR_AMD)
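The signedness change running through these hunks is worth a note: firstbank (and the mce_firstbank() return value that feeds it) is only ever a small bank index, and it is compared against the unsigned nr_mce_banks in loop bounds, as in vmce_init() further down. Mixing a signed firstbank with an unsigned bound invites the usual arithmetic conversions; a minimal, self-contained sketch of the hazard (hypothetical values, not Xen code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_banks = 8;   /* stands in for nr_mce_banks */
        int first = -1;              /* a stray negative index */

        /* In `i < nr_banks`, i is converted to unsigned, so -1 becomes
         * UINT_MAX and the loop body never runs, silently skipping
         * every bank. */
        for (int i = first; i < nr_banks; i++)
            printf("bank %d\n", i);

        return 0;
    }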
int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
u64 mce_cap_init(void);
-extern int firstbank;
+extern unsigned int firstbank;
int intel_mce_rdmsr(uint32_t msr, uint64_t *val);
int intel_mce_wrmsr(uint32_t msr, uint64_t val);
struct mcinfo_global *mig, struct mc_info *mi);
int mce_available(struct cpuinfo_x86 *c);
-int mce_firstbank(struct cpuinfo_x86 *c);
+unsigned int mce_firstbank(struct cpuinfo_x86 *c);
/* Helper functions used for collecting error telemetry */
struct mc_info *x86_mcinfo_getptr(void);
void mc_panic(char *s);
return -ENOMEM;
}
memset(dom_vmce(d)->mci_ctl, ~0,
- sizeof(dom_vmce(d)->mci_ctl));
+ nr_mce_banks * sizeof(*dom_vmce(d)->mci_ctl));
dom_vmce(d)->mcg_status = 0x0;
dom_vmce(d)->mcg_cap = g_mcg_cap;
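Both memset corrections, here and in vmce_init() below, fix the same classic C pitfall: mci_ctl and h_mci_ctrl are pointers to dynamically allocated arrays, so sizeof applied to them yields the pointer size (8 bytes on x86-64) rather than the array size, and only the first element was being initialised. A minimal sketch of the pattern, with hypothetical names:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        unsigned int nbanks = 6;     /* stands in for nr_mce_banks */
        uint64_t *ctl = malloc(nbanks * sizeof(*ctl));

        if (!ctl)
            return 1;

        /* Wrong: sizeof(ctl) is sizeof(uint64_t *), i.e. 8 bytes, so
         * only the first of the six 64-bit elements gets filled. */
        memset(ctl, ~0, sizeof(ctl));

        /* Right: scale the element size by the element count, as the
         * patch does with nr_mce_banks * sizeof(*...->mci_ctl). */
        memset(ctl, ~0, nbanks * sizeof(*ctl));

        free(ctl);
        return 0;
    }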
int vmce_init(struct cpuinfo_x86 *c)
{
u64 value;
- int i;
+ unsigned int i;
if ( !h_mci_ctrl )
{
    h_mci_ctrl = xmalloc_array(uint64_t, nr_mce_banks);
    if ( !h_mci_ctrl )
    {
        dprintk(XENLOG_INFO, "Failed to alloc h_mci_ctrl\n");
        return -ENOMEM;
    }
/* Don't care banks before firstbank */
- memset(h_mci_ctrl, 0xff, sizeof(h_mci_ctrl));
+ memset(h_mci_ctrl, ~0,
+ min(firstbank, nr_mce_banks) * sizeof(*h_mci_ctrl));
for (i = firstbank; i < nr_mce_banks; i++)
rdmsrl(MSR_IA32_MCx_CTL(i), h_mci_ctrl[i]);
}
- if (g_mcg_cap & MCG_CTL_P)
- rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
-
rdmsrl(MSR_IA32_MCG_CAP, value);
/* For Guest vMCE usage */
g_mcg_cap = value & ~MCG_CMCI_P;
+ if (value & MCG_CTL_P)
+ rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
return 0;
}
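The final reordering fixes a use-before-read bug: the removed lines tested g_mcg_cap for MCG_CTL_P before g_mcg_cap had been filled in from MSR_IA32_MCG_CAP, so on the first call the test ran against a zero-initialised variable and MSR_IA32_MCG_CTL was never read. The replacement tests the freshly read value instead. A condensed, self-contained sketch (the MSR read is faked; MCG_CTL_P is bit 8 of IA32_MCG_CAP):

    #include <stdint.h>
    #include <stdio.h>

    #define MCG_CTL_P (1ULL << 8)

    /* Fake MSR read standing in for rdmsrl(MSR_IA32_MCG_CAP, ...). */
    static uint64_t read_mcg_cap(void) { return MCG_CTL_P; }

    int main(void)
    {
        uint64_t g_mcg_cap = 0;  /* zero, like the file-scope variable */
        uint64_t value;

        /* Old order: the bit test runs before the capability is read. */
        if (g_mcg_cap & MCG_CTL_P)
            puts("old order: read MCG_CTL (never reached)");

        value = read_mcg_cap();
        g_mcg_cap = value;

        /* New order: test the value just read from the MSR. */
        if (value & MCG_CTL_P)
            puts("new order: read MCG_CTL");

        return 0;
    }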