v->arch.vmce.mcg_cap = ctxt->caps;
v->arch.vmce.bank[0].mci_ctl2 = ctxt->mci_ctl2_bank0;
v->arch.vmce.bank[1].mci_ctl2 = ctxt->mci_ctl2_bank1;
+ v->arch.vmce.mcg_ext_ctl = ctxt->mcg_ext_ctl;
return 0;
}
mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_CTL %#"PRIx64"\n", cur, *val);
break;
+ case MSR_IA32_MCG_EXT_CTL:
+ /*
+ * If MCG_LMCE_P is present in guest MSR_IA32_MCG_CAP, then the LMCE and
+ * LOCK bits have always been set in guest MSR_IA32_FEATURE_CONTROL by
+ * Xen, so there is no need to check them here.
+ */
+ if ( cur->arch.vmce.mcg_cap & MCG_LMCE_P )
+ {
+ *val = cur->arch.vmce.mcg_ext_ctl;
+ mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL %#"PRIx64"\n",
+ cur, *val);
+ }
+ else
+ {
+ ret = -1;
+ mce_printk(MCE_VERBOSE, "MCE: %pv: rd MCG_EXT_CTL, not supported\n",
+ cur);
+ }
+ break;
+
default:
ret = mce_bank_msr(cur, msr) ? bank_mce_rdmsr(cur, msr, val) : 0;
break;
mce_printk(MCE_VERBOSE, "MCE: %pv: MCG_CAP is r/o\n", cur);
break;
+ case MSR_IA32_MCG_EXT_CTL:
+ if ( (cur->arch.vmce.mcg_cap & MCG_LMCE_P) &&
+ !(val & ~MCG_EXT_CTL_LMCE_EN) )
+ cur->arch.vmce.mcg_ext_ctl = val;
+ else
+ ret = -1;
+ mce_printk(MCE_VERBOSE, "MCE: %pv: wr MCG_EXT_CTL %"PRIx64"%s\n",
+ cur, val, (ret == -1) ? ", not supported" : "");
+ break;
+
default:
ret = mce_bank_msr(cur, msr) ? bank_mce_wrmsr(cur, msr, val) : 0;
break;
struct hvm_vmce_vcpu ctxt = {
.caps = v->arch.vmce.mcg_cap,
.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2,
- .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2
+ .mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2,
+ .mcg_ext_ctl = v->arch.vmce.mcg_ext_ctl,
};
err = hvm_save_entry(VMCE_VCPU, v->vcpu_id, h, &ctxt);