int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
unsigned int bank = msr - MSR_IA32_MC0_CTL2;
switch ( msr )
uint32_t subleaf, struct cpuid_leaf *res)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
*res = EMPTY_LEAF;
void domain_cpu_policy_changed(struct domain *d)
{
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
struct vcpu *v;
if ( is_pv_domain(d) )
signed int cr0_pg)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
if ( value & ~EFER_KNOWN_MASK )
return "Unknown bits set";
/* These bits in CR4 can be set by the guest. */
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d)
{
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
bool mce, vmxe, cet;
/* Logic broken out simply to aid readability below. */
{
struct svm_vcpu *svm = &v->arch.hvm.svm;
struct vmcb_struct *vmcb = svm->vmcb;
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
u32 bitmap = vmcb_get_exception_intercepts(vmcb);
if ( opt_hvm_fep ||
int guest_wrmsr_apic_base(struct vcpu *v, uint64_t value)
{
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
struct vlapic *vlapic = vcpu_vlapic(v);
if ( !has_vlapic(v->domain) )
static void vmx_cpuid_policy_changed(struct vcpu *v)
{
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
int rc = 0;
if ( opt_hvm_fep ||
static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
struct vcpu *v = current;
- const struct cpuid_policy *cp = v->domain->arch.cpuid;
+ const struct cpu_policy *cp = v->domain->arch.cpu_policy;
HVM_DBG_LOG(DBG_LEVEL_MSR, "ecx=%#x, msr_value=%#"PRIx64, msr, msr_content);
{
const struct vcpu *curr = current;
const struct domain *d = v->domain;
- const struct cpuid_policy *cp = d->arch.cpuid;
- const struct msr_policy *mp = d->arch.msr;
+ const struct cpu_policy *cp = d->arch.cpu_policy;
const struct vcpu_msrs *msrs = v->arch.msrs;
int ret = X86EMUL_OKAY;
goto get_reg;
case MSR_INTEL_PLATFORM_INFO:
- *val = mp->platform_info.raw;
+ *val = cp->platform_info.raw;
break;
case MSR_ARCH_CAPABILITIES:
if ( !cp->feat.arch_caps )
goto gp_fault;
- *val = mp->arch_caps.raw;
+ *val = cp->arch_caps.raw;
break;
case MSR_INTEL_MISC_FEATURES_ENABLES:
* separate CPUID features for this functionality, but only one set will be
* active.
*/
-uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp)
+uint64_t msr_spec_ctrl_valid_bits(const struct cpu_policy *cp)
{
bool ssbd = cp->feat.ssbd || cp->extd.amd_ssbd;
bool psfd = cp->feat.intel_psfd || cp->extd.psfd;
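The two bools above are where the merged object pays off: each capability is ORed together from a CPUID feature leaf (cp->feat.*) and an AMD extended leaf (cp->extd.*), both reachable through the single cpu_policy pointer. A plausible completion of the function, assuming the SPEC_CTRL_* bit masks from msr-index.h; the exact mask set is a sketch, not a copy of Xen's implementation:

    uint64_t msr_spec_ctrl_valid_bits(const struct cpu_policy *cp)
    {
        bool ssbd = cp->feat.ssbd || cp->extd.amd_ssbd;
        bool psfd = cp->feat.intel_psfd || cp->extd.psfd;

        /* IBRS and STIBP are always accepted; the optional controls are
         * admitted only when either vendor's enumeration is visible. */
        return SPEC_CTRL_IBRS | SPEC_CTRL_STIBP |
               (ssbd ? SPEC_CTRL_SSBD : 0) |
               (psfd ? SPEC_CTRL_PSFD : 0);
    }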
{
const struct vcpu *curr = current;
struct domain *d = v->domain;
- const struct cpuid_policy *cp = d->arch.cpuid;
- const struct msr_policy *mp = d->arch.msr;
+ const struct cpu_policy *cp = d->arch.cpu_policy;
struct vcpu_msrs *msrs = v->arch.msrs;
int ret = X86EMUL_OKAY;
* for backwards compatibility, the OS should write 0 to it before
* trying to access the current microcode version.
*/
- if ( d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL || val != 0 )
+ if ( cp->x86_vendor != X86_VENDOR_INTEL || val != 0 )
goto gp_fault;
break;
* to AMD CPUs as well (at least the architectural/CPUID part does).
*/
if ( is_pv_domain(d) ||
- d->arch.cpuid->x86_vendor != X86_VENDOR_AMD )
+ cp->x86_vendor != X86_VENDOR_AMD )
goto gp_fault;
break;
* by any CPUID bit.
*/
if ( is_pv_domain(d) ||
- d->arch.cpuid->x86_vendor != X86_VENDOR_INTEL )
+ cp->x86_vendor != X86_VENDOR_INTEL )
goto gp_fault;
break;
bool old_cpuid_faulting = msrs->misc_features_enables.cpuid_faulting;
rsvd = ~0ull;
- if ( mp->platform_info.cpuid_faulting )
+ if ( cp->platform_info.cpuid_faulting )
rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
if ( val & rsvd )
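The write path above uses Xen's usual reserved-bit idiom: rsvd starts as all ones, bits are cleared only when the policy grants them, and any remaining set bit in the written value faults. A commented sketch of the shape, assuming the MSR_MISC_FEATURES_CPUID_FAULTING mask from msr-index.h:

    uint64_t rsvd = ~0ull;                    /* Everything reserved... */

    /* ...until the policy grants it.  After the merge, the gating
     * PLATFORM_INFO bit lives in the same cpu_policy object. */
    if ( cp->platform_info.cpuid_faulting )
        rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;

    if ( val & rsvd )                         /* Ungranted bit set? */
        goto gp_fault;                        /* #GP back to the guest. */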
unsigned long pv_fixup_guest_cr4(const struct vcpu *v, unsigned long cr4)
{
- const struct cpuid_policy *p = v->domain->arch.cpuid;
+ const struct cpu_policy *p = v->domain->arch.cpu_policy;
/* Discard attempts to set guest controllable bits outside of the policy. */
cr4 &= ~((p->basic.tsc ? 0 : X86_CR4_TSD) |
{
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
- const struct cpuid_policy *cp = currd->arch.cpuid;
+ const struct cpu_policy *cp = currd->arch.cpu_policy;
bool vpmu_msr = false, warn = false;
uint64_t tmp;
int ret;
{
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
- const struct cpuid_policy *cp = currd->arch.cpuid;
+ const struct cpu_policy *cp = currd->arch.cpu_policy;
bool vpmu_msr = false;
int ret;
uint32_t subleaf, struct cpuid_leaf *res)
{
const struct domain *d = v->domain;
- const struct cpuid_policy *p = d->arch.cpuid;
+ const struct cpu_policy *p = d->arch.cpu_policy;
uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
uint32_t idx = leaf - base;
unsigned int limit = is_viridian_domain(d) ? p->hv2_limit : p->hv_limit;
}
static bool
-_amd_like(const struct cpuid_policy *cp)
+_amd_like(const struct cpu_policy *cp)
{
return cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON);
}
static bool
amd_like(const struct x86_emulate_ctxt *ctxt)
{
- return _amd_like(ctxt->cpuid);
+ return _amd_like(ctxt->cpu_policy);
}
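The single & in _amd_like() works because Xen encodes X86_VENDOR_* as one bit per vendor, so membership in a set of vendors is a single mask test. A standalone illustration of the pattern (the values below are assumptions for illustration, not copied from Xen's x86-vendors.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_VENDOR_INTEL (1u << 0)
    #define X86_VENDOR_AMD   (1u << 1)
    #define X86_VENDOR_HYGON (1u << 4)

    /* True if vendor is any member of set, in one AND. */
    static bool vendor_in(uint8_t vendor, uint8_t set)
    {
        return vendor & set;
    }

    /* vendor_in(X86_VENDOR_HYGON, X86_VENDOR_AMD | X86_VENDOR_HYGON) -> true */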
-#define vcpu_has_fpu() (ctxt->cpuid->basic.fpu)
+#define vcpu_has_fpu() (ctxt->cpu_policy->basic.fpu)
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- const struct cpuid_policy *cp = ctxt->cpuid;
+ const struct cpu_policy *cp = ctxt->cpu_policy;
enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
struct { uint32_t a, b; } desc, desc_hi = {};
uint8_t dpl, rpl;
}
}
-uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);
+uint64_t msr_spec_ctrl_valid_bits(const struct cpu_policy *cp);
/* Container object for per-vCPU MSRs */
struct vcpu_msrs
uint8_t x86_vendor;
};
-/* Temporary */
-#define cpuid_policy cpu_policy
-#define msr_policy cpu_policy
-
struct cpu_policy_errors
{
uint32_t leaf, subleaf;
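With every consumer converted to the merged type, the transitional aliases removed above can finally be dropped. A minimal standalone illustration (not Xen code) of the pattern being retired: while the conversion was in flight, the old type names were #define'd onto the merged struct so that both spellings named the same type and unconverted files kept compiling:

    #include <stdint.h>

    struct cpu_policy {
        uint8_t x86_vendor;
        struct { uint64_t raw; } platform_info;   /* formerly in msr_policy */
    };

    /* Transitional aliases, as removed above. */
    #define cpuid_policy cpu_policy
    #define msr_policy   cpu_policy

    /* An old-style spelling still names the merged type. */
    static uint64_t plat_raw(const struct msr_policy *mp)
    {
        return mp->platform_info.raw;
    }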