struct msr_domain_policy __read_mostly hvm_max_msr_domain_policy,
__read_mostly pv_max_msr_domain_policy;
+struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
+ __read_mostly pv_max_msr_vcpu_policy;
+
/*
 * Compute the maximum MSR policy for HVM guests.
 *
 * No-op when HVM support is not compiled in / enabled; the max policies
 * then stay zero-initialised (nothing available).
 */
static void __init calculate_hvm_max_policy(void)
{
    struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
    struct msr_vcpu_policy *vp = &hvm_max_msr_vcpu_policy;

    if ( !hvm_enabled )
        return;

    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
    /* Always offered to HVM guests: CPUID faulting is fully emulated. */
    dp->plaform_info.available = true;
    dp->plaform_info.cpuid_faulting = true;

    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    /* Only meaningful when PLATFORM_INFO advertises CPUID faulting. */
    vp->misc_features_enables.available = dp->plaform_info.available;
}
/*
 * Compute the maximum MSR policy for PV guests.
 *
 * Unlike HVM, CPUID faulting for PV guests relies on real hardware
 * support, so MSR_INTEL_PLATFORM_INFO is only made available when the
 * host has the feature.
 */
static void __init calculate_pv_max_policy(void)
{
    struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
    struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy;

    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
    if ( cpu_has_cpuid_faulting )
    {
        /*
         * Both fields are set together: advertising cpuid_faulting in an
         * unavailable MSR would be inconsistent.
         */
        dp->plaform_info.available = true;
        dp->plaform_info.cpuid_faulting = true;
    }

    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    vp->misc_features_enables.available = dp->plaform_info.available;
}
/*
 * NOTE(review): this hunk appears truncated by patch-context collapse.
 * The declaration below is the void function init_guest_msr_policy(),
 * but the trailing "return 0;" plausibly belongs to a different,
 * int-returning function (presumably init_domain_msr_policy()) whose
 * body was lost.  Restore both bodies from the upstream source before
 * building — TODO confirm against the original file.
 */
void __init init_guest_msr_policy(void)
return 0;
}
+int init_vcpu_msr_policy(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ struct msr_vcpu_policy *vp;
+
+ vp = xmalloc(struct msr_vcpu_policy);
+
+ if ( !vp )
+ return -ENOMEM;
+
+ *vp = is_pv_domain(d) ? pv_max_msr_vcpu_policy :
+ hvm_max_msr_vcpu_policy;
+
+ /* See comment in intel_ctxt_switch_levelling() */
+ if ( is_control_domain(d) )
+ vp->misc_features_enables.available = false;
+
+ v->arch.msr = vp;
+
+ return 0;
+}
+
/*
* Local variables:
* mode: C
} plaform_info;
};
/* MSR policy object for per-vCPU MSRs */
struct msr_vcpu_policy
{
    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    struct {
        bool available; /* This MSR is non-architectural */
        bool cpuid_faulting;
    } misc_features_enables;
};
+
/* Build the system-wide maximum MSR policies; called once at boot. */
void init_guest_msr_policy(void);
/* Per-domain / per-vCPU policy construction; return 0 or -errno. */
int init_domain_msr_policy(struct domain *d);
int init_vcpu_msr_policy(struct vcpu *v);

#endif /* !__ASSEMBLY__ */