#define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
#define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
#define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1))))
+#define is_svm_vcpu(v) (IS_ENABLED(CONFIG_AMD_SVM) && is_hvm_vcpu(v))
static unsigned int __read_mostly num_counters;
static const u32 __read_mostly *counters;
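The helper leans on IS_ENABLED() expanding to a compile-time constant: with CONFIG_AMD_SVM disabled, every is_svm_vcpu() guard folds to false and the compiler can discard the SVM-only calls behind it. Below is a minimal standalone sketch of that pattern, outside the patch; the simplified IS_ENABLED(), the stub vcpu type and the stub bitmap helper are illustrative only, not Xen code.

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_AMD_SVM 1                /* stand-in for the Kconfig symbol; set to 0 to compile SVM paths out */
#define IS_ENABLED(option) (option)     /* simplified: the real macro also copes with undefined symbols */

struct vcpu { bool hvm; };              /* illustrative stub */

static bool is_hvm_vcpu(const struct vcpu *v) { return v->hvm; }

/* Same shape as the patch: false whenever SVM support is compiled out. */
#define is_svm_vcpu(v) (IS_ENABLED(CONFIG_AMD_SVM) && is_hvm_vcpu(v))

/* Stub for an SVM-only helper. */
static void amd_vpmu_set_msr_bitmap(const struct vcpu *v)
{
    (void)v;
    puts("MSR bitmap updated");
}

int main(void)
{
    struct vcpu v = { .hvm = true };

    /* With CONFIG_AMD_SVM defined to 0 this branch is provably dead and
     * both the test and the call can be dropped at compile time. */
    if ( is_svm_vcpu(&v) )
        amd_vpmu_set_msr_bitmap(&v);

    return 0;
}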
context_save(v);
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_svm_vcpu(v) &&
is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
return -EINVAL;
/* For all counters, enable guest-only mode for HVM guests */
- if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
+ if ( is_svm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
!is_guest_mode(msr_content) )
{
set_guest_mode(msr_content);
return 0;
vpmu_set(vpmu, VPMU_RUNNING);
- if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_set_msr_bitmap(v);
}
(is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownership(PMU_OWNER_HVM);
}
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
xfree(vpmu->context);
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
static bool __read_mostly full_width_write;
+#define is_vmx_vcpu(v) (IS_ENABLED(CONFIG_INTEL_VMX) && is_hvm_vcpu(v))
+
/*
 * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
 * counters. 4 bits for every counter.
 */
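As the comment says, MSR_CORE_PERF_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter, with counter i occupying bits 4*i through 4*i+3. A standalone sketch of that layout follows; the helper names, the constant name and the example value are illustrative, not taken from the file.

#include <stdint.h>
#include <stdio.h>

#define FIXED_CTRL_BITS_PER_CTR 4       /* illustrative constant: 4 control bits per fixed counter */

/* Control field of fixed counter 'idx' within the MSR value. */
static uint64_t fixed_ctrl_field(uint64_t ctrl, unsigned int idx)
{
    return (ctrl >> (idx * FIXED_CTRL_BITS_PER_CTR)) &
           ((1u << FIXED_CTRL_BITS_PER_CTR) - 1);
}

/* Bits that 'nr' fixed counters may legitimately use; anything outside
 * this mask in a guest write would be rejected. */
static uint64_t fixed_ctrl_valid_mask(unsigned int nr)
{
    return (1ULL << (nr * FIXED_CTRL_BITS_PER_CTR)) - 1;
}

int main(void)
{
    uint64_t ctrl = 0x30;               /* fixed counter 1 counting in ring 0 and ring 3, counter 0 off */

    printf("ctr0=%#llx ctr1=%#llx valid(3)=%#llx\n",
           (unsigned long long)fixed_ctrl_field(ctrl, 0),
           (unsigned long long)fixed_ctrl_field(ctrl, 1),
           (unsigned long long)fixed_ctrl_valid_mask(3));
    return 0;
}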
rdmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
}
- if ( !is_hvm_vcpu(v) )
+ if ( !is_vmx_vcpu(v) )
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
/* Save MSR to private context to make it fork-friendly */
else if ( mem_sharing_enabled(v->domain) )
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !is_hvm_vcpu(v) )
+ if ( !is_vmx_vcpu(v) )
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_vmx_vcpu(v) &&
cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v);
if ( vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
- if ( !is_hvm_vcpu(v) )
+ if ( !is_vmx_vcpu(v) )
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
core2_vpmu_cxt->global_ovf_ctrl = 0;
}
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) &&
- !(is_hvm_vcpu(v)
+ !(is_vmx_vcpu(v)
? is_canonical_address(core2_vpmu_cxt->ds_area)
: __addr_ok(core2_vpmu_cxt->ds_area)) )
return -EINVAL;
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 0;
- if ( is_hvm_vcpu(v) )
+ if ( is_vmx_vcpu(v) )
{
if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
goto out_err;
__core2_vpmu_load(current);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
+ if ( is_vmx_vcpu(current) && cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current);
}
return 1;
return -EINVAL;
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
{
- if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
+ if ( !(is_vmx_vcpu(v) ? is_canonical_address(msr_content)
: __addr_ok(msr_content)) )
{
gdprintk(XENLOG_WARNING,
if ( msr_content & fixed_ctrl_mask )
return -EINVAL;
- if ( is_hvm_vcpu(v) )
+ if ( is_vmx_vcpu(v) )
vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
if ( blocked )
return -EINVAL;
- if ( is_hvm_vcpu(v) )
+ if ( is_vmx_vcpu(v) )
vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
wrmsrl(msr, msr_content);
else
{
- if ( is_hvm_vcpu(v) )
+ if ( is_vmx_vcpu(v) )
vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
*msr_content = core2_vpmu_cxt->global_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
- if ( is_hvm_vcpu(v) )
+ if ( is_vmx_vcpu(v) )
vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
vpmu->context = NULL;
xfree(vpmu->priv_context);
vpmu->priv_context = NULL;
- if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
+ if ( is_vmx_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v);
release_pmu_ownership(PMU_OWNER_HVM);
vpmu_clear(vpmu);