ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
- if ( !vpmu_available(v) )
+ if ( !vpmu_available(v) || vpmu_mode == XENPMU_MODE_OFF )
return 0;
- switch ( vendor )
+ if ( !vpmu_ops.initialise )
{
- case X86_VENDOR_AMD:
- case X86_VENDOR_HYGON:
- ret = svm_vpmu_initialise(v);
- break;
-
- case X86_VENDOR_INTEL:
- ret = vmx_vpmu_initialise(v);
- break;
-
- default:
if ( vpmu_mode != XENPMU_MODE_OFF )
{
printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
return -EINVAL;
}
- vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
-
+ ret = alternative_call(vpmu_ops.initialise, v);
if ( ret )
+ {
printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
+ return ret;
+ }
- return ret;
+ vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
+ vpmu_set(vpmu, VPMU_INITIALIZED);
+
+ return 0;
}
static void get_vpmu(struct vcpu *v)
}
}
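For context, and not part of the patch itself: alternative_call() dispatches through the global vpmu_ops table, with the indirect call patched into a direct one once alternatives are applied at boot. That table is filled from the per-vendor init routines whose prototypes appear in the header changes further down. A rough sketch of that boot-time selection follows; the function name here is purely illustrative, error handling and the Hygon case are omitted, and the vpmu_ops definition is repeated only to keep the sketch self-contained:

/* Illustrative sketch only -- mirrors the existing boot-time vendor selection. */
static struct arch_vpmu_ops __read_mostly vpmu_ops;

static int __init example_vpmu_select_ops(void)
{
    const struct arch_vpmu_ops *ops = NULL;

    switch ( current_cpu_data.x86_vendor )
    {
    case X86_VENDOR_AMD:
        ops = amd_vpmu_init();
        break;

    case X86_VENDOR_INTEL:
        ops = core2_vpmu_init();
        break;
    }

    if ( ops )
        vpmu_ops = *ops;    /* one-time copy; altcall sites are patched afterwards */

    return 0;
}

With the table populated this way, the reworked vpmu_arch_initialise() above only has to check vpmu_ops.initialise and let alternative_call() do the rest.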
-static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
- .do_wrmsr = amd_vpmu_do_wrmsr,
- .do_rdmsr = amd_vpmu_do_rdmsr,
- .do_interrupt = amd_vpmu_do_interrupt,
- .arch_vpmu_destroy = amd_vpmu_destroy,
- .arch_vpmu_save = amd_vpmu_save,
- .arch_vpmu_load = amd_vpmu_load,
- .arch_vpmu_dump = amd_vpmu_dump
-};
-
-int svm_vpmu_initialise(struct vcpu *v)
+static int svm_vpmu_initialise(struct vcpu *v)
{
struct xen_pmu_amd_ctxt *ctxt;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( vpmu_mode == XENPMU_MODE_OFF )
- return 0;
-
if ( !counters )
return -EINVAL;
offsetof(struct xen_pmu_amd_ctxt, regs));
}
- vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);
+ vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
return 0;
}
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
+ .initialise = svm_vpmu_initialise,
+ .do_wrmsr = amd_vpmu_do_wrmsr,
+ .do_rdmsr = amd_vpmu_do_rdmsr,
+ .do_interrupt = amd_vpmu_do_interrupt,
+ .arch_vpmu_destroy = amd_vpmu_destroy,
+ .arch_vpmu_save = amd_vpmu_save,
+ .arch_vpmu_load = amd_vpmu_load,
+ .arch_vpmu_dump = amd_vpmu_dump,
+};
+
static const struct arch_vpmu_ops *__init common_init(void)
{
unsigned int i;
vpmu_clear(vpmu);
}
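Nothing else needs to change on the AMD/Hygon side: the table above is still handed back to the common code through common_init(). A sketch of the assumed shape of that path (the per-family counter/control MSR selection is elided, and the exact bodies are not shown in this excerpt):

/* Assumed shape, not shown in this excerpt: both vendor entry points end by
 * returning the result of common_init(), which yields &amd_vpmu_ops once the
 * counter/control arrays have been set up. */
const struct arch_vpmu_ops *__init amd_vpmu_init(void)
{
    /* per-family selection of counters/ctrls omitted */
    return common_init();
}

const struct arch_vpmu_ops *__init hygon_vpmu_init(void)
{
    /* Hygon shares the AMD implementation */
    return common_init();
}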
-static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
- .do_wrmsr = core2_vpmu_do_wrmsr,
- .do_rdmsr = core2_vpmu_do_rdmsr,
- .do_interrupt = core2_vpmu_do_interrupt,
- .arch_vpmu_destroy = core2_vpmu_destroy,
- .arch_vpmu_save = core2_vpmu_save,
- .arch_vpmu_load = core2_vpmu_load,
- .arch_vpmu_dump = core2_vpmu_dump
-};
-
-int vmx_vpmu_initialise(struct vcpu *v)
+static int vmx_vpmu_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
u64 msr_content;
static bool_t ds_warned;
- if ( vpmu_mode == XENPMU_MODE_OFF )
- return 0;
-
if ( v->domain->arch.cpuid->basic.pmu_version <= 1 ||
v->domain->arch.cpuid->basic.pmu_version >= 6 )
return -EINVAL;
if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
return -EIO;
- vpmu_set(vpmu, VPMU_INITIALIZED);
-
return 0;
}
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
+ .initialise = vmx_vpmu_initialise,
+ .do_wrmsr = core2_vpmu_do_wrmsr,
+ .do_rdmsr = core2_vpmu_do_rdmsr,
+ .do_interrupt = core2_vpmu_do_interrupt,
+ .arch_vpmu_destroy = core2_vpmu_destroy,
+ .arch_vpmu_save = core2_vpmu_save,
+ .arch_vpmu_load = core2_vpmu_load,
+ .arch_vpmu_dump = core2_vpmu_dump,
+};
+
const struct arch_vpmu_ops *__init core2_vpmu_init(void)
{
unsigned int version = 0;
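As with the AMD table, core2_vpmu_ops can stay __initconstrel because it only needs to live until the common code copies it into vpmu_ops during boot. Presumably (the tail is not shown in this excerpt) core2_vpmu_init(), whose opening appears just above, ends by returning it once the version and feature checks have passed:

/* Assumed tail of core2_vpmu_init(), not shown in this excerpt. */
    return &core2_vpmu_ops;
}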
/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
+ int (*initialise)(struct vcpu *v);
int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
uint64_t supported);
int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
};
const struct arch_vpmu_ops *core2_vpmu_init(void);
-int vmx_vpmu_initialise(struct vcpu *);
const struct arch_vpmu_ops *amd_vpmu_init(void);
const struct arch_vpmu_ops *hygon_vpmu_init(void);
-int svm_vpmu_initialise(struct vcpu *);
struct vpmu_struct {
u32 flags;