xenbits.xensource.com Git - xen.git/commitdiff
x86/vPMU: invoke <vendor>_vpmu_initialise() through a hook as well
author     Jan Beulich <jbeulich@suse.com>
           Fri, 3 Dec 2021 10:21:14 +0000 (11:21 +0100)
committer  Jan Beulich <jbeulich@suse.com>
           Fri, 3 Dec 2021 10:21:14 +0000 (11:21 +0100)
I see little point in having an open-coded switch() statement to achieve
the same; like other vendor-specific operations, the function can be
supplied in the respective ops structure instances.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
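
For readers less familiar with the vPMU code, the following is a minimal, self-contained sketch of the pattern the commit moves to: the vendor-specific initialise routine becomes a member of the per-vendor ops structure, and the common code dispatches through that pointer instead of switching on the CPU vendor. All type and function names below are illustrative stand-ins rather than Xen's actual ones, and the plain indirect call stands in for Xen's alternative_call() wrapper.

/*
 * Standalone sketch (not Xen code) of dispatching vendor-specific vPMU
 * initialisation through an ops-structure hook instead of a switch().
 */
#include <errno.h>
#include <stdio.h>
#include <stddef.h>

struct vcpu;                              /* opaque in this sketch */

struct arch_vpmu_ops_sketch {
    int (*initialise)(struct vcpu *v);    /* the new vendor init hook */
    /* ... other vendor hooks (do_wrmsr, do_rdmsr, do_interrupt, ...) ... */
};

static int amd_init_sketch(struct vcpu *v)
{
    (void)v;
    puts("AMD/Hygon vPMU initialise");
    return 0;
}

static int intel_init_sketch(struct vcpu *v)
{
    (void)v;
    puts("Intel vPMU initialise");
    return 0;
}

static const struct arch_vpmu_ops_sketch amd_ops   = { .initialise = amd_init_sketch };
static const struct arch_vpmu_ops_sketch intel_ops = { .initialise = intel_init_sketch };

/* Filled once at boot from the vendor-specific *_vpmu_init() result. */
static struct arch_vpmu_ops_sketch vpmu_ops;

static int vpmu_arch_initialise_sketch(struct vcpu *v)
{
    /* No hook registered: no vPMU support for this vendor. */
    if ( !vpmu_ops.initialise )
        return -EINVAL;

    /* In Xen the indirect call is wrapped by alternative_call(). */
    return vpmu_ops.initialise(v);
}

int main(void)
{
    vpmu_ops = amd_ops;                   /* stand-in for boot-time vendor detection */
    (void)intel_ops;                      /* would be selected instead on Intel CPUs */
    return vpmu_arch_initialise_sketch(NULL);
}

The benefit of this shape is that the common code needs no vendor knowledge: a vendor without vPMU support simply leaves the hook NULL, and the same indirect-call dispatch already used for the other hooks covers initialisation as well.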
xen/arch/x86/cpu/vpmu.c
xen/arch/x86/cpu/vpmu_amd.c
xen/arch/x86/cpu/vpmu_intel.c
xen/include/asm-x86/vpmu.h

index eacf12c48024360ae6b12f443acb296e8e11119d..64a8f45c16452bd086dc7688da209238a68f1d4b 100644 (file)
@@ -455,21 +455,11 @@ static int vpmu_arch_initialise(struct vcpu *v)
 
     ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
 
-    if ( !vpmu_available(v) )
+    if ( !vpmu_available(v) || vpmu_mode == XENPMU_MODE_OFF )
         return 0;
 
-    switch ( vendor )
+    if ( !vpmu_ops.initialise )
     {
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
-        ret = svm_vpmu_initialise(v);
-        break;
-
-    case X86_VENDOR_INTEL:
-        ret = vmx_vpmu_initialise(v);
-        break;
-
-    default:
         if ( vpmu_mode != XENPMU_MODE_OFF )
         {
             printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
@@ -480,12 +470,17 @@ static int vpmu_arch_initialise(struct vcpu *v)
         return -EINVAL;
     }
 
-    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
-
+    ret = alternative_call(vpmu_ops.initialise, v);
     if ( ret )
+    {
         printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);
+        return ret;
+    }
 
-    return ret;
+    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
+    vpmu_set(vpmu, VPMU_INITIALIZED);
+
+    return 0;
 }
 
 static void get_vpmu(struct vcpu *v)
index 629a55f41155d365b62175bcbcc749eb8614dbf4..5bb42276624b4ae76113bca1c418f3e380f0a71d 100644 (file)
@@ -483,24 +483,11 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
-    .do_wrmsr = amd_vpmu_do_wrmsr,
-    .do_rdmsr = amd_vpmu_do_rdmsr,
-    .do_interrupt = amd_vpmu_do_interrupt,
-    .arch_vpmu_destroy = amd_vpmu_destroy,
-    .arch_vpmu_save = amd_vpmu_save,
-    .arch_vpmu_load = amd_vpmu_load,
-    .arch_vpmu_dump = amd_vpmu_dump
-};
-
-int svm_vpmu_initialise(struct vcpu *v)
+static int svm_vpmu_initialise(struct vcpu *v)
 {
     struct xen_pmu_amd_ctxt *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( !counters )
         return -EINVAL;
 
@@ -529,11 +516,22 @@ int svm_vpmu_initialise(struct vcpu *v)
                offsetof(struct xen_pmu_amd_ctxt, regs));
     }
 
-    vpmu_set(vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED);
+    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
 
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel amd_vpmu_ops = {
+    .initialise = svm_vpmu_initialise,
+    .do_wrmsr = amd_vpmu_do_wrmsr,
+    .do_rdmsr = amd_vpmu_do_rdmsr,
+    .do_interrupt = amd_vpmu_do_interrupt,
+    .arch_vpmu_destroy = amd_vpmu_destroy,
+    .arch_vpmu_save = amd_vpmu_save,
+    .arch_vpmu_load = amd_vpmu_load,
+    .arch_vpmu_dump = amd_vpmu_dump,
+};
+
 static const struct arch_vpmu_ops *__init common_init(void)
 {
     unsigned int i;
index 75d66899842e04c6ff73d74801c35f56fe11f7bf..c44e81c756c82c2efd7a4f618fdb617c5c7e27ed 100644 (file)
@@ -819,25 +819,12 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
-    .do_wrmsr = core2_vpmu_do_wrmsr,
-    .do_rdmsr = core2_vpmu_do_rdmsr,
-    .do_interrupt = core2_vpmu_do_interrupt,
-    .arch_vpmu_destroy = core2_vpmu_destroy,
-    .arch_vpmu_save = core2_vpmu_save,
-    .arch_vpmu_load = core2_vpmu_load,
-    .arch_vpmu_dump = core2_vpmu_dump
-};
-
-int vmx_vpmu_initialise(struct vcpu *v)
+static int vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
     static bool_t ds_warned;
 
-    if ( vpmu_mode == XENPMU_MODE_OFF )
-        return 0;
-
     if ( v->domain->arch.cpuid->basic.pmu_version <= 1 ||
          v->domain->arch.cpuid->basic.pmu_version >= 6 )
         return -EINVAL;
@@ -893,11 +880,20 @@ int vmx_vpmu_initialise(struct vcpu *v)
     if ( is_pv_vcpu(v) && !core2_vpmu_alloc_resource(v) )
         return -EIO;
 
-    vpmu_set(vpmu, VPMU_INITIALIZED);
-
     return 0;
 }
 
+static const struct arch_vpmu_ops __initconstrel core2_vpmu_ops = {
+    .initialise = vmx_vpmu_initialise,
+    .do_wrmsr = core2_vpmu_do_wrmsr,
+    .do_rdmsr = core2_vpmu_do_rdmsr,
+    .do_interrupt = core2_vpmu_do_interrupt,
+    .arch_vpmu_destroy = core2_vpmu_destroy,
+    .arch_vpmu_save = core2_vpmu_save,
+    .arch_vpmu_load = core2_vpmu_load,
+    .arch_vpmu_dump = core2_vpmu_dump,
+};
+
 const struct arch_vpmu_ops *__init core2_vpmu_init(void)
 {
     unsigned int version = 0;
index aca143c1518a0c0f14bdbb1f254b839e386bb89a..8cfa2cf599887f9fc32ce49d0d9b1e747a44ace8 100644 (file)
@@ -39,6 +39,7 @@
 
 /* Arch specific operations shared by all vpmus */
 struct arch_vpmu_ops {
+    int (*initialise)(struct vcpu *v);
     int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                     uint64_t supported);
     int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
@@ -50,10 +51,8 @@ struct arch_vpmu_ops {
 };
 
 const struct arch_vpmu_ops *core2_vpmu_init(void);
-int vmx_vpmu_initialise(struct vcpu *);
 const struct arch_vpmu_ops *amd_vpmu_init(void);
 const struct arch_vpmu_ops *hygon_vpmu_init(void);
-int svm_vpmu_initialise(struct vcpu *);
 
 struct vpmu_struct {
     u32 flags;