KVM: x86/pmu: Update sample period in pmc_write_counter()
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Fri, 3 Nov 2023 23:05:40 +0000 (16:05 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Thu, 30 Nov 2023 20:52:55 +0000 (12:52 -0800)
Update a PMC's sample period in pmc_write_counter() to deduplicate code
across all callers of pmc_write_counter().  Opportunistically move
pmc_write_counter() into pmc.c now that it's doing more work.  WRMSR isn't
such a hot path that an extra CALL+RET pair will be problematic, and the
order of function definitions needs to be changed anyway, i.e. now is a
convenient time to eat the churn.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20231103230541.352265-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
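
The sample-period math being consolidated here is easiest to see standalone:
the period is the number of events left before the counter wraps, i.e. the
two's complement of the current value masked to the counter width, with a
zero result mapping to a full wrap. A minimal sketch, assuming a hypothetical
48-bit counter (KVM derives the real width per PMC via pmc_bitmask()):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for pmc_bitmask(pmc); the 48-bit width is an assumption. */
static const uint64_t mask = (1ULL << 48) - 1;

/* Mirrors get_sample_period(): events remaining until overflow. */
static uint64_t sample_period(uint64_t counter)
{
	uint64_t period = (-counter) & mask;

	if (!period)            /* counter == 0: a full wrap is needed */
		period = mask + 1;
	return period;
}

int main(void)
{
	/* Written value near overflow: only the remainder is left. */
	printf("%llu\n", (unsigned long long)sample_period(mask - 99)); /* 100 */
	/* Written value of 0: the counter counts through its whole range. */
	printf("%llu\n", (unsigned long long)sample_period(0));         /* 2^48 */
	return 0;
}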
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c06090196b003b977df035a6acfe3adc8ee51176..3725d001239df1ad74625d0018de03429bb587af 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -161,6 +161,15 @@ static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
        return 1;
 }
 
+static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
+{
+       u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
+
+       if (!sample_period)
+               sample_period = pmc_bitmask(pmc) + 1;
+       return sample_period;
+}
+
 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
                                 bool exclude_user, bool exclude_kernel,
                                 bool intr)
@@ -268,6 +277,24 @@ static void pmc_stop_counter(struct kvm_pmc *pmc)
        }
 }
 
+static void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+       if (!pmc->perf_event || pmc->is_paused ||
+           !is_sampling_event(pmc->perf_event))
+               return;
+
+       perf_event_period(pmc->perf_event,
+                         get_sample_period(pmc, pmc->counter));
+}
+
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+       pmc->counter += val - pmc_read_counter(pmc);
+       pmc->counter &= pmc_bitmask(pmc);
+       pmc_update_sample_period(pmc);
+}
+EXPORT_SYMBOL_GPL(pmc_write_counter);
+
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
        u64 a = *(u64 *)pa & mask;
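
Worth noting about the moved pmc_write_counter() above: pmc_read_counter()
folds the live perf event count into pmc->counter (its tail is visible in the
next hunk), so writing stores the delta between the requested value and what a
read currently observes; a subsequent read then returns exactly the written
value, modulo the counter width. A toy model, with the perf contribution
replaced by an assumed 'live' variable and an assumed 48-bit width:

#include <assert.h>
#include <stdint.h>

static const uint64_t mask = (1ULL << 48) - 1; /* assumed 48-bit width */
static uint64_t stored;      /* models pmc->counter */
static uint64_t live = 123;  /* models the running perf count (assumption) */

/* Models pmc_read_counter(): stored value plus live count, masked. */
static uint64_t read_counter(void)
{
	return (stored + live) & mask;
}

/* Same delta trick as pmc_write_counter(). */
static void write_counter(uint64_t val)
{
	stored += val - read_counter();
	stored &= mask;
}

int main(void)
{
	write_counter(42);
	assert(read_counter() == 42); /* holds for any 'live' value */
	return 0;
}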
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index db9a12c0a2ef23badd61ee9f61fc91ff6d5ab9d8..cae85e550f600ebe4d1bb9f6fc14746c0c17ec82 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -74,11 +74,7 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
        return counter & pmc_bitmask(pmc);
 }
 
-static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
-{
-       pmc->counter += val - pmc_read_counter(pmc);
-       pmc->counter &= pmc_bitmask(pmc);
-}
+void pmc_write_counter(struct kvm_pmc *pmc, u64 val);
 
 static inline bool pmc_is_gp(struct kvm_pmc *pmc)
 {
@@ -128,25 +124,6 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
        return NULL;
 }
 
-static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
-{
-       u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
-
-       if (!sample_period)
-               sample_period = pmc_bitmask(pmc) + 1;
-       return sample_period;
-}
-
-static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
-{
-       if (!pmc->perf_event || pmc->is_paused ||
-           !is_sampling_event(pmc->perf_event))
-               return;
-
-       perf_event_period(pmc->perf_event,
-                         get_sample_period(pmc, pmc->counter));
-}
-
 static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 3fd47de14b38a3ca33ce2212c6d289decc5d7d5e..b6a7ad4d69145096d55e610ef8d789b87c2a5fb0 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -161,7 +161,6 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc_write_counter(pmc, data);
-               pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 90c1f7f07e53b07f0c6d9cb92e8f00a6913032c8..a6216c8747291f4c8aeed534117fad1f3808acb8 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -437,11 +437,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc_write_counter(pmc, data);
-                       pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc_write_counter(pmc, data);
-                       pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        reserved_bits = pmu->reserved_bits;