xenbits.xensource.com Git - people/pauldu/linux.git/commitdiff
KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run
author: Raghavendra Rao Ananta <rananta@google.com>
Fri, 20 Oct 2023 21:40:46 +0000 (21:40 +0000)
committer: Oliver Upton <oliver.upton@linux.dev>
Tue, 24 Oct 2023 22:59:30 +0000 (22:59 +0000)
For unimplemented counters, the registers PM{C,I}NTEN{SET,CLR}
and PMOVS{SET,CLR} are expected to have the corresponding bits RAZ.
Hence, to ensure correct PMU emulation by KVM, mask out the RES0 bits.
Defer this work to the point that userspace can no longer change the
number of advertised PMCs.

Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231020214053.2144305-7-rananta@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/arm.c
arch/arm64/kvm/pmu-emul.c
include/kvm/arm_pmu.h

index 624e62b2345a1437759e3d52f0dd54ca4463da37..f6307afc5036b5692f35bcbb104e48683b701a82 100644 (file)
@@ -801,7 +801,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
                }
 
                if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-                       kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+                       kvm_vcpu_reload_pmu(vcpu);
 
                if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
                        kvm_vcpu_pmu_restore_guest(vcpu);
index 6d4763d7308f25aea43a6f4e8a27910c9ead35e0..6b271c542409ffcdfdb2bd72d5af037b722d6518 100644 (file)
@@ -785,6 +785,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return val & mask;
 }
 
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
+{
+       u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+       kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+
+       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
+       __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
+       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+}
+
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
        if (!kvm_vcpu_has_pmu(vcpu))
index 96e15f4bb18c955a91c5bb17b1715a75a4361b83..abb1e8215ca8d7b236c607c11c9468a07a5cd3af 100644 (file)
@@ -13,7 +13,6 @@
 #define ARMV8_PMU_CYCLE_IDX            (ARMV8_PMU_MAX_COUNTERS - 1)
 
 #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
-
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
@@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -171,6 +171,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
 static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
        return 0;