perf/arm_pmuv3: Add PMUv3.9 per counter EL0 access control
author     Rob Herring (Arm) <robh@kernel.org>      Wed, 2 Oct 2024 18:43:24 +0000 (13:43 -0500)
committer  Will Deacon <will@kernel.org>            Mon, 28 Oct 2024 17:27:15 +0000 (17:27 +0000)
Armv8.9/9.4 PMUv3.9 adds per-counter EL0 access controls. Per-counter
access is enabled with the UEN bit in the PMUSERENR_EL1 register.
Individual counters are enabled/disabled in the PMUACR_EL1 register.
When UEN is set, the CR/ER bits control EL0 write access and must be
set in order to disable write access.

With the access controls, the clearing of unused counters can be
skipped.

KVM also configures PMUSERENR_EL1 in order to trap to EL2. UEN does not
need to be set for it since only PMUv3.5 is exposed to guests.

Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Link: https://lore.kernel.org/r/20241002184326.1105499-1-robh@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
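
As a rough illustration of the flow described above (not part of the
patch), a PMUv3.9 driver grants EL0 access in two steps, using the
write_pmuacr() helper introduced below and the driver's existing
update_pmuserenr(); the hypothetical grant_el0_counter_access() might
look like:

	/*
	 * Illustrative sketch only: expose the counters in
	 * @counter_mask to EL0 on a PMUv3.9 CPU.
	 */
	static void grant_el0_counter_access(u64 counter_mask)
	{
		/* One bit per counter: P0-P30, C (bit 31), F0 (bit 32). */
		write_pmuacr(counter_mask);

		/*
		 * UEN switches PMUSERENR_EL1 to per-counter mode; CR/ER
		 * must also be set so that EL0 access to the granted
		 * counters is read-only rather than read/write.
		 */
		update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR |
				 ARMV8_PMU_USERENR_UEN);
	}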
arch/arm/include/asm/arm_pmuv3.h
arch/arm64/include/asm/arm_pmuv3.h
arch/arm64/tools/sysreg
drivers/perf/arm_pmuv3.c
include/linux/perf/arm_pmuv3.h

diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index f63ba8986b248881bd505974b77c935d53c4606a..d242b5e1ca0d98044d831780318ead10f935f1bb 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -231,6 +231,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
 #define ARMV8_PMU_DFR_VER_V3P1      0x4
 #define ARMV8_PMU_DFR_VER_V3P4      0x5
 #define ARMV8_PMU_DFR_VER_V3P5      0x6
+#define ARMV8_PMU_DFR_VER_V3P9      0x9
 #define ARMV8_PMU_DFR_VER_IMP_DEF   0xF
 
 static inline bool pmuv3_implemented(int pmuver)
@@ -249,6 +250,11 @@ static inline bool is_pmuv3p5(int pmuver)
        return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
 }
 
+static inline bool is_pmuv3p9(int pmuver)
+{
+       return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
+}
+
 static inline u64 read_pmceid0(void)
 {
        u64 val = read_sysreg(PMCEID0);
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index 468a049bc63b59f75b6aa2e828021d8bb6b9cf6a..8a777dec8d88ab78b1a5eb2502c521780c25cdfb 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -152,6 +152,11 @@ static inline void write_pmuserenr(u32 val)
        write_sysreg(val, pmuserenr_el0);
 }
 
+static inline void write_pmuacr(u64 val)
+{
+       write_sysreg_s(val, SYS_PMUACR_EL1);
+}
+
 static inline u64 read_pmceid0(void)
 {
        return read_sysreg(pmceid0_el0);
@@ -178,4 +183,9 @@ static inline bool is_pmuv3p5(int pmuver)
        return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
 }
 
+static inline bool is_pmuv3p9(int pmuver)
+{
+       return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P9;
+}
+
 #endif
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 8d637ac4b7c6b9c266156b4a8518d8d64c555461..74fb5af91d4f384a73e4c7664365be4c3db38e0b 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1238,6 +1238,7 @@ UnsignedEnum      11:8    PMUVer
        0b0110  V3P5
        0b0111  V3P7
        0b1000  V3P8
+       0b1001  V3P9
        0b1111  IMP_DEF
 EndEnum
 UnsignedEnum   7:4     TraceVer
@@ -2178,6 +2179,13 @@ Field    4       P
 Field  3:0     ALIGN
 EndSysreg
 
+Sysreg PMUACR_EL1      3       0       9       14      4
+Res0   63:33
+Field  32      F0
+Field  31      C
+Field  30:0    P
+EndSysreg
+
 Sysreg PMSELR_EL0      3       3       9       12      5
 Res0   63:5
 Field  4:0     SEL
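
The PMUACR_EL1 field layout above mirrors the driver's counter
indexing (the cycle counter at index 31, the instruction counter at
index 32), which is why the driver change below can build the register
value with plain BIT(i). A hedged sketch, assuming the driver's
ARMV8_PMU_CYCLE_IDX == 31 and ARMV8_PMU_INSTR_IDX == 32:

	/* Grant EL0 access to event counter 2, the cycle counter (C)
	 * and the instruction counter (F0). */
	u64 mask = BIT(2)                       /* P2 */
		 | BIT(ARMV8_PMU_CYCLE_IDX)     /* C, bit 31 */
		 | BIT(ARMV8_PMU_INSTR_IDX);    /* F0, bit 32 */
	write_pmuacr(mask);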
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0afe02f879b45a8600b9cea1bfd88e6a68096a67..bb93d32b86ea1613431f2252452249d26f65e6a8 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -770,18 +770,27 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
        int i;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 
-       /* Clear any unused counters to avoid leaking their contents */
-       for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
-                           ARMPMU_MAX_HWEVENTS) {
-               if (i == ARMV8_PMU_CYCLE_IDX)
-                       write_pmccntr(0);
-               else if (i == ARMV8_PMU_INSTR_IDX)
-                       write_pmicntr(0);
-               else
-                       armv8pmu_write_evcntr(i, 0);
+       if (is_pmuv3p9(cpu_pmu->pmuver)) {
+               u64 mask = 0;
+               for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
+                       if (armv8pmu_event_has_user_read(cpuc->events[i]))
+                               mask |= BIT(i);
+               }
+               write_pmuacr(mask);
+       } else {
+               /* Clear any unused counters to avoid leaking their contents */
+               for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+                                   ARMPMU_MAX_HWEVENTS) {
+                       if (i == ARMV8_PMU_CYCLE_IDX)
+                               write_pmccntr(0);
+                       else if (i == ARMV8_PMU_INSTR_IDX)
+                               write_pmicntr(0);
+                       else
+                               armv8pmu_write_evcntr(i, 0);
+               }
        }
 
-       update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+       update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_UEN);
 }
 
 static void armv8pmu_enable_event(struct perf_event *event)
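
Once armv8pmu_enable_user_access() has run, EL0 can read the granted
counters directly. A hedged userspace sketch, not part of the patch
(assumes idx is a hardware counter index that was granted through
PMUACR_EL1; access to any other counter traps):

	/* Read event counter @idx from EL0 via the selector register. */
	static inline unsigned long read_evcntr_el0(unsigned int idx)
	{
		unsigned long val;

		asm volatile("msr pmselr_el0, %1\n\t"
			     "isb\n\t"
			     "mrs %0, pmxevcntr_el0"
			     : "=r" (val)
			     : "r" ((unsigned long)idx));
		return val;
	}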
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 3372c1b56486fcfcd2068ff3a49df35a35e8eeb5..d698efba28a27634be81e80a36861c78944a2951 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
 #define ARMV8_PMU_USERENR_SW   (1 << 1) /* PMSWINC can be written at EL0 */
 #define ARMV8_PMU_USERENR_CR   (1 << 2) /* Cycle counter can be read at EL0 */
 #define ARMV8_PMU_USERENR_ER   (1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN  (1 << 4) /* Fine grained per counter access at EL0 */
 /* Mask for writable bits */
 #define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
                                 ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)