From 2d00cab849be5e4c1b2ba82d4fdcfa0af48c340f Mon Sep 17 00:00:00 2001
From: Oliver Upton <oliver.upton@linux.dev>
Date: Wed, 5 Mar 2025 12:26:39 -0800
Subject: [PATCH] drivers/perf: apple_m1: Provide helper for mapping PMUv3
 events

Apple M* parts carry some IMP DEF traps for guest accesses to PMUv3
registers, even though the underlying hardware doesn't implement PMUv3.
This means it is possible to virtualize PMUv3 for KVM guests.

Add a helper for mapping common PMUv3 event IDs onto hardware event IDs,
keeping the implementation-specific crud in the PMU driver rather than
KVM proper. Populate the pmceid_bitmap based on the supported events so
KVM can provide synthetic PMCEID* values to the guest.

Tested-by: Janne Grunau <j@jannau.net>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250305202641.428114-13-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 drivers/perf/apple_m1_cpu_pmu.c | 35 +++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index d6d4ff6da862..6be703619a97 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
 
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
 #include <linux/platform_device.h>
 
 #include <asm/apple_m1_pmu.h>
@@ -174,6 +175,17 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_MISSES]	= M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
 };
 
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event)			\
+	[ARMV8_PMUV3_PERFCTR_##pmuv3_event]	= M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+	[0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1]	= HW_OP_UNSUPPORTED,
+	M1_PMUV3_EVENT_MAP(INST_RETIRED, INST_ALL),
+	M1_PMUV3_EVENT_MAP(CPU_CYCLES, CORE_ACTIVE_CYCLE),
+	M1_PMUV3_EVENT_MAP(BR_RETIRED, INST_BRANCH),
+	M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED, BRANCH_MISPRED_NONSPEC),
+};
+
 /* sysfs definitions */
 static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
 					struct device_attribute *attr,
@@ -558,6 +570,26 @@ static int m2_pmu_map_event(struct perf_event *event)
 	return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
 }
 
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+	u16 m1_event = HW_OP_UNSUPPORTED;
+
+	if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+		m1_event = m1_pmu_pmceid_map[eventsel];
+
+	return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+	unsigned int event;
+
+	for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+		if (m1_pmu_map_pmuv3_event(event) >= 0)
+			set_bit(event, pmu->pmceid_bitmap);
+	}
+}
+
 static void m1_pmu_reset(void *info)
 {
 	int i;
@@ -618,6 +650,9 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
 	cpu_pmu->reset		  = m1_pmu_reset;
 	cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
 
+	cpu_pmu->map_pmuv3_event  = m1_pmu_map_pmuv3_event;
+	m1_pmu_init_pmceid(cpu_pmu);
+
 	bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
-- 
2.49.0
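
As an aside (not part of the patch itself), here is a minimal sketch of how a
hypervisor-side caller might consume the map_pmuv3_event() callback this patch
wires up, translating a guest-programmed PMUv3 common event ID into the IMP DEF
hardware event before programming a counter. Only the struct arm_pmu callback
and the -EOPNOTSUPP convention come from this series; the function name
kvm_example_map_guest_event() and its error handling are illustrative
assumptions.

#include <linux/errno.h>
#include <linux/perf/arm_pmu.h>

/*
 * Illustrative only: resolve a guest-visible PMUv3 common event ID to the
 * hardware event ID the underlying PMU driver uses. Returns the hardware
 * event on success, or -EOPNOTSUPP when the PMU has no counterpart (i.e.
 * the corresponding synthetic PMCEID* bit reads as 0 for the guest).
 */
static int kvm_example_map_guest_event(struct arm_pmu *pmu,
				       unsigned int eventsel)
{
	if (!pmu->map_pmuv3_event)
		return -EOPNOTSUPP;

	return pmu->map_pmuv3_event(eventsel);
}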