Topdown metric events require grouping with a slots event. In perf
metrics this is currently achieved by metrics adding an unnecessary
"0 * tma_info_thread_slots" term. New TMA metrics trigger optimizations
of the metric expression that remove the event and break the metric due
to the missing but required event. Add a pass, immediately before
parsed events are sorted and their groups fixed, that inserts a slots
event if one is missing. Update test expectations to match this.
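
For example, on Intel CPUs with perf metrics support, a topdown metric
event only counts when grouped with a leading slots event, along the
lines of (illustrative; the events available vary by CPU model):

  $ perf stat -e '{slots,topdown-retiring}' -a sleep 1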
Signed-off-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20250719030517.1990983-15-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
/* Default ordering by insertion index. */
return lhs->core.idx - rhs->core.idx;
}
+
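+/*
+ * Topdown metric events require grouping with a slots event. If the
+ * parsed list has a topdown metric event but no slots event, append a
+ * slots event so the later sort and group fix-up can form a valid group.
+ */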
+int arch_evlist__add_required_events(struct list_head *list)
+{
+ struct evsel *pos, *metric_event = NULL;
+ int idx = 0;
+
+ if (!topdown_sys_has_perf_metrics())
+ return 0;
+
+ list_for_each_entry(pos, list, core.node) {
+ if (arch_is_topdown_slots(pos)) {
+ /* Slots event already present, nothing to do. */
+ return 0;
+ }
+ if (metric_event == NULL && arch_is_topdown_metrics(pos))
+ metric_event = pos;
+ idx++;
+ }
+ if (metric_event == NULL) {
+ /* No topdown metric events, nothing to do. */
+ return 0;
+ }
+ return topdown_insert_slots_event(list, idx + 1, metric_event);
+}
return false;
}
+
+/*
+ * Make a copy of the topdown metric event metric_event with the given index but
+ * change its configuration to be a topdown slots event. Copying from
+ * metric_event ensures modifiers are the same.
+ */
+int topdown_insert_slots_event(struct list_head *list, int idx, struct evsel *metric_event)
+{
+ struct evsel *evsel = evsel__new_idx(&metric_event->core.attr, idx);
+
+ if (!evsel)
+ return -ENOMEM;
+
+ evsel->core.attr.config = TOPDOWN_SLOTS;
+ evsel->core.cpus = perf_cpu_map__get(metric_event->core.cpus);
+ evsel->core.pmu_cpus = perf_cpu_map__get(metric_event->core.pmu_cpus);
+ evsel->core.is_pmu_core = true;
+ evsel->pmu = metric_event->pmu;
+ evsel->name = strdup("slots");
+ evsel->precise_max = metric_event->precise_max;
+ evsel->sample_read = metric_event->sample_read;
+ evsel->weak_group = metric_event->weak_group;
+ evsel->bpf_counter = metric_event->bpf_counter;
+ evsel->retire_lat = metric_event->retire_lat;
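+ /* Group the slots event with the metric event's leader. */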
+ evsel__set_leader(evsel, evsel__leader(metric_event));
+ list_add_tail(&evsel->core.node, list);
+ return 0;
+}
#include <stdbool.h>
struct evsel;
+struct list_head;
bool topdown_sys_has_perf_metrics(void);
bool arch_is_topdown_slots(const struct evsel *evsel);
bool arch_is_topdown_metrics(const struct evsel *evsel);
+int topdown_insert_slots_event(struct list_head *list, int idx, struct evsel *metric_event);
#endif
static int test__checkevent_pmu_events(struct evlist *evlist)
{
- struct evsel *evsel = evlist__first(evlist);
+ struct evsel *evsel;
- TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type ||
- strcmp(evsel->pmu->name, "cpu"));
- TEST_ASSERT_VAL("wrong exclude_user",
- !evsel->core.attr.exclude_user);
- TEST_ASSERT_VAL("wrong exclude_kernel",
- evsel->core.attr.exclude_kernel);
- TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
- TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
- TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ TEST_ASSERT_VAL("wrong number of entries", 1 <= evlist->core.nr_entries);
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type ||
+ strcmp(evsel->pmu->name, "cpu"));
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+ }
return TEST_OK;
}
void evlist__remove(struct evlist *evlist, struct evsel *evsel);
int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs);
+int arch_evlist__add_required_events(struct list_head *list);
int evlist__add_dummy(struct evlist *evlist);
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
return arch_evlist__cmp(lhs, rhs);
}
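+/*
+ * Weak default used by architectures with no required events; x86
+ * overrides this to insert a topdown slots event when needed.
+ */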
+int __weak arch_evlist__add_required_events(struct list_head *list __maybe_unused)
+{
+ return 0;
+}
+
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
int idx = 0, force_grouped_idx = -1;
struct evsel *force_grouped_leader = NULL;
bool last_event_was_forced_leader = false;
+ /* On x86, topdown metric events require a slots event in their group. */
+ int ret = arch_evlist__add_required_events(list);
+
+ if (ret)
+ return ret;
+
/*
* Compute index to insert ungrouped events at. Place them where the
* first ungrouped event appears.