www.infradead.org Git - users/hch/misc.git/commitdiff
perf stat: Move create_perf_stat_counter() to builtin-stat.c
author Ian Rogers <irogers@google.com>
Thu, 2 Oct 2025 22:07:26 +0000 (15:07 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Fri, 3 Oct 2025 19:49:51 +0000 (16:49 -0300)
The function create_perf_stat_counter is only used in builtin-stat.c
and contains logic about retrying events specific to
builtin-stat.c.

Move the code to builtin-stat.c to tidy this up.

Reviewed-by: James Clark <james.clark@linaro.org>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-stat.c
tools/perf/util/stat.c
tools/perf/util/stat.h

index ab567919b89a2b4bb48d011e00b97318e7ab4d27..75b9979c6c05ca7b5b02df6f75c79fc8eee81b28 100644 (file)
@@ -676,6 +676,62 @@ static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
        return COUNTER_FATAL;
 }
 
+static int create_perf_stat_counter(struct evsel *evsel,
+                                   struct perf_stat_config *config,
+                                   int cpu_map_idx)
+{
+       struct perf_event_attr *attr = &evsel->core.attr;
+       struct evsel *leader = evsel__leader(evsel);
+
+       /* Reset supported flag as creating a stat counter is retried. */
+       attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+                           PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+       /*
+        * The event is part of non trivial group, let's enable
+        * the group read (for leader) and ID retrieval for all
+        * members.
+        */
+       if (leader->core.nr_members > 1)
+               attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+       attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
+
+       /*
+        * Some events get initialized with sample_(period/type) set,
+        * like tracepoints. Clear it up for counting.
+        */
+       attr->sample_period = 0;
+
+       if (config->identifier)
+               attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+       if (config->all_user) {
+               attr->exclude_kernel = 1;
+               attr->exclude_user   = 0;
+       }
+
+       if (config->all_kernel) {
+               attr->exclude_kernel = 0;
+               attr->exclude_user   = 1;
+       }
+
+       /*
+        * Disabling all counters initially, they will be enabled
+        * either manually by us or by kernel via enable_on_exec
+        * set later.
+        */
+       if (evsel__is_group_leader(evsel)) {
+               attr->disabled = 1;
+
+               if (target__enable_on_exec(&target))
+                       attr->enable_on_exec = 1;
+       }
+
+       return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
+                                             evsel->core.threads);
+}
+
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
        int interval = stat_config.interval;
@@ -736,7 +792,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
                if (evsel__is_bperf(counter))
                        continue;
 try_again:
-               if (create_perf_stat_counter(counter, &stat_config, &target,
+               if (create_perf_stat_counter(counter, &stat_config,
                                             evlist_cpu_itr.cpu_map_idx) < 0) {
 
                        /*
@@ -794,7 +850,7 @@ try_again:
                                continue;
 try_again_reset:
                        pr_debug2("reopening weak %s\n", evsel__name(counter));
-                       if (create_perf_stat_counter(counter, &stat_config, &target,
+                       if (create_perf_stat_counter(counter, &stat_config,
                                                     evlist_cpu_itr.cpu_map_idx) < 0) {
 
                                switch (stat_handle_error(counter, errno)) {
index 50b1a92d16df6af26bbf7d3bd7b06302a29b361d..101ed6c497bcaebf08bdf85dd5900a34c5a2b89c 100644 (file)
@@ -716,59 +716,3 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 
        return ret;
 }
-
-int create_perf_stat_counter(struct evsel *evsel,
-                            struct perf_stat_config *config,
-                            struct target *target,
-                            int cpu_map_idx)
-{
-       struct perf_event_attr *attr = &evsel->core.attr;
-       struct evsel *leader = evsel__leader(evsel);
-
-       attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-                           PERF_FORMAT_TOTAL_TIME_RUNNING;
-
-       /*
-        * The event is part of non trivial group, let's enable
-        * the group read (for leader) and ID retrieval for all
-        * members.
-        */
-       if (leader->core.nr_members > 1)
-               attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
-       attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
-
-       /*
-        * Some events get initialized with sample_(period/type) set,
-        * like tracepoints. Clear it up for counting.
-        */
-       attr->sample_period = 0;
-
-       if (config->identifier)
-               attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
-       if (config->all_user) {
-               attr->exclude_kernel = 1;
-               attr->exclude_user   = 0;
-       }
-
-       if (config->all_kernel) {
-               attr->exclude_kernel = 0;
-               attr->exclude_user   = 1;
-       }
-
-       /*
-        * Disabling all counters initially, they will be enabled
-        * either manually by us or by kernel via enable_on_exec
-        * set later.
-        */
-       if (evsel__is_group_leader(evsel)) {
-               attr->disabled = 1;
-
-               if (target__enable_on_exec(target))
-                       attr->enable_on_exec = 1;
-       }
-
-       return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
-                                             evsel->core.threads);
-}
index 4b0f14ae4e5f4fbc1a387f08b420917d6be73b96..34f30a295f8917d85cbdd02e5d253f7bb2f7364b 100644 (file)
@@ -223,10 +223,6 @@ size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
 
-int create_perf_stat_counter(struct evsel *evsel,
-                            struct perf_stat_config *config,
-                            struct target *target,
-                            int cpu_map_idx);
 void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
                            struct target *_target, struct timespec *ts, int argc, const char **argv);