perf record --off-cpu: Dump off-cpu samples in BPF
author Howard Chu <howardchu95@gmail.com>
Thu, 1 May 2025 02:28:02 +0000 (19:28 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 6 May 2025 00:48:44 +0000 (21:48 -0300)
Collect tid, period, callchain, and cgroup id and dump them when the
off-cpu time threshold is reached.

We don't collect the off-cpu time (the delta) twice: it is either emitted
as a direct sample, or added to the accumulated samples that are dumped at
the end of perf.data.
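
Each direct sample is written to the perf ring buffer as a flat array of
u64 words by off_cpu_dump() in the diff below. As a minimal sketch (not
part of this patch; the function name is illustrative), a user-space
consumer could decode one such record like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /*
     * Word layout produced by off_cpu_dump():
     *   [0]          (u64)tgid << 32 | pid
     *   [1]          off-cpu time in ns (the sample period)
     *   [2]          callchain nr == number of stack entries + 1
     *   [3]          PERF_CONTEXT_USER marker
     *   [4..3+len]   user stack addresses
     *   [4+len]      cgroup id
     */
    static void decode_offcpu_sample(const uint64_t *a, size_t nr_words)
    {
    	uint64_t tgid, tid, chain_nr, cgroup_id;

    	if (nr_words < 5)			/* smallest record: empty callchain */
    		return;

    	tgid = a[0] >> 32;
    	tid = a[0] & 0xffffffff;
    	chain_nr = a[2];			/* marker + stack entries */

    	if (chain_nr + 4 > nr_words)		/* truncated record */
    		return;

    	cgroup_id = a[3 + chain_nr];		/* word right after the callchain */

    	printf("tid=%" PRIu64 " tgid=%" PRIu64 " off-cpu=%" PRIu64 " ns cgroup=%" PRIu64 "\n",
    	       tid, tgid, a[1], cgroup_id);

    	/* a[3] is the PERF_CONTEXT_USER marker; the real entries follow it */
    	for (uint64_t i = 1; i < chain_nr; i++)
    		printf("  ip %#" PRIx64 "\n", a[3 + i]);
    }

The total record size is (chain_nr + 4) * sizeof(u64), which matches the
n * sizeof(u64) length passed to bpf_perf_event_output() below.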

Suggested-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Howard Chu <howardchu95@gmail.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Gautam Menghani <gautam@linux.ibm.com>
Tested-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241108204137.2444151-6-howardchu95@gmail.com
Link: https://lore.kernel.org/r/20250501022809.449767-5-howardchu95@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/bpf_skel/off_cpu.bpf.c

index a5002e25b2593e4ba8332fcc316ea6d62dcd51cf..14cd8881f8bb23c6d627f5f285fb6c3f91f3d6de 100644
 #define MAX_ENTRIES  102400
 
 #define MAX_CPUS  4096
+#define MAX_OFFCPU_LEN 37
+
+// We have a 'struct stack' in vmlinux.h when building with GEN_VMLINUX_H=1
+struct __stack {
+       u64 array[MAX_STACKS];
+};
 
 struct tstamp_data {
        __u32 stack_id;
        __u32 state;
        __u64 timestamp;
+       struct __stack stack;
 };
 
 struct offcpu_key {
@@ -41,6 +48,10 @@ struct {
        __uint(max_entries, MAX_ENTRIES);
 } stacks SEC(".maps");
 
+struct offcpu_data {
+       u64 array[MAX_OFFCPU_LEN];
+};
+
 struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
@@ -48,6 +59,13 @@ struct {
        __uint(max_entries, MAX_CPUS);
 } offcpu_output SEC(".maps");
 
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct offcpu_data));
+       __uint(max_entries, 1);
+} offcpu_payload SEC(".maps");
+
 struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
@@ -106,6 +124,8 @@ const volatile bool uses_cgroup_v1 = false;
 
 int perf_subsys_id = -1;
 
+__u64 offcpu_thresh_ns = 500000000ull;
+
 /*
  * Old kernel used to call it task_struct->state and now it's '__state'.
  * Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -192,6 +212,47 @@ static inline int can_record(struct task_struct *t, int state)
        return 1;
 }
 
+static inline int copy_stack(struct __stack *from, struct offcpu_data *to, int n)
+{
+       int len = 0;
+
+       for (int i = 0; i < MAX_STACKS && from->array[i]; ++i, ++len)
+               to->array[n + 2 + i] = from->array[i];
+
+       return len;
+}
+
+/**
+ * off_cpu_dump - dump off-cpu samples to ring buffer
+ * @data: payload for dumping off-cpu samples
+ * @key: off-cpu data
+ * @stack: stack trace of the task before being scheduled out
+ *
+ * If the threshold of off-cpu time is reached, acquire tid, period, callchain, and cgroup id
+ * information of the task, and dump it as a raw sample to perf ring buffer
+ */
+static int off_cpu_dump(void *ctx, struct offcpu_data *data, struct offcpu_key *key,
+                       struct __stack *stack, __u64 delta)
+{
+       int n = 0, len = 0;
+
+       data->array[n++] = (u64)key->tgid << 32 | key->pid;
+       data->array[n++] = delta;
+
+       /* data->array[n] is callchain->nr (updated later) */
+       data->array[n + 1] = PERF_CONTEXT_USER;
+       data->array[n + 2] = 0;
+       len = copy_stack(stack, data, n);
+
+       /* update length of callchain */
+       data->array[n] = len + 1;
+       n += len + 2;
+
+       data->array[n++] = key->cgroup_id;
+
+       return bpf_perf_event_output(ctx, &offcpu_output, BPF_F_CURRENT_CPU, data, n * sizeof(u64));
+}
+
 static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
                        struct task_struct *next, int state)
 {
@@ -216,6 +277,16 @@ static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
        pelem->state = state;
        pelem->stack_id = stack_id;
 
+       /*
+        * If stacks are successfully collected by bpf_get_stackid(), collect them once more
+        * in task_storage for direct off-cpu sample dumping
+        */
+       if (stack_id > 0 && bpf_get_stack(ctx, &pelem->stack, MAX_STACKS * sizeof(u64), BPF_F_USER_STACK)) {
+               /*
+                * This empty if block is used to avoid 'result unused warning' from bpf_get_stack().
+                * If the collection fails, continue with the logic for the next task.
+                */
+       }
 next:
        pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
 
@@ -230,11 +301,19 @@ next:
                __u64 delta = ts - pelem->timestamp;
                __u64 *total;
 
-               total = bpf_map_lookup_elem(&off_cpu, &key);
-               if (total)
-                       *total += delta;
-               else
-                       bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+               if (delta >= offcpu_thresh_ns) {
+                       int zero = 0;
+                       struct offcpu_data *data = bpf_map_lookup_elem(&offcpu_payload, &zero);
+
+                       if (data)
+                               off_cpu_dump(ctx, data, &key, &pelem->stack, delta);
+               } else {
+                       total = bpf_map_lookup_elem(&off_cpu, &key);
+                       if (total)
+                               *total += delta;
+                       else
+                               bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+               }
 
                /* prevent to reuse the timestamp later */
                pelem->timestamp = 0;
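
Since offcpu_thresh_ns is a plain initialized global rather than a const
volatile constant, it lands in the BPF object's .data section and can be
overridden from user space before the program is loaded. A rough sketch of
that wiring, assuming the usual libbpf skeleton generated from
off_cpu.bpf.c (perf's actual setup lives in tools/perf/util/bpf_off_cpu.c
and may differ):

    #include <bpf/libbpf.h>
    #include "off_cpu.skel.h"	/* generated by bpftool gen skeleton */

    /* Illustrative only: open the skeleton, override the 500ms default
     * threshold, then load and attach the sched_switch program. */
    static struct off_cpu_bpf *open_and_load_off_cpu(unsigned long long thresh_ns)
    {
    	struct off_cpu_bpf *skel = off_cpu_bpf__open();

    	if (!skel)
    		return NULL;

    	/* must be set before load; e.g. 100 * 1000 * 1000 for 100ms */
    	skel->data->offcpu_thresh_ns = thresh_ns;

    	if (off_cpu_bpf__load(skel) || off_cpu_bpf__attach(skel)) {
    		off_cpu_bpf__destroy(skel);
    		return NULL;
    	}
    	return skel;
    }

The direct samples written to offcpu_output are picked up through the
perf ring buffer during the record session, while deltas below the
threshold keep accumulating in the off_cpu map and are dumped at the end
of perf.data, as described in the commit message.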