selftests/bpf: Add tests for cgrp_local_storage with local percpu kptr
author Yonghong Song <yonghong.song@linux.dev>
Sun, 27 Aug 2023 15:28:27 +0000 (08:28 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 8 Sep 2023 15:42:18 +0000 (08:42 -0700)
Add a non-sleepable cgrp_local_storage test with a local percpu kptr.
The test allocates percpu data, assigns values to the data, and then
reads the values back across all possible CPUs. The percpu data is
de-allocated when the map is freed.
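
In condensed form, the pattern the new BPF program exercises is roughly
the sketch below. This is a single-program illustration only (the
program name percpu_kptr_sketch is made up for the sketch); the actual
test in the diff splits setup, per-CPU writes, and summing across three
fentry programs and checks the results from user space. The types,
the cgrp map, and the kfuncs (bpf_percpu_obj_new(), bpf_kptr_xchg(),
bpf_per_cpu_ptr(), bpf_percpu_obj_drop()) are the same ones used in the
added selftest:

  #include "bpf_experimental.h"

  struct val_t {
          long b, c, d;
  };

  struct elem {
          long sum;
          struct val_t __percpu_kptr *pc;  /* percpu kptr held in the map value */
  };

  struct {
          __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
          __uint(map_flags, BPF_F_NO_PREALLOC);
          __type(key, int);
          __type(value, struct elem);
  } cgrp SEC(".maps");

  SEC("fentry/bpf_fentry_test1")
  int BPF_PROG(percpu_kptr_sketch)
  {
          struct task_struct *task = bpf_get_current_task_btf();
          struct val_t __percpu_kptr *p;
          struct val_t *v;
          struct elem *e;

          e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
          if (!e)
                  return 0;

          /* Allocate a percpu object and publish it in the cgroup storage. */
          p = bpf_percpu_obj_new(struct val_t);
          if (!p)
                  return 0;
          p = bpf_kptr_xchg(&e->pc, p);
          if (p)
                  /* An old object was swapped out; drop it. */
                  bpf_percpu_obj_drop(p);

          /* Access CPU 0's copy of the object.  No explicit free is done
           * here; the object is dropped when the map is freed.
           */
          p = e->pc;
          if (!p)
                  return 0;
          v = bpf_per_cpu_ptr(p, 0);
          if (v)
                  v->c = 1;
          return 0;
  }

  char _license[] SEC("license") = "GPL";

The new subtest can be exercised with the usual selftests runner, e.g.
./test_progs -t percpu_alloc (subtest cgrp_local_storage).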

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230827152827.2001784-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c [new file with mode: 0644]

index 0fb536822f14a063c624ae862d36398984381a2f..41bf784a4bb3caf1cd39d41485e9c314bf6a8edb 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 #include "percpu_alloc_array.skel.h"
+#include "percpu_alloc_cgrp_local_storage.skel.h"
 
 static void test_array(void)
 {
@@ -69,10 +70,49 @@ out:
        percpu_alloc_array__destroy(skel);
 }
 
+static void test_cgrp_local_storage(void)
+{
+       struct percpu_alloc_cgrp_local_storage *skel;
+       int err, cgroup_fd, prog_fd;
+       LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+       cgroup_fd = test__join_cgroup("/percpu_alloc");
+       if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /percpu_alloc"))
+               return;
+
+       skel = percpu_alloc_cgrp_local_storage__open();
+       if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
+               goto close_fd;
+
+       skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+
+       err = percpu_alloc_cgrp_local_storage__load(skel);
+       if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__load"))
+               goto destroy_skel;
+
+       err = percpu_alloc_cgrp_local_storage__attach(skel);
+       if (!ASSERT_OK(err, "percpu_alloc_cgrp_local_storage__attach"))
+               goto destroy_skel;
+
+       prog_fd = bpf_program__fd(skel->progs.test_cgrp_local_storage_1);
+       err = bpf_prog_test_run_opts(prog_fd, &topts);
+       ASSERT_OK(err, "test_run cgrp_local_storage 1-3");
+       ASSERT_EQ(topts.retval, 0, "test_run cgrp_local_storage 1-3");
+       ASSERT_EQ(skel->bss->cpu0_field_d, 2, "cpu0_field_d");
+       ASSERT_EQ(skel->bss->sum_field_c, 1, "sum_field_c");
+
+destroy_skel:
+       percpu_alloc_cgrp_local_storage__destroy(skel);
+close_fd:
+       close(cgroup_fd);
+}
+
 void test_percpu_alloc(void)
 {
        if (test__start_subtest("array"))
                test_array();
        if (test__start_subtest("array_sleepable"))
                test_array_sleepable();
+       if (test__start_subtest("cgrp_local_storage"))
+               test_cgrp_local_storage();
 }
diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c
new file mode 100644 (file)
index 0000000..1c36a24
--- /dev/null
@@ -0,0 +1,105 @@
+#include "bpf_experimental.h"
+
+struct val_t {
+       long b, c, d;
+};
+
+struct elem {
+       long sum;
+       struct val_t __percpu_kptr *pc;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+       __type(key, int);
+       __type(value, struct elem);
+} cgrp SEC(".maps");
+
+const volatile int nr_cpus;
+
+/* Initialize the percpu object */
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(test_cgrp_local_storage_1)
+{
+       struct task_struct *task;
+       struct val_t __percpu_kptr *p;
+       struct elem *e;
+
+       task = bpf_get_current_task_btf();
+       e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
+                                BPF_LOCAL_STORAGE_GET_F_CREATE);
+       if (!e)
+               return 0;
+
+       p = bpf_percpu_obj_new(struct val_t);
+       if (!p)
+               return 0;
+
+       p = bpf_kptr_xchg(&e->pc, p);
+       if (p)
+               bpf_percpu_obj_drop(p);
+
+       return 0;
+}
+
+/* Percpu data collection */
+SEC("fentry/bpf_fentry_test2")
+int BPF_PROG(test_cgrp_local_storage_2)
+{
+       struct task_struct *task;
+       struct val_t __percpu_kptr *p;
+       struct val_t *v;
+       struct elem *e;
+
+       task = bpf_get_current_task_btf();
+       e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+       if (!e)
+               return 0;
+
+       p = e->pc;
+       if (!p)
+               return 0;
+
+       v = bpf_per_cpu_ptr(p, 0);
+       if (!v)
+               return 0;
+       v->c = 1;
+       v->d = 2;
+       return 0;
+}
+
+int cpu0_field_d, sum_field_c;
+
+/* Summarize percpu data collection */
+SEC("fentry/bpf_fentry_test3")
+int BPF_PROG(test_cgrp_local_storage_3)
+{
+       struct task_struct *task;
+       struct val_t __percpu_kptr *p;
+       struct val_t *v;
+       struct elem *e;
+       int i;
+
+       task = bpf_get_current_task_btf();
+       e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
+       if (!e)
+               return 0;
+
+       p = e->pc;
+       if (!p)
+               return 0;
+
+       bpf_for(i, 0, nr_cpus) {
+               v = bpf_per_cpu_ptr(p, i);
+               if (v) {
+                       if (i == 0)
+                               cpu0_field_d = v->d;
+                       sum_field_c += v->c;
+               }
+       }
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";