selftests/bpf: Add BPF object loading tests with explicit token passing
author Andrii Nakryiko <andrii@kernel.org>
Wed, 24 Jan 2024 02:21:23 +0000 (18:21 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Thu, 25 Jan 2024 00:21:03 +0000 (16:21 -0800)
Add a few tests that attempt to load a BPF object containing a privileged
map, a privileged program, and one that requires mandatory BTF uploading
into the kernel (to validate token FD propagation to the BPF_BTF_LOAD
command).
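
For reference, the flow these tests exercise looks roughly like the sketch
below (the object file name is hypothetical; it assumes a BPF FS instance
whose delegate_* mount options, added earlier in this series, permit the
relevant commands):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		/* point libbpf at a delegated BPF FS instance; libbpf
		 * derives a BPF token from it and passes the token FD
		 * along with BPF_BTF_LOAD, BPF_MAP_CREATE, and
		 * BPF_PROG_LOAD
		 */
		.bpf_token_path = "/sys/fs/bpf",
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	if (!obj || bpf_object__load(obj))
		/* handle error */;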

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-27-andrii@kernel.org
tools/testing/selftests/bpf/prog_tests/token.c
tools/testing/selftests/bpf/progs/priv_map.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/priv_prog.c [new file with mode: 0644]

index 185ed2f793153803dea219278f295e21c515d8d7..1594d9b94b13ce41e47367ef060386a037f62242 100644
@@ -14,6 +14,9 @@
 #include <sys/socket.h>
 #include <sys/syscall.h>
 #include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
 
 static inline int sys_mount(const char *dev_name, const char *dir_name,
                            const char *type, unsigned long flags,
@@ -666,6 +669,104 @@ cleanup:
        return err;
 }
 
+static int userns_obj_priv_map(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_map *skel;
+       int err;
+
+       skel = priv_map__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_map__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_map__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_map__load(skel);
+       priv_map__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int userns_obj_priv_prog(int mnt_fd)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct priv_prog *skel;
+       int err;
+
+       skel = priv_prog__open_and_load();
+       if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+               priv_prog__destroy(skel);
+               return -EINVAL;
+       }
+
+       /* use bpf_token_path to provide BPF FS path */
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = priv_prog__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = priv_prog__load(skel);
+       priv_prog__destroy(skel);
+       if (!ASSERT_OK(err, "obj_token_path_load"))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
+ * which should cause struct_ops application to fail, as BTF won't be uploaded
+ * into the kernel, even if STRUCT_OPS programs themselves are allowed
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+       LIBBPF_OPTS(bpf_object_open_opts, opts);
+       char buf[256];
+       struct dummy_st_ops_success *skel;
+       int err;
+
+       snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+       opts.bpf_token_path = buf;
+       skel = dummy_st_ops_success__open_opts(&opts);
+       if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+               return -EINVAL;
+
+       err = dummy_st_ops_success__load(skel);
+       dummy_st_ops_success__destroy(skel);
+       if (expect_success) {
+               if (!ASSERT_OK(err, "obj_token_path_load"))
+                       return -EINVAL;
+       } else /* expect failure */ {
+               if (!ASSERT_ERR(err, "obj_token_path_load"))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd)
+{
+       return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+#define bit(n) (1ULL << (n))
+
 void test_token(void)
 {
        if (test__start_subtest("map_token")) {
@@ -692,4 +793,43 @@ void test_token(void)
 
                subtest_userns(&opts, userns_prog_load);
        }
+       if (test__start_subtest("obj_priv_map")) {
+               struct bpffs_opts opts = {
+                       .cmds = bit(BPF_MAP_CREATE),
+                       .maps = bit(BPF_MAP_TYPE_QUEUE),
+               };
+
+               subtest_userns(&opts, userns_obj_priv_map);
+       }
+       if (test__start_subtest("obj_priv_prog")) {
+               struct bpffs_opts opts = {
+                       .cmds = bit(BPF_PROG_LOAD),
+                       .progs = bit(BPF_PROG_TYPE_KPROBE),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_prog);
+       }
+       if (test__start_subtest("obj_priv_btf_fail")) {
+               struct bpffs_opts opts = {
+                       /* disallow BTF loading */
+                       .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_fail);
+       }
+       if (test__start_subtest("obj_priv_btf_success")) {
+               struct bpffs_opts opts = {
+                       /* allow BTF loading */
+                       .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+                       .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+                       .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+                       .attachs = ~0ULL,
+               };
+
+               subtest_userns(&opts, userns_obj_priv_btf_success);
+       }
 }
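
The skeleton-based helpers above leave token handling to libbpf. An
equivalent explicit sequence, for illustration only, assuming the
bpf_token_create() API and the per-command token_fd options added by
earlier patches in this series (error handling elided; mnt_fd is the
delegated BPF FS mount FD passed to the helpers above):

	int tok_fd, map_fd;

	tok_fd = bpf_token_create(mnt_fd, NULL);

	LIBBPF_OPTS(bpf_map_create_opts, map_opts, .token_fd = tok_fd);
	/* queue maps normally require CAP_BPF; the token supplies it */
	map_fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "priv_map",
				0, sizeof(__u32), 1, &map_opts);
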
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644
index 0000000..9085be5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_map.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+       __uint(type, BPF_MAP_TYPE_QUEUE);
+       __uint(max_entries, 1);
+       __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644
index 0000000..3c7b2b6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe")
+int kprobe_prog(void *ctx)
+{
+       return 1;
+}
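
For completeness, the delegated BPF FS instance whose FD subtest_userns()
hands to each helper is created with the new mount API. A minimal sketch,
assuming the delegate_* mount options introduced earlier in this series
(the helper name is hypothetical, "any" delegates everything, and error
handling is elided):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mount.h>

	static int make_delegated_bpffs(void)
	{
		int fs_fd, mnt_fd;

		fs_fd = syscall(__NR_fsopen, "bpf", 0);
		/* each mask accepts "any" or a bitmask, per this series */
		syscall(__NR_fsconfig, fs_fd, FSCONFIG_SET_STRING,
			"delegate_cmds", "any", 0);
		syscall(__NR_fsconfig, fs_fd, FSCONFIG_SET_STRING,
			"delegate_maps", "any", 0);
		syscall(__NR_fsconfig, fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
		mnt_fd = syscall(__NR_fsmount, fs_fd, 0, 0);
		close(fs_fd);
		return mnt_fd;	/* referenced as /proc/self/fd/<mnt_fd> above */
	}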