selftests/bpf: Add tests for tail calls with locks and refs
author	Kumar Kartikeya Dwivedi <memxor@gmail.com>
	Sun, 3 Nov 2024 22:59:40 +0000 (14:59 -0800)
committer	Alexei Starovoitov <ast@kernel.org>
	Mon, 4 Nov 2024 00:52:06 +0000 (16:52 -0800)
Add failure tests to ensure bugs don't slip through: lingering locks,
RCU read-side sections, preemption-disabled sections, and acquired
references must all prevent tail calls.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20241103225940.1408302-4-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
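
For contrast, here is a minimal sketch (not part of this patch) of a counterpart the verifier accepts: the spin lock is released before the tail call, so nothing lingers at the call site. The program name accept_tail_call_after_unlock is illustrative only, and the lock/jmp_table definitions simply mirror tailcall_fail.c below.

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))

/* Same global lock and prog array layout as tailcall_fail.c. */
private(A) struct bpf_spin_lock lock;

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 3);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("?tc")
int accept_tail_call_after_unlock(struct __sk_buff *ctx)
{
	bpf_spin_lock(&lock);
	bpf_spin_unlock(&lock);
	/* No lock, RCU section, preempt-disable, or reference is live here,
	 * so the tail call is allowed.
	 */
	bpf_tail_call_static(ctx, &jmp_table, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";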
tools/testing/selftests/bpf/prog_tests/tailcalls.c
tools/testing/selftests/bpf/progs/tailcall_fail.c [new file with mode: 0644]

index 40f22454cf05b0d5c9300f5dc6f420710da1df57..544144620ca61a109e7b30284361c14c621f8453 100644 (file)
@@ -7,6 +7,7 @@
 #include "tailcall_bpf2bpf_hierarchy3.skel.h"
 #include "tailcall_freplace.skel.h"
 #include "tc_bpf2bpf.skel.h"
+#include "tailcall_fail.skel.h"
 
 /* test_tailcall_1 checks basic functionality by patching multiple locations
  * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1646,6 +1647,11 @@ out:
        tc_bpf2bpf__destroy(tc_skel);
 }
 
+static void test_tailcall_failure(void)
+{
+       RUN_TESTS(tailcall_fail);
+}
+
 void test_tailcalls(void)
 {
        if (test__start_subtest("tailcall_1"))
@@ -1698,4 +1704,6 @@ void test_tailcalls(void)
                test_tailcall_freplace();
        if (test__start_subtest("tailcall_bpf2bpf_freplace"))
                test_tailcall_bpf2bpf_freplace();
+       if (test__start_subtest("tailcall_failure"))
+               test_tailcall_failure();
 }
diff --git a/tools/testing/selftests/bpf/progs/tailcall_fail.c b/tools/testing/selftests/bpf/progs/tailcall_fail.c
new file mode 100644 (file)
index 0000000..bc77921
--- /dev/null
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock lock;
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __uint(max_entries, 3);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_tail_call_spin_lock(struct __sk_buff *ctx)
+{
+       bpf_spin_lock(&lock);
+       bpf_tail_call_static(ctx, &jmp_table, 0);
+       return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_tail_call_rcu_lock(struct __sk_buff *ctx)
+{
+       bpf_rcu_read_lock();
+       bpf_tail_call_static(ctx, &jmp_table, 0);
+       bpf_rcu_read_unlock();
+       return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_preempt_disable-ed region")
+int reject_tail_call_preempt_lock(struct __sk_buff *ctx)
+{
+       bpf_guard_preempt();
+       bpf_tail_call_static(ctx, &jmp_table, 0);
+       return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call would lead to reference leak")
+int reject_tail_call_ref(struct __sk_buff *ctx)
+{
+       struct foo { int i; } *p;
+
+       p = bpf_obj_new(typeof(*p));
+       bpf_tail_call_static(ctx, &jmp_table, 0);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
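
These programs are exercised through RUN_TESTS(tailcall_fail) above, which loads each "?tc" program and matches the __failure/__msg annotations against the verifier log. Assuming the standard BPF selftests runner, the new subtest can be run with ./test_progs -t tailcalls/tailcall_failure.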