selftests/bpf: Add tracing prog private stack tests
author Yonghong Song <yonghong.song@linux.dev>
Tue, 12 Nov 2024 16:39:27 +0000 (08:39 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 13 Nov 2024 00:26:25 +0000 (16:26 -0800)
Some private stack tests are added, including:
  - main prog only with stack size greater than BPF_PSTACK_MIN_SIZE.
  - main prog only with stack size smaller than BPF_PSTACK_MIN_SIZE.
  - prog with one subprog having MAX_BPF_STACK stack size and another
    subprog having a non-zero small stack size.
  - prog with a callback function.
  - prog with an exception in the main prog or a subprog.
  - prog with an async callback without nesting.
  - prog with an async callback with possible nesting.
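
For illustration, the x86_64 tests pin the expected JIT output with
__jited() matchers. The snippet below is condensed from the first test
in this patch: the first two matchers check the private stack prologue
(computing this CPU's private stack frame pointer in %r9), and the last
one checks that the r10-relative BPF stack store is rewritten to go
through %r9 instead of the kernel stack:

  SEC("kprobe")
  __description("Private stack, single prog")
  __success
  __arch_x86_64
  /* prologue: private stack base constant plus per-CPU offset -> %r9 */
  __jited("      movabsq $0x{{.*}}, %r9")
  __jited("      addq    %gs:0x{{.*}}, %r9")
  /* the BPF store at r10-256 lands at -0x100(%r9) */
  __jited("      movq    %rdi, -0x100(%r9)")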

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20241112163927.2224750-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/verifier.c
tools/testing/selftests/bpf/progs/verifier_private_stack.c [new file with mode: 0644]

index 75f7a2ce334b11f2e9ad119dff67f6e2d112f57e..d9f65adb456b6fb2afc469e413b3aa4c8bcb2088 100644 (file)
@@ -61,6 +61,7 @@
 #include "verifier_or_jmp32_k.skel.h"
 #include "verifier_precision.skel.h"
 #include "verifier_prevent_map_lookup.skel.h"
+#include "verifier_private_stack.skel.h"
 #include "verifier_raw_stack.skel.h"
 #include "verifier_raw_tp_writable.skel.h"
 #include "verifier_reg_equal.skel.h"
@@ -188,6 +189,7 @@ void test_verifier_bpf_fastcall(void)         { RUN(verifier_bpf_fastcall); }
 void test_verifier_or_jmp32_k(void)           { RUN(verifier_or_jmp32_k); }
 void test_verifier_precision(void)            { RUN(verifier_precision); }
 void test_verifier_prevent_map_lookup(void)   { RUN(verifier_prevent_map_lookup); }
+void test_verifier_private_stack(void)        { RUN(verifier_private_stack); }
 void test_verifier_raw_stack(void)            { RUN(verifier_raw_stack); }
 void test_verifier_raw_tp_writable(void)      { RUN(verifier_raw_tp_writable); }
 void test_verifier_reg_equal(void)            { RUN(verifier_reg_equal); }
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
new file mode 100644 (file)
index 0000000..b1fbdf1
--- /dev/null
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+/* From include/linux/filter.h */
+#define MAX_BPF_STACK    512
+
+#if defined(__TARGET_ARCH_x86)
+
+struct elem {
+       struct bpf_timer t;
+       char pad[256];
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, struct elem);
+} array SEC(".maps");
+
+SEC("kprobe")
+__description("Private stack, single prog")
+__success
+__arch_x86_64
+__jited("      movabsq $0x{{.*}}, %r9")
+__jited("      addq    %gs:0x{{.*}}, %r9")
+__jited("      movl    $0x2a, %edi")
+__jited("      movq    %rdi, -0x100(%r9)")
+__naked void private_stack_single_prog(void)
+{
+       asm volatile ("                 \
+       r1 = 42;                        \
+       *(u64 *)(r10 - 256) = r1;       \
+       r0 = 0;                         \
+       exit;                           \
+"      ::: __clobber_all);
+}
+
+SEC("raw_tp")
+__description("No private stack")
+__success
+__arch_x86_64
+__jited("      subq    $0x8, %rsp")
+__naked void no_private_stack_nested(void)
+{
+       asm volatile ("                 \
+       r1 = 42;                        \
+       *(u64 *)(r10 - 8) = r1;         \
+       r0 = 0;                         \
+       exit;                           \
+"      ::: __clobber_all);
+}
+
+__used
+__naked static void cumulative_stack_depth_subprog(void)
+{
+       asm volatile ("                         \
+       r1 = 41;                                \
+       *(u64 *)(r10 - 32) = r1;                \
+       call %[bpf_get_smp_processor_id];       \
+       exit;                                   \
+"      :
+       : __imm(bpf_get_smp_processor_id)
+       : __clobber_all);
+}
+
+SEC("kprobe")
+__description("Private stack, subtree > MAX_BPF_STACK")
+__success
+__arch_x86_64
+/* private stack fp for the main prog */
+__jited("      movabsq $0x{{.*}}, %r9")
+__jited("      addq    %gs:0x{{.*}}, %r9")
+__jited("      movl    $0x2a, %edi")
+__jited("      movq    %rdi, -0x200(%r9)")
+__jited("      pushq   %r9")
+__jited("      callq   0x{{.*}}")
+__jited("      popq    %r9")
+__jited("      xorl    %eax, %eax")
+__naked void private_stack_nested_1(void)
+{
+       asm volatile ("                         \
+       r1 = 42;                                \
+       *(u64 *)(r10 - %[max_bpf_stack]) = r1;  \
+       call cumulative_stack_depth_subprog;    \
+       r0 = 0;                                 \
+       exit;                                   \
+"      :
+       : __imm_const(max_bpf_stack, MAX_BPF_STACK)
+       : __clobber_all);
+}
+
+__naked __noinline __used
+static unsigned long loop_callback(void)
+{
+       asm volatile ("                         \
+       call %[bpf_get_prandom_u32];            \
+       r1 = 42;                                \
+       *(u64 *)(r10 - 512) = r1;               \
+       call cumulative_stack_depth_subprog;    \
+       r0 = 0;                                 \
+       exit;                                   \
+"      :
+       : __imm(bpf_get_prandom_u32)
+       : __clobber_common);
+}
+
+SEC("raw_tp")
+__description("Private stack, callback")
+__success
+__arch_x86_64
+/* for func loop_callback */
+__jited("func #1")
+__jited("      endbr64")
+__jited("      nopl    (%rax,%rax)")
+__jited("      nopl    (%rax)")
+__jited("      pushq   %rbp")
+__jited("      movq    %rsp, %rbp")
+__jited("      endbr64")
+__jited("      movabsq $0x{{.*}}, %r9")
+__jited("      addq    %gs:0x{{.*}}, %r9")
+__jited("      pushq   %r9")
+__jited("      callq")
+__jited("      popq    %r9")
+__jited("      movl    $0x2a, %edi")
+__jited("      movq    %rdi, -0x200(%r9)")
+__jited("      pushq   %r9")
+__jited("      callq")
+__jited("      popq    %r9")
+__naked void private_stack_callback(void)
+{
+       asm volatile ("                 \
+       r1 = 1;                         \
+       r2 = %[loop_callback];          \
+       r3 = 0;                         \
+       r4 = 0;                         \
+       call %[bpf_loop];               \
+       r0 = 0;                         \
+       exit;                           \
+"      :
+       : __imm_ptr(loop_callback),
+         __imm(bpf_loop)
+       : __clobber_common);
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in main prog")
+__success __retval(0)
+__arch_x86_64
+__jited("      pushq   %r9")
+__jited("      callq")
+__jited("      popq    %r9")
+int private_stack_exception_main_prog(void)
+{
+       asm volatile ("                 \
+       r1 = 42;                        \
+       *(u64 *)(r10 - 512) = r1;       \
+"      ::: __clobber_common);
+
+       bpf_throw(0);
+       return 0;
+}
+
+__used static int subprog_exception(void)
+{
+       bpf_throw(0);
+       return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in subprog")
+__success __retval(0)
+__arch_x86_64
+__jited("      movq    %rdi, -0x200(%r9)")
+__jited("      pushq   %r9")
+__jited("      callq")
+__jited("      popq    %r9")
+int private_stack_exception_sub_prog(void)
+{
+       asm volatile ("                 \
+       r1 = 42;                        \
+       *(u64 *)(r10 - 512) = r1;       \
+       call subprog_exception;         \
+"      ::: __clobber_common);
+
+       return 0;
+}
+
+int glob;
+__noinline static void subprog2(int *val)
+{
+       glob += val[0] * 2;
+}
+
+__noinline static void subprog1(int *val)
+{
+       int tmp[64] = {};
+
+       tmp[0] = *val;
+       subprog2(tmp);
+}
+
+__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
+{
+       subprog1(key);
+       return 0;
+}
+
+__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
+{
+       return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, not nested")
+__success __retval(0)
+__arch_x86_64
+__jited("      movabsq $0x{{.*}}, %r9")
+int private_stack_async_callback_1(void)
+{
+       struct bpf_timer *arr_timer;
+       int array_key = 0;
+
+       arr_timer = bpf_map_lookup_elem(&array, &array_key);
+       if (!arr_timer)
+               return 0;
+
+       bpf_timer_init(arr_timer, &array, 1);
+       bpf_timer_set_callback(arr_timer, timer_cb2);
+       bpf_timer_start(arr_timer, 0, 0);
+       subprog1(&array_key);
+       return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, potential nesting")
+__success __retval(0)
+__arch_x86_64
+__jited("      subq    $0x100, %rsp")
+int private_stack_async_callback_2(void)
+{
+       struct bpf_timer *arr_timer;
+       int array_key = 0;
+
+       arr_timer = bpf_map_lookup_elem(&array, &array_key);
+       if (!arr_timer)
+               return 0;
+
+       bpf_timer_init(arr_timer, &array, 1);
+       bpf_timer_set_callback(arr_timer, timer_cb1);
+       bpf_timer_start(arr_timer, 0, 0);
+       subprog1(&array_key);
+       return 0;
+}
+
+#else
+
+SEC("kprobe")
+__description("private stack is not supported, use a dummy test")
+__success
+int dummy_test(void)
+{
+       return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
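
As a usage note: once the BPF selftests are built, this suite should be
runnable on its own through the test_progs runner, e.g.:

  cd tools/testing/selftests/bpf
  ./test_progs -t verifier_private_stack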