selftests/bpf: introduce tests for dynptr copy kfuncs
author Mykyta Yatsenko <yatsenko@meta.com>
Mon, 12 May 2025 20:53:48 +0000 (21:53 +0100)
committer Alexei Starovoitov <ast@kernel.org>
Tue, 13 May 2025 01:32:47 +0000 (18:32 -0700)
Introduce selftests verifying the newly added dynptr copy kfuncs,
covering both contiguous and non-contiguous memory-backed dynptrs.

Disable test_probe_read_user_str_dynptr, which triggers a bug in
strncpy_from_user_nofault. A patch fixing the issue has been posted [1].

[1] https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/
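
For reference, every copy kfunc under test shares the shape captured by
the bpf_read_dynptr_fn_t typedef in the diff below. A minimal sketch of
one call site, assuming the ringbuf map, the user_ptr global and
SYS_PREFIX from the selftest context (the copy_example skeleton itself
is illustrative, not part of the selftest):

  SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
  int copy_example(void *ctx)
  {
  	struct bpf_dynptr dptr;
  	char buf[64];

  	/* Contiguous case: back the dynptr with ringbuf memory. */
  	if (bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &dptr))
  		return 0;
  	/* Copy sizeof(buf) bytes from the user pointer into the dynptr. */
  	bpf_copy_from_user_dynptr(&dptr, 0, sizeof(buf), user_ptr);
  	bpf_ringbuf_discard_dynptr(&dptr, 0);
  	return 0;
  }

The str variants additionally NUL-terminate the destination and return
the number of bytes copied including the terminator, which is what the
cnt != len checks in the new tests rely on.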

Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20250512205348.191079-4-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/DENYLIST
tools/testing/selftests/bpf/prog_tests/dynptr.c
tools/testing/selftests/bpf/progs/dynptr_success.c

tools/testing/selftests/bpf/DENYLIST
index f748f2c33b22aa705ad76c06b1bf38594b4d68ee..1789a61d0a9b3adae306eb32c71331ff9c9af42e 100644
@@ -1,5 +1,6 @@
 # TEMPORARY
 # Alphabetical order
+dynptr/test_probe_read_user_str_dynptr # disabled until https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ makes it into bpf-next
 get_stack_raw_tp    # spams with kernel warnings until next bpf -> bpf-next merge
 stacktrace_build_id
 stacktrace_build_id_nmi
tools/testing/selftests/bpf/prog_tests/dynptr.c
index e29cc16124c2dfc57d94067992fdb6246f0f650e..62e7ec775f24bf2f5b1ca1d2a8459ec5fda1d32a 100644
@@ -33,10 +33,19 @@ static struct {
        {"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
        {"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
        {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP},
+       {"test_probe_read_user_dynptr", SETUP_XDP_PROG},
+       {"test_probe_read_kernel_dynptr", SETUP_XDP_PROG},
+       {"test_probe_read_user_str_dynptr", SETUP_XDP_PROG},
+       {"test_probe_read_kernel_str_dynptr", SETUP_XDP_PROG},
+       {"test_copy_from_user_dynptr", SETUP_SYSCALL_SLEEP},
+       {"test_copy_from_user_str_dynptr", SETUP_SYSCALL_SLEEP},
+       {"test_copy_from_user_task_dynptr", SETUP_SYSCALL_SLEEP},
+       {"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP},
 };
 
 static void verify_success(const char *prog_name, enum test_setup_type setup_type)
 {
+       char user_data[384] = {[0 ... 382] = 'a', '\0'};
        struct dynptr_success *skel;
        struct bpf_program *prog;
        struct bpf_link *link;
@@ -58,6 +67,10 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
        if (!ASSERT_OK(err, "dynptr_success__load"))
                goto cleanup;
 
+       skel->bss->user_ptr = user_data;
+       skel->data->test_len[0] = sizeof(user_data);
+       memcpy(skel->bss->expected_str, user_data, sizeof(user_data));
+
        switch (setup_type) {
        case SETUP_SYSCALL_SLEEP:
                link = bpf_program__attach(prog);
tools/testing/selftests/bpf/progs/dynptr_success.c
index e1fba28e4a868a750fc7a77e2c3b6db231aa190d..a0391f9da2d49f94f3b1203be2d76f4419ad0547 100644
@@ -680,3 +680,233 @@ out:
        bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
        return XDP_DROP;
 }
+
+void *user_ptr;
+/* Contains a copy of the data pointed to by user_ptr.
+ * Sized 384 so that it does not fit into a single kernel chunk when
+ * copying, yet stays below the maximum BPF stack size (512).
+ */
+char expected_str[384];
+__u32 test_len[7] = {0 /* placeholder */, 0, 1, 2, 255, 256, 257};
+
+typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off,
+                                   u32 size, const void *unsafe_ptr);
+
+/* Returns the offset just before the end of the maximum-sized xdp
+ * fragment. Any write larger than 32 bytes will be split across two
+ * fragments.
+ */
+__u32 xdp_near_frag_end_offset(void)
+{
+       const __u32 headroom = 256;
+       const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info);
+
+       /* 32 bytes before the approximate end of the fragment */
+       return max_frag_size - 32;
+}
+
+/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks
+ * of type bpf_read_dynptr_fn_t to prevent the compiler from generating
+ * indirect calls, which make the program fail to load with an
+ * "unknown opcode" error.
+ */
+static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+       char buf[sizeof(expected_str)];
+       struct bpf_dynptr ptr_buf;
+       int i;
+
+       if (bpf_get_current_pid_tgid() >> 32 != pid)
+               return;
+
+       err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+       bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+               __u32 len = test_len[i];
+
+               err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr);
+               if (len > sizeof(buf))
+                       break;
+               err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+
+               if (err || bpf_memcmp(expected_str, buf, len))
+                       err = 1;
+
+               /* Reset buffer and dynptr */
+               __builtin_memset(buf, 0, sizeof(buf));
+               err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0);
+       }
+       bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_str(void *ptr,
+                                                 bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+       char buf[sizeof(expected_str)];
+       struct bpf_dynptr ptr_buf;
+       __u32 cnt, i;
+
+       if (bpf_get_current_pid_tgid() >> 32 != pid)
+               return;
+
+       bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf);
+
+       bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+               __u32 len = test_len[i];
+
+               cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr);
+               if (cnt != len)
+                       err = 1;
+
+               if (len > sizeof(buf))
+                       continue;
+               err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0);
+               if (!len)
+                       continue;
+               if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+                       err = 1;
+       }
+       bpf_ringbuf_discard_dynptr(&ptr_buf, 0);
+}
+
+static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr,
+                                                 bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+       struct bpf_dynptr ptr_xdp;
+       char buf[sizeof(expected_str)];
+       __u32 off, i;
+
+       if (bpf_get_current_pid_tgid() >> 32 != pid)
+               return;
+
+       off = xdp_near_frag_end_offset();
+       err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+
+       bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+               __u32 len = test_len[i];
+
+               err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+               if (len > sizeof(buf))
+                       continue;
+               err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+               if (err || bpf_memcmp(expected_str, buf, len))
+                       err = 1;
+               /* Reset buffer and dynptr */
+               __builtin_memset(buf, 0, sizeof(buf));
+               err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+       }
+}
+
+static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr,
+                                                     bpf_read_dynptr_fn_t bpf_read_dynptr_fn)
+{
+       struct bpf_dynptr ptr_xdp;
+       char buf[sizeof(expected_str)];
+       __u32 cnt, off, i;
+
+       if (bpf_get_current_pid_tgid() >> 32 != pid)
+               return;
+
+       off = xdp_near_frag_end_offset();
+       err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp);
+       if (err)
+               return;
+
+       bpf_for(i, 0, ARRAY_SIZE(test_len)) {
+               __u32 len = test_len[i];
+
+               cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr);
+               if (cnt != len)
+                       err = 1;
+
+               if (len > sizeof(buf))
+                       continue;
+               err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0);
+
+               if (!len)
+                       continue;
+               if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0')
+                       err = 1;
+
+               __builtin_memset(buf, 0, sizeof(buf));
+               err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0);
+       }
+}
+
+SEC("xdp")
+int test_probe_read_user_dynptr(struct xdp_md *xdp)
+{
+       test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr);
+       if (!err)
+               test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr);
+       return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_dynptr(struct xdp_md *xdp)
+{
+       test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr);
+       if (!err)
+               test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr);
+       return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_user_str_dynptr(struct xdp_md *xdp)
+{
+       test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr);
+       if (!err)
+               test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr);
+       return XDP_PASS;
+}
+
+SEC("xdp")
+int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp)
+{
+       test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr);
+       if (!err)
+               test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr);
+       return XDP_PASS;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_dynptr(void *ctx)
+{
+       test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr);
+       return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_str_dynptr(void *ctx)
+{
+       test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr);
+       return 0;
+}
+
+static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off,
+                                       u32 size, const void *unsafe_ptr)
+{
+       struct task_struct *task = bpf_get_current_task_btf();
+
+       return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task);
+}
+
+static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off,
+                                           u32 size, const void *unsafe_ptr)
+{
+       struct task_struct *task = bpf_get_current_task_btf();
+
+       return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task);
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_dynptr(void *ctx)
+{
+       test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task);
+       return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int test_copy_from_user_task_str_dynptr(void *ctx)
+{
+       test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str);
+       return 0;
+}