www.infradead.org Git - users/hch/misc.git/commitdiff
bpf, arm64: Add JIT support for timed may_goto
author: Puranjay Mohan <puranjay@kernel.org>
Wed, 27 Aug 2025 11:32:43 +0000 (11:32 +0000)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 28 Aug 2025 00:16:22 +0000 (17:16 -0700)
When verifier sees a timed may_goto instruction, it emits a call to
arch_bpf_timed_may_goto() with a stack offset in BPF_REG_AX (arm64 r9)
and expects a count value to be returned in the same register. The
verifier doesn't save or restore any registers before emitting this
call.

arch_bpf_timed_may_goto() should act as a trampoline to call
bpf_check_timed_may_goto() with AAPCS64 calling convention.

To support this custom calling convention, implement
arch_bpf_timed_may_goto() in assembly and make sure BPF caller saved
registers are saved and restored, call bpf_check_timed_may_goto with
arm64 calling convention where first argument and return value both are
in x0, then put the result back into BPF_REG_AX before returning.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Xu Kuohai <xukuohai@huawei.com>
Link: https://lore.kernel.org/r/20250827113245.52629-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/arm64/net/Makefile
arch/arm64/net/bpf_jit_comp.c
arch/arm64/net/bpf_timed_may_goto.S [new file with mode: 0644]

index 5c540efb7d9b9aeea54000a3ddba81da6499e571..3ae382bfca8797a43e82966c01ddb02f819d233a 100644 (file)
@@ -2,4 +2,4 @@
 #
 # ARM64 networking code
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
index 52ffe115a8c47cf0a5c1b69441aa6d00c0eda2f6..a98b8132479a757349501ce54ad63a648c314660 100644 (file)
@@ -1558,7 +1558,13 @@ emit_cond_jmp:
                if (ret < 0)
                        return ret;
                emit_call(func_addr, ctx);
-               emit(A64_MOV(1, r0, A64_R(0)), ctx);
+               /*
+                * Call to arch_bpf_timed_may_goto() is emitted by the
+                * verifier and called with custom calling convention with
+                * first argument and return value in BPF_REG_AX (x9).
+                */
+               if (func_addr != (u64)arch_bpf_timed_may_goto)
+                       emit(A64_MOV(1, r0, A64_R(0)), ctx);
                break;
        }
        /* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
        return true;
 }
 
+bool bpf_jit_supports_timed_may_goto(void)
+{
+       return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
        switch (imm) {
diff --git a/arch/arm64/net/bpf_timed_may_goto.S b/arch/arm64/net/bpf_timed_may_goto.S
new file mode 100644 (file)
index 0000000..894cfcd
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Puranjay Mohan <puranjay@kernel.org> */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+       /* Allocate stack space and emit frame record */
+       stp     x29, x30, [sp, #-64]!
+       mov     x29, sp
+
+       /* Save BPF registers R0 - R5 (x7, x0-x4)*/
+       stp     x7, x0, [sp, #16]
+       stp     x1, x2, [sp, #32]
+       stp     x3, x4, [sp, #48]
+
+       /*
+        * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
+        * (x25) to get the pointer to count and timestamp and pass it as the
+        * first argument in x0.
+        *
+        * Before generating the call to arch_bpf_timed_may_goto, the verifier
+        * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
+        * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
+        * jit in this case.
+        */
+       add     x0, x9, x25
+       bl      bpf_check_timed_may_goto
+       /* BPF_REG_AX(x9) will be stored into count, so move return value to it. */
+       mov     x9, x0
+
+       /* Restore BPF registers R0 - R5 (x7, x0-x4) */
+       ldp     x7, x0, [sp, #16]
+       ldp     x1, x2, [sp, #32]
+       ldp     x3, x4, [sp, #48]
+
+       /* Restore FP and LR */
+       ldp     x29, x30, [sp], #64
+
+       ret
+SYM_FUNC_END(arch_bpf_timed_may_goto)