From: Puranjay Mohan
Date: Wed, 27 Aug 2025 11:32:43 +0000 (+0000)
Subject: bpf, arm64: Add JIT support for timed may_goto
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=16175375da369fcfdcc0127c9ca39b767ae4f885;p=users%2Fhch%2Fmisc.git

bpf, arm64: Add JIT support for timed may_goto

When the verifier sees a timed may_goto instruction, it emits a call to
arch_bpf_timed_may_goto() with a stack offset in BPF_REG_AX (arm64 x9)
and expects a count value to be returned in the same register. The
verifier doesn't save or restore any registers before emitting this
call.

arch_bpf_timed_may_goto() should therefore act as a trampoline that
calls bpf_check_timed_may_goto() with the AAPCS64 calling convention.

To support this custom calling convention, implement
arch_bpf_timed_may_goto() in assembly: save and restore the BPF
caller-saved registers, call bpf_check_timed_may_goto() with the arm64
calling convention, where the first argument and the return value are
both in x0, and then move the result back into BPF_REG_AX before
returning.

Signed-off-by: Puranjay Mohan
Acked-by: Kumar Kartikeya Dwivedi
Acked-by: Xu Kuohai
Link: https://lore.kernel.org/r/20250827113245.52629-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov
---

diff --git a/arch/arm64/net/Makefile b/arch/arm64/net/Makefile
index 5c540efb7d9b..3ae382bfca87 100644
--- a/arch/arm64/net/Makefile
+++ b/arch/arm64/net/Makefile
@@ -2,4 +2,4 @@
 #
 # ARM64 networking code
 #
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 52ffe115a8c4..a98b8132479a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1558,7 +1558,13 @@ emit_cond_jmp:
 		if (ret < 0)
 			return ret;
 		emit_call(func_addr, ctx);
-		emit(A64_MOV(1, r0, A64_R(0)), ctx);
+		/*
+		 * Call to arch_bpf_timed_may_goto() is emitted by the
+		 * verifier and called with custom calling convention with
+		 * first argument and return value in BPF_REG_AX (x9).
+		 */
+		if (func_addr != (u64)arch_bpf_timed_may_goto)
+			emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
 	}
 	/* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
 	return true;
 }
 
+bool bpf_jit_supports_timed_may_goto(void)
+{
+	return true;
+}
+
 bool bpf_jit_inlines_helper_call(s32 imm)
 {
 	switch (imm) {
diff --git a/arch/arm64/net/bpf_timed_may_goto.S b/arch/arm64/net/bpf_timed_may_goto.S
new file mode 100644
index 000000000000..894cfcd7b241
--- /dev/null
+++ b/arch/arm64/net/bpf_timed_may_goto.S
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Puranjay Mohan */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(arch_bpf_timed_may_goto)
+	/* Allocate stack space and emit frame record */
+	stp x29, x30, [sp, #-64]!
+	mov x29, sp
+
+	/* Save BPF registers R0 - R5 (x7, x0-x4) */
+	stp x7, x0, [sp, #16]
+	stp x1, x2, [sp, #32]
+	stp x3, x4, [sp, #48]
+
+	/*
+	 * Stack depth was passed in BPF_REG_AX (x9), add it to the BPF_FP
+	 * (x25) to get the pointer to count and timestamp and pass it as the
+	 * first argument in x0.
+	 *
+	 * Before generating the call to arch_bpf_timed_may_goto, the verifier
+	 * generates a load instruction using FP, i.e. REG_AX = *(u64 *)(FP -
+	 * stack_off_cnt), so BPF_REG_FP (x25) is always set up by the arm64
+	 * JIT in this case.
+	 */
+	add x0, x9, x25
+	bl bpf_check_timed_may_goto
+	/* BPF_REG_AX (x9) will be stored into count, so move return value to it. */
+	mov x9, x0
+
+	/* Restore BPF registers R0 - R5 (x7, x0-x4) */
+	ldp x7, x0, [sp, #16]
+	ldp x1, x2, [sp, #32]
+	ldp x3, x4, [sp, #48]
+
+	/* Restore FP and LR */
+	ldp x29, x30, [sp], #64
+
+	ret
+SYM_FUNC_END(arch_bpf_timed_may_goto)
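
A note for readers unfamiliar with the mechanism: below is a rough
userspace C model of the contract this trampoline implements. The
verifier keeps a count/timestamp pair in the BPF stack frame (its
offset is what arrives in BPF_REG_AX / x9), the JIT'ed loop body burns
down the count locally, and bpf_check_timed_may_goto() either refreshes
the count or returns 0 once the time budget is spent. This is a sketch
under those assumptions only; timed_state, check_timed_may_goto(), the
refill value, and the 250 ms budget are hypothetical stand-ins, not the
kernel's definitions.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for the count/timestamp pair that lives in the
 * BPF stack frame at FP + offset (the offset is passed in BPF_REG_AX).
 */
struct timed_state {
	uint64_t count;
	uint64_t timestamp;	/* ns; 0 means not armed yet */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Plays the role of bpf_check_timed_may_goto(): refresh the loop count
 * while the time budget holds, return 0 once it is exhausted. In the
 * patch, the trampoline moves this return value back into x9 so the
 * JIT'ed code finds it in BPF_REG_AX.
 */
static uint64_t check_timed_may_goto(struct timed_state *s)
{
	const uint64_t budget_ns = 250000000ull;	/* assumed budget */
	const uint64_t refill = 8192;			/* assumed refill */
	uint64_t t = now_ns();

	if (!s->timestamp) {
		s->timestamp = t;
		return refill;
	}
	return (t - s->timestamp < budget_ns) ? refill : 0;
}

int main(void)
{
	struct timed_state s = { 0 };
	unsigned long long iters = 0;

	/* Model of a timed may_goto loop: the fast path only decrements
	 * the count; the (slow) check runs when the count hits zero.
	 */
	for (uint64_t count = check_timed_may_goto(&s); count; iters++) {
		if (--count == 0)
			count = check_timed_may_goto(&s);
	}
	printf("loop cut off after %llu iterations\n", iters);
	return 0;
}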