www.infradead.org Git - users/hch/misc.git/commitdiff
bpf: use rcu_read_lock_dont_migrate() for trampoline.c
author: Menglong Dong <menglong8.dong@gmail.com>
Thu, 21 Aug 2025 09:06:09 +0000 (17:06 +0800)
committer: Alexei Starovoitov <ast@kernel.org>
Tue, 26 Aug 2025 01:52:16 +0000 (18:52 -0700)
Use rcu_read_lock_dont_migrate() and rcu_read_unlock_migrate() in
trampoline.c to obtain better performance when PREEMPT_RCU is not enabled.

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Link: https://lore.kernel.org/r/20250821090609.42508-8-dongml2@chinatelecom.cn
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/trampoline.c

index 0e364614c3a29144b78f05c6d8188f0dca72700c..5949095e51c3d072cb323bba464160c7012eb404 100644 (file)
@@ -899,8 +899,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
 static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -949,8 +948,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
 
        update_prog_stats(prog, start);
        this_cpu_dec(*(prog->active));
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
@@ -960,8 +958,7 @@ static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
        /* Runtime stats are exported via actual BPF_LSM_CGROUP
         * programs, not the shims.
         */
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -974,8 +971,7 @@ static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
 {
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
@@ -1033,8 +1029,7 @@ static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
                                    struct bpf_tramp_run_ctx *run_ctx)
        __acquires(RCU)
 {
-       rcu_read_lock();
-       migrate_disable();
+       rcu_read_lock_dont_migrate();
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
@@ -1048,8 +1043,7 @@ static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       migrate_enable();
-       rcu_read_unlock();
+       rcu_read_unlock_migrate();
 }
 
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)