bpf: Remove recursion prevention from rcu free callback
author Thomas Gleixner <tglx@linutronix.de>
Mon, 24 Feb 2020 14:01:39 +0000 (15:01 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Oct 2020 11:12:35 +0000 (13:12 +0200)
[ Upstream commit 8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 ]

If an element is freed via RCU then recursion into BPF instrumentation
functions is not a concern. The element is already detached from the map
and the RCU callback does not hold any locks on which a kprobe, perf event
or tracepoint attached BPF program could deadlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.259118710@linutronix.de
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/hashtab.c

index 6cc090d015f66db58c719920931afae904b6b03a..ecc58137525bc3128bc49ec28cdbcb8bdd3346ae 100644
@@ -645,15 +645,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
        struct htab_elem *l = container_of(head, struct htab_elem, rcu);
        struct bpf_htab *htab = l->htab;
 
-       /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
-        * we're calling kfree, otherwise deadlock is possible if kprobes
-        * are placed somewhere inside of slub
-        */
-       preempt_disable();
-       __this_cpu_inc(bpf_prog_active);
        htab_elem_free(htab, l);
-       __this_cpu_dec(bpf_prog_active);
-       preempt_enable();
 }
 
 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
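For reference, a minimal sketch of the callback after this change, reconstructed from the context lines of the hunk above (not the full hashtab.c source):

    static void htab_elem_free_rcu(struct rcu_head *head)
    {
            /* The element was detached from the map before the RCU grace
             * period, and this callback holds no locks that a kprobe, perf
             * event or tracepoint attached BPF program could contend on,
             * so the free path can call into the allocator without the
             * bpf_prog_active recursion guard or disabling preemption.
             */
            struct htab_elem *l = container_of(head, struct htab_elem, rcu);
            struct bpf_htab *htab = l->htab;

            htab_elem_free(htab, l);
    }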