bpf: Free element after unlock in __htab_map_lookup_and_delete_elem()
author Hou Tao <houtao1@huawei.com>
Fri, 17 Jan 2025 10:18:14 +0000 (18:18 +0800)
committer Alexei Starovoitov <ast@kernel.org>
Mon, 20 Jan 2025 17:09:01 +0000 (09:09 -0800)
The freeing of special fields in a map value may acquire a spin-lock
(e.g., the freeing of a bpf_timer); however, the lookup_and_delete_elem
procedure already holds a raw spin-lock, which violates the lockdep
rule.
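
For context, a minimal sketch of the lock nesting that lockdep rejects
(the demo_* names are hypothetical, not the hashtab or bpf_timer code):
on PREEMPT_RT a spinlock_t becomes a sleeping lock, so it must not be
acquired while a raw_spinlock_t is held.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_bucket_lock);	/* plays the bucket lock */
static DEFINE_SPINLOCK(demo_inner_lock);	/* plays the lock taken while freeing */

static void demo_invalid_nesting(void)
{
	raw_spin_lock(&demo_bucket_lock);
	spin_lock(&demo_inner_lock);	/* invalid: sleeping lock under a raw lock */
	spin_unlock(&demo_inner_lock);
	raw_spin_unlock(&demo_bucket_lock);
}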

The running context of __htab_map_lookup_and_delete_elem() has already
disabled migration, so it is safe to invoke free_htab_elem() after
unlocking the bucket lock.

Fix the potential problem by freeing the element after unlocking the
bucket lock in __htab_map_lookup_and_delete_elem().
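
The fix follows the usual "unlink under the lock, free after the
unlock" pattern; a simplified, self-contained sketch with hypothetical
demo_* names (not the actual hashtab helpers, which appear in the diff
below) looks like this:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_elem {
	struct list_head node;
	/* ... map value with special fields such as a timer ... */
};

static DEFINE_RAW_SPINLOCK(demo_bucket_lock);
static LIST_HEAD(demo_bucket);

/* Stand-in for free_htab_elem(): may take a spinlock_t internally. */
static void demo_free_elem(struct demo_elem *e)
{
	kfree(e);
}

static void demo_pop_and_free(void)
{
	struct demo_elem *e = NULL;

	raw_spin_lock(&demo_bucket_lock);
	if (!list_empty(&demo_bucket)) {
		e = list_first_entry(&demo_bucket, struct demo_elem, node);
		list_del(&e->node);		/* unlink only, no freeing here */
	}
	raw_spin_unlock(&demo_bucket_lock);

	if (e)
		demo_free_elem(e);		/* safe: bucket lock already released */
}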

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250117101816.2101857-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/hashtab.c

index 6545ef40e128ae5574578ae6a5658c2541a1dd94..4a9eeb7aef85562e6f2a2f12bbe66086c35eed48 100644
@@ -1663,14 +1663,16 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
                check_and_init_map_value(map, value);
        }
        hlist_nulls_del_rcu(&l->hash_node);
-       if (!is_lru_map)
-               free_htab_elem(htab, l);
 
 out_unlock:
        htab_unlock_bucket(htab, b, hash, bflags);
 
-       if (is_lru_map && l)
-               htab_lru_push_free(htab, l);
+       if (l) {
+               if (is_lru_map)
+                       htab_lru_push_free(htab, l);
+               else
+                       free_htab_elem(htab, l);
+       }
 
        return ret;
 }