bpf: Zeroing allocated object from slab in bpf memory allocator
author Hou Tao <houtao1@huawei.com>
Wed, 15 Feb 2023 08:21:31 +0000 (16:21 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 10 Mar 2023 08:33:06 +0000 (09:33 +0100)
[ Upstream commit 997849c4b969034e225153f41026657def66d286 ]

Currently, a freed element in the bpf memory allocator may be reused
immediately. For an htab map, the reuse reinitializes the special fields
in the map value (e.g., bpf_spin_lock), but a lookup running in parallel
may still be accessing those fields, which can lead to a hard lockup as
shown below (a sketch of the interleaving follows the backtrace):

 NMI backtrace for cpu 16
 CPU: 16 PID: 2574 Comm: htab.bin Tainted: G             L     6.1.0+ #1
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
 RIP: 0010:queued_spin_lock_slowpath+0x283/0x2c0
 ......
 Call Trace:
  <TASK>
  copy_map_value_locked+0xb7/0x170
  bpf_map_copy_value+0x113/0x3c0
  __sys_bpf+0x1c67/0x2780
  __x64_sys_bpf+0x1c/0x20
  do_syscall_64+0x30/0x60
  entry_SYSCALL_64_after_hwframe+0x46/0xb0
 ......
  </TASK>
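A minimal sketch of the interleaving behind this backtrace.
copy_map_value_locked() and check_and_init_map_value() are the real
kernel helpers; the two wrapper functions and the CPU labels are
illustrative only, with names (htab, l_new, key_size) borrowed from
kernel/bpf/hashtab.c:

  /* CPU 16 (the CPU stuck above): BPF_MAP_LOOKUP_ELEM on an htab map
   * whose value embeds a bpf_spin_lock. copy_map_value_locked() takes
   * that lock before copying the value out; if another CPU zeroes the
   * lock word mid-acquire, queued_spin_lock_slowpath() never observes
   * a consistent release and spins forever.
   */
  static void lookup_side(struct bpf_map *map, void *value, void *elem_value)
  {
          copy_map_value_locked(map, value, elem_value, true);
  }

  /* Another CPU: update path that reuses a just-freed element. Before
   * this patch, alloc_htab_elem() re-zeroed the special fields of the
   * element returned by bpf_mem_cache_alloc(), even though that element
   * could still be visible to a parallel lookup:
   */
  static void update_side(struct bpf_htab *htab, struct htab_elem *l_new,
                          u32 key_size)
  {
          check_and_init_map_value(&htab->map,
                                   l_new->key + round_up(key_size, 8));
  }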

For an htab map, just as in the preallocated case, there is no need to
initialize these special fields in the map value again once they have
been initialized. For a preallocated htab map, the fields are
initialized through __GFP_ZERO in bpf_map_area_alloc(), so do the same
thing for a non-preallocated htab in the bpf memory allocator. And
there is no need to use __GFP_ZERO for the per-cpu bpf memory
allocator, because __alloc_percpu_gfp() zeroes the memory implicitly.
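
A minimal sketch of the two allocation paths the paragraph contrasts,
simplified from __alloc() in kernel/bpf/memalloc.c (the two wrapper
functions are illustrative, not kernel code):

  /* per-cpu path: __alloc_percpu_gfp() hands back zeroed memory already
   * (pcpu_alloc() memsets every chunk), so __GFP_ZERO would be redundant
   */
  static void __percpu *sketch_alloc_percpu(size_t unit_size, gfp_t flags)
  {
          return __alloc_percpu_gfp(unit_size, 8, flags);
  }

  /* slab path: a reused object may still carry the previous owner's
   * data, so zeroing must be requested explicitly
   */
  static void *sketch_alloc_slab(size_t unit_size, gfp_t flags, int node)
  {
          return kmalloc_node(unit_size, flags | __GFP_ZERO, node);
  }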

Fixes: 0fd7c5d43339 ("bpf: Optimize call_rcu in non-preallocated hash map.")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20230215082132.3856544-2-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/linux/bpf.h
kernel/bpf/hashtab.c
kernel/bpf/memalloc.c

index c1bd1bd105067e575aafbf1f9e03b8280f81910d..942f9ac9fa7b637e60ea54b5639a767fbb70388a 100644
@@ -266,6 +266,13 @@ static inline bool map_value_has_kptrs(const struct bpf_map *map)
        return !IS_ERR_OR_NULL(map->kptr_off_tab);
 }
 
+/* 'dst' must be a temporary buffer and must not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall; otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
+ * memory allocator, it is still possible for 'dst' to be used in parallel by
+ * a bpf program or bpf syscall.
+ */
 static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
 {
        if (unlikely(map_value_has_spin_lock(map)))
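
The hunk above is truncated by this view after the first branch. For
reference, a hedged reconstruction of the whole function as it reads in
this kernel version (the spin_lock branch matches the context shown;
the timer and kptr branches are reconstructed from the same era of
include/linux/bpf.h and may differ in minor detail):

  static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
  {
          if (unlikely(map_value_has_spin_lock(map)))
                  memset(dst + map->spin_lock_off, 0,
                         sizeof(struct bpf_spin_lock));
          if (unlikely(map_value_has_timer(map)))
                  memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
          if (unlikely(map_value_has_kptrs(map))) {
                  struct bpf_map_value_off *tab = map->kptr_off_tab;
                  int i;

                  /* NULL out each kptr slot in the value */
                  for (i = 0; i < tab->nr_off; i++)
                          *(u64 *)(dst + tab->off[i].offset) = 0;
          }
  }

The hashtab.c hunks below show the intended usage: 'dst' is always the
temporary buffer that the value was just copied into, never the live
element itself.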
index c4811984fafa4a2e124f52e48621f19781bf41ce..4a3d0a74470263e869e8c26f28cf3117484eb35f 100644
@@ -1010,8 +1010,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                        l_new = ERR_PTR(-ENOMEM);
                        goto dec_count;
                }
-               check_and_init_map_value(&htab->map,
-                                        l_new->key + round_up(key_size, 8));
        }
 
        memcpy(l_new->key, key, key_size);
@@ -1603,6 +1601,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
                        else
                                copy_map_value(map, value, l->key +
                                               roundup_key_size);
+                       /* Zeroing special fields in the temp buffer */
                        check_and_init_map_value(map, value);
                }
 
@@ -1803,6 +1802,7 @@ again_nocopy:
                                                      true);
                        else
                                copy_map_value(map, dst_val, value);
+                       /* Zeroing special fields in the temp buffer */
                        check_and_init_map_value(map, dst_val);
                }
                if (do_delete) {
index 6187c28d266f01589894a39edff7131867d62630..ace303a220ae812d2bf9287d267b15b76923f264 100644
@@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
                return obj;
        }
 
-       return kmalloc_node(c->unit_size, flags, node);
+       return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
 }
 
 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
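
Assuming the usual slab semantics, the one-line change above is
equivalent to switching to the zeroing helper; a minimal equivalence
sketch (not part of the patch):

  /* kzalloc_node() is kmalloc_node() with __GFP_ZERO folded in, so
   * these two lines return equally zeroed objects:
   */
  obj = kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
  obj = kzalloc_node(c->unit_size, flags, node);

ORing the flag in keeps the existing 'flags' computation at the top of
__alloc() untouched, which is presumably why the patch does not switch
helpers.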