www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bpf: Bail out early in __htab_map_lookup_and_delete_elem()
author: Hou Tao <houtao1@huawei.com>
Fri, 17 Jan 2025 10:18:13 +0000 (18:18 +0800)
committer: Alexei Starovoitov <ast@kernel.org>
Mon, 20 Jan 2025 17:09:01 +0000 (09:09 -0800)
Use a goto statement to bail out early when the target element is not
found, instead of using a large else branch to handle the more likely
case. This change doesn't affect functionality and simply makes the code
cleaner.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@kernel.org>
Link: https://lore.kernel.org/r/20250117101816.2101857-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/hashtab.c

index 963cccb01daae380cdeb6bf32b94e5069bd06301..6545ef40e128ae5574578ae6a5658c2541a1dd94 100644 (file)
@@ -1635,37 +1635,38 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
        l = lookup_elem_raw(head, hash, key, key_size);
        if (!l) {
                ret = -ENOENT;
-       } else {
-               if (is_percpu) {
-                       u32 roundup_value_size = round_up(map->value_size, 8);
-                       void __percpu *pptr;
-                       int off = 0, cpu;
+               goto out_unlock;
+       }
 
-                       pptr = htab_elem_get_ptr(l, key_size);
-                       for_each_possible_cpu(cpu) {
-                               copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
-                               check_and_init_map_value(&htab->map, value + off);
-                               off += roundup_value_size;
-                       }
-               } else {
-                       u32 roundup_key_size = round_up(map->key_size, 8);
+       if (is_percpu) {
+               u32 roundup_value_size = round_up(map->value_size, 8);
+               void __percpu *pptr;
+               int off = 0, cpu;
 
-                       if (flags & BPF_F_LOCK)
-                               copy_map_value_locked(map, value, l->key +
-                                                     roundup_key_size,
-                                                     true);
-                       else
-                               copy_map_value(map, value, l->key +
-                                              roundup_key_size);
-                       /* Zeroing special fields in the temp buffer */
-                       check_and_init_map_value(map, value);
+               pptr = htab_elem_get_ptr(l, key_size);
+               for_each_possible_cpu(cpu) {
+                       copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
+                       check_and_init_map_value(&htab->map, value + off);
+                       off += roundup_value_size;
                }
+       } else {
+               u32 roundup_key_size = round_up(map->key_size, 8);
 
-               hlist_nulls_del_rcu(&l->hash_node);
-               if (!is_lru_map)
-                       free_htab_elem(htab, l);
+               if (flags & BPF_F_LOCK)
+                       copy_map_value_locked(map, value, l->key +
+                                             roundup_key_size,
+                                             true);
+               else
+                       copy_map_value(map, value, l->key +
+                                      roundup_key_size);
+               /* Zeroing special fields in the temp buffer */
+               check_and_init_map_value(map, value);
        }
+       hlist_nulls_del_rcu(&l->hash_node);
+       if (!is_lru_map)
+               free_htab_elem(htab, l);
 
+out_unlock:
        htab_unlock_bucket(htab, b, hash, bflags);
 
        if (is_lru_map && l)