A BPF program may call bpf_for_each_map_elem(), and it will call
the ->map_for_each_callback callback of the related bpf map. Considering that
the running context of a bpf program has already disabled migration, remove
the unnecessary migrate_{disable|enable} pair in the implementations of
->map_for_each_callback. To ensure the guarantee will not be violated
later, also add a cant_migrate() check in the implementations.
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250108010728.207536-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
        u64 ret = 0;
        void *val;
 
+       cant_migrate();
+
        if (flags != 0)
                return -EINVAL;
 
        is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        array = container_of(map, struct bpf_array, map);
-       if (is_percpu)
-               migrate_disable();
        for (i = 0; i < map->max_entries; i++) {
                if (is_percpu)
                        val = this_cpu_ptr(array->pptrs[i]);
                        break;
        }
 
-       if (is_percpu)
-               migrate_enable();
        return num_elems;
 }
 
 
        bool is_percpu;
        u64 ret = 0;
 
+       cant_migrate();
+
        if (flags != 0)
                return -EINVAL;
 
        is_percpu = htab_is_percpu(htab);
 
        roundup_key_size = round_up(map->key_size, 8);
-       /* disable migration so percpu value prepared here will be the
-        * same as the one seen by the bpf program with bpf_map_lookup_elem().
+       /* migration has been disabled, so percpu value prepared here will be
+        * the same as the one seen by the bpf program with
+        * bpf_map_lookup_elem().
         */
-       if (is_percpu)
-               migrate_disable();
        for (i = 0; i < htab->n_buckets; i++) {
                b = &htab->buckets[i];
                rcu_read_lock();
                rcu_read_unlock();
        }
 out:
-       if (is_percpu)
-               migrate_enable();
        return num_elems;
 }