struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
+                                                  bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 
 }
 
 /* map_idr_lock should have been held */
-static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
-                                           bool uref)
+static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
+                                             bool uref)
 {
        int refold;
 
        return map;
 }
 
+/* Locked wrapper around __bpf_map_inc_not_zero(): takes map_idr_lock,
+ * attempts the refcount bump, drops the lock, and passes through the
+ * helper's return value (the map, or presumably an ERR_PTR on failure —
+ * the helper body is not visible in this hunk; confirm against
+ * __bpf_map_inc_not_zero()).  Exported for GPL modules.
+ */
+struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+{
+       spin_lock_bh(&map_idr_lock);
+       map = __bpf_map_inc_not_zero(map, uref);
+       spin_unlock_bh(&map_idr_lock);
+
+       return map;
+}
+EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
+
 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 {
        return -ENOTSUPP;
        spin_lock_bh(&map_idr_lock);
        map = idr_find(&map_idr, id);
        if (map)
-               map = bpf_map_inc_not_zero(map, true);
+               map = __bpf_map_inc_not_zero(map, true);
        else
                map = ERR_PTR(-ENOENT);
        spin_unlock_bh(&map_idr_lock);