int (*map_lookup_and_delete_batch)(struct bpf_map *map,
                                           const union bpf_attr *attr,
                                           union bpf_attr __user *uattr);
-       int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+       int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+                               const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
        int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
 int  generic_map_lookup_batch(struct bpf_map *map,
                              const union bpf_attr *attr,
                              union bpf_attr __user *uattr);
-int  generic_map_update_batch(struct bpf_map *map,
+int  generic_map_update_batch(struct bpf_map *map, struct file *map_file,
                              const union bpf_attr *attr,
                              union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
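
For out-of-tree or new map types, the signature change above means any custom
.map_update_batch implementation must gain the file parameter as well. A
minimal, purely illustrative sketch (my_map_update_batch is a hypothetical
name, not from the patch):

	static int my_map_update_batch(struct bpf_map *map, struct file *map_file,
				       const union bpf_attr *attr,
				       union bpf_attr __user *uattr)
	{
		/* Most map types can simply forward to the generic helper,
		 * which now receives the map's file from bpf_map_do_batch().
		 */
		return generic_map_update_batch(map, map_file, attr, uattr);
	}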
 
                synchronize_rcu();
 }
 
-static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
-                               void *value, __u64 flags)
+static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
+                               void *key, void *value, __u64 flags)
 {
        int err;
 
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                return sock_map_update_elem_sys(map, key, value, flags);
        } else if (IS_FD_PROG_ARRAY(map)) {
-               return bpf_fd_array_map_update_elem(map, f.file, key, value,
+               return bpf_fd_array_map_update_elem(map, map_file, key, value,
                                                    flags);
        }
 
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
        } else if (IS_FD_ARRAY(map)) {
                rcu_read_lock();
-               err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+               err = bpf_fd_array_map_update_elem(map, map_file, key, value,
                                                   flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
                rcu_read_lock();
-               err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
+               err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
                                                  flags);
                rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	}

	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}
 
-       err = bpf_map_update_value(map, f, key, value, attr->flags);
+       err = bpf_map_update_value(map, f.file, key, value, attr->flags);
 
        kvfree(value);
free_key:
	kvfree(key);
	return err;
 }
 
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
 {
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 value_size, cp, max_count;
-       int ufd = attr->batch.map_fd;
        void *key, *value;
-       struct fd f;
        int err = 0;
 
	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}
 
-       f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
                        break;
 
-               err = bpf_map_update_value(map, f, key, value,
+               err = bpf_map_update_value(map, map_file, key, value,
                                           attr->batch.elem_flags);
 
		if (err)
			break;
	}

        kvfree(value);
        kvfree(key);
-       fdput(f);
        return err;
 }
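
For context (not part of the patch): a map type opts into the batched-update
path by wiring its ops table to the generic helper; bpf_map_do_batch() then
supplies the map's file automatically. Abridged sketch following the pattern
in kernel/bpf/hashtab.c:

	const struct bpf_map_ops htab_map_ops = {
		/* ... other callbacks elided ... */
		.map_lookup_batch = htab_map_lookup_batch,
		.map_update_batch = generic_map_update_batch,
		.map_delete_batch = generic_map_delete_batch,
	};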
 
 
 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
 
-#define BPF_DO_BATCH(fn)                       \
+#define BPF_DO_BATCH(fn, ...)                  \
        do {                                    \
                if (!fn) {                      \
                        err = -ENOTSUPP;        \
                        goto err_put;           \
                }                               \
-               err = fn(map, attr, uattr);     \
+               err = fn(__VA_ARGS__);          \
        } while (0)
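
Illustration (not part of the patch): making the macro variadic lets each call
site spell out its own argument list, so only the update path has to pass the
extra file pointer. Ignoring the NULL check, the update invocation below
expands to:

	err = map->ops->map_update_batch(map, f.file, attr, uattr);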
 
static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
        }
 
        if (cmd == BPF_MAP_LOOKUP_BATCH)
-               BPF_DO_BATCH(map->ops->map_lookup_batch);
+               BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
        else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
-               BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
+               BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
        else if (cmd == BPF_MAP_UPDATE_BATCH)
-               BPF_DO_BATCH(map->ops->map_update_batch);
+               BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
        else
-               BPF_DO_BATCH(map->ops->map_delete_batch);
+               BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
 err_put:
        if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
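
From user space, the path exercised by this change is the BPF_MAP_UPDATE_BATCH
command, most conveniently reached through libbpf's bpf_map_update_batch()
wrapper. A minimal sketch, assuming map_fd refers to an existing hash map with
u32 keys and u64 values:

	#include <bpf/bpf.h>

	/* Update `count` key/value pairs in a single syscall. On return,
	 * count holds the number of elements actually processed.
	 */
	static int update_many(int map_fd, __u32 *keys, __u64 *vals, __u32 count)
	{
		LIBBPF_OPTS(bpf_map_batch_opts, opts,
			.elem_flags = 0,	/* or BPF_F_LOCK for spin-locked values */
			.flags = 0,
		);

		return bpf_map_update_batch(map_fd, keys, vals, &count, &opts);
	}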