        void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
        int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
+       int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+                               union bpf_attr __user *uattr);
+       int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+                               union bpf_attr __user *uattr);
 
        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
 int  generic_map_lookup_batch(struct bpf_map *map,
                              const union bpf_attr *attr,
                              union bpf_attr __user *uattr);
+int  generic_map_update_batch(struct bpf_map *map,
+                             const union bpf_attr *attr,
+                             union bpf_attr __user *uattr);
+int  generic_map_delete_batch(struct bpf_map *map,
+                             const union bpf_attr *attr,
+                             union bpf_attr __user *uattr);
 
 extern int sysctl_unprivileged_bpf_disabled;
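
For a map type that has no specialized batch implementation, the new callbacks can simply be pointed at these generic helpers. The snippet below is a minimal sketch of that wiring, not part of this patch; "demo_map_ops" and the elided per-element callbacks are placeholders for illustration.

#include <linux/bpf.h>

/* Hypothetical map type falling back to the generic batch helpers.
 * Only the batch-related members are shown; the per-element callbacks
 * (map_lookup_elem, map_update_elem, ...) would be the map's existing
 * implementations.
 */
static const struct bpf_map_ops demo_map_ops = {
	/* ... per-element callbacks ... */
	.map_lookup_batch	= generic_map_lookup_batch,
	.map_update_batch	= generic_map_update_batch,
	.map_delete_batch	= generic_map_delete_batch,
};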
 
 
        return err;
 }
 
+int generic_map_delete_batch(struct bpf_map *map,
+                            const union bpf_attr *attr,
+                            union bpf_attr __user *uattr)
+{
+       void __user *keys = u64_to_user_ptr(attr->batch.keys);
+       u32 cp, max_count;
+       int err = 0;
+       void *key;
+
+       if (attr->batch.elem_flags & ~BPF_F_LOCK)
+               return -EINVAL;
+
+       if ((attr->batch.elem_flags & BPF_F_LOCK) &&
+           !map_value_has_spin_lock(map)) {
+               return -EINVAL;
+       }
+
+       max_count = attr->batch.count;
+       if (!max_count)
+               return 0;
+
+       /* Copy each key into a single reusable kernel buffer; allocating a
+        * fresh copy per iteration would be leaked on every pass.
+        */
+       key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+       if (!key)
+               return -ENOMEM;
+
+       for (cp = 0; cp < max_count; cp++) {
+               err = -EFAULT;
+               if (copy_from_user(key, keys + cp * map->key_size,
+                                  map->key_size))
+                       break;
+
+               if (bpf_map_is_dev_bound(map)) {
+                       err = bpf_map_offload_delete_elem(map, key);
+                       break;
+               }
+
+               preempt_disable();
+               __this_cpu_inc(bpf_prog_active);
+               rcu_read_lock();
+               err = map->ops->map_delete_elem(map, key);
+               rcu_read_unlock();
+               __this_cpu_dec(bpf_prog_active);
+               preempt_enable();
+               maybe_wait_bpf_programs(map);
+               if (err)
+                       break;
+       }
+       if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
+               err = -EFAULT;
+
+       kfree(key);
+       return err;
+}
+
+int generic_map_update_batch(struct bpf_map *map,
+                            const union bpf_attr *attr,
+                            union bpf_attr __user *uattr)
+{
+       void __user *values = u64_to_user_ptr(attr->batch.values);
+       void __user *keys = u64_to_user_ptr(attr->batch.keys);
+       u32 value_size, cp, max_count;
+       int ufd = attr->batch.map_fd;
+       void *key, *value;
+       struct fd f;
+       int err = 0;
+
+       if (attr->batch.elem_flags & ~BPF_F_LOCK)
+               return -EINVAL;
+
+       if ((attr->batch.elem_flags & BPF_F_LOCK) &&
+           !map_value_has_spin_lock(map)) {
+               return -EINVAL;
+       }
+
+       value_size = bpf_map_value_size(map);
+
+       max_count = attr->batch.count;
+       if (!max_count)
+               return 0;
+
+       key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
+       if (!key)
+               return -ENOMEM;
+
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
+       if (!value) {
+               kfree(key);
+               return -ENOMEM;
+       }
+
+       /* Take the map fd reference only after every early return above,
+        * so it cannot be leaked; bpf_map_update_value() needs f.file for
+        * fd-based maps such as array/hash of maps.
+        */
+       f = fdget(ufd);
+
+       for (cp = 0; cp < max_count; cp++) {
+               err = -EFAULT;
+               if (copy_from_user(key, keys + cp * map->key_size,
+                                  map->key_size) ||
+                   copy_from_user(value, values + cp * value_size, value_size))
+                       break;
+
+               err = bpf_map_update_value(map, f, key, value,
+                                          attr->batch.elem_flags);
+
+               if (err)
+                       break;
+       }
+
+       if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
+               err = -EFAULT;
+
+       fdput(f);
+       kfree(value);
+       kfree(key);
+       return err;
+}
+
 #define MAP_LOOKUP_RETRIES 3
 
 int generic_map_lookup_batch(struct bpf_map *map,
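
From userspace, the new commands go through the existing bpf(2) syscall. Below is a hedged sketch, not part of the patch, of a batched delete; the wrapper name map_delete_batch() is illustrative, and it assumes a uapi linux/bpf.h that already carries BPF_MAP_DELETE_BATCH and the batch member of union bpf_attr added by this series.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_delete_batch(int map_fd, const void *keys, __u32 *count)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = map_fd;
	attr.batch.keys = (__u64)(unsigned long)keys;
	attr.batch.count = *count;

	err = syscall(__NR_bpf, BPF_MAP_DELETE_BATCH, &attr, sizeof(attr));
	/* The kernel writes back how many keys it actually processed. */
	*count = attr.batch.count;
	return err;
}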
 
        if (cmd == BPF_MAP_LOOKUP_BATCH)
                BPF_DO_BATCH(map->ops->map_lookup_batch);
+       else if (cmd == BPF_MAP_UPDATE_BATCH)
+               BPF_DO_BATCH(map->ops->map_update_batch);
+       else
+               BPF_DO_BATCH(map->ops->map_delete_batch);
 
 err_put:
        fdput(f);
        case BPF_MAP_LOOKUP_BATCH:
                err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
                break;
+       case BPF_MAP_UPDATE_BATCH:
+               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
+               break;
+       case BPF_MAP_DELETE_BATCH:
+               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
+               break;
        default:
                err = -EINVAL;
                break;
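
A corresponding sketch for BPF_MAP_UPDATE_BATCH follows, again illustrative rather than part of the patch. It mirrors the delete sketch but also passes values and elem_flags (which the kernel restricts to 0 or BPF_F_LOCK), and shows how a caller can resume after a partial failure, since the kernel reports the number of elements it processed.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_update_batch(int map_fd, const void *keys, const void *values,
			    __u32 *count, __u64 elem_flags)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = map_fd;
	attr.batch.keys = (__u64)(unsigned long)keys;
	attr.batch.values = (__u64)(unsigned long)values;
	attr.batch.count = *count;
	attr.batch.elem_flags = elem_flags;	/* 0 or BPF_F_LOCK */

	err = syscall(__NR_bpf, BPF_MAP_UPDATE_BATCH, &attr, sizeof(attr));
	/* Even on failure the kernel reports how many updates succeeded,
	 * so the caller can retry starting at keys/values[*count].
	 */
	*count = attr.batch.count;
	return err;
}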