const struct bpf_iter_seq_info *iter_seq_info;
 };
 
-struct bpf_map_memory {
-       u32 pages;
-       struct user_struct *user;
-};
-
 struct bpf_map {
        /* The first two cachelines with read-mostly members of which some
         * are also accessed in fast-path (e.g. ops, max_entries).
        u32 btf_key_type_id;
        u32 btf_value_type_id;
        struct btf *btf;
-       struct bpf_map_memory memory;
 #ifdef CONFIG_MEMCG_KMEM
        struct mem_cgroup *memcg;
 #endif
 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
-void bpf_map_charge_finish(struct bpf_map_memory *mem);
-void bpf_map_charge_move(struct bpf_map_memory *dst,
-                        struct bpf_map_memory *src);
 void *bpf_map_area_alloc(u64 size, int numa_node);
 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 
        return map;
 }
 
-static u32 bpf_map_value_size(struct bpf_map *map)
+static u32 bpf_map_value_size(const struct bpf_map *map)
 {
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
        map->numa_node = bpf_map_attr_numa_node(attr);
 }
 
-static int bpf_charge_memlock(struct user_struct *user, u32 pages)
-{
-       unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-       if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
-               atomic_long_sub(pages, &user->locked_vm);
-               return -EPERM;
-       }
-       return 0;
-}
-
-static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
-{
-       if (user)
-               atomic_long_sub(pages, &user->locked_vm);
-}
-
-int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
-{
-       u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
-       struct user_struct *user;
-       int ret;
-
-       if (size >= U32_MAX - PAGE_SIZE)
-               return -E2BIG;
-
-       user = get_current_user();
-       ret = bpf_charge_memlock(user, pages);
-       if (ret) {
-               free_uid(user);
-               return ret;
-       }
-
-       mem->pages = pages;
-       mem->user = user;
-
-       return 0;
-}
-
-void bpf_map_charge_finish(struct bpf_map_memory *mem)
-{
-       bpf_uncharge_memlock(mem->user, mem->pages);
-       free_uid(mem->user);
-}
-
-void bpf_map_charge_move(struct bpf_map_memory *dst,
-                        struct bpf_map_memory *src)
-{
-       *dst = *src;
-
-       /* Make sure src will not be used for the redundant uncharging. */
-       memset(src, 0, sizeof(struct bpf_map_memory));
-}
-
-int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
-{
-       int ret;
-
-       ret = bpf_charge_memlock(map->memory.user, pages);
-       if (ret)
-               return ret;
-       map->memory.pages += pages;
-       return ret;
-}
-
-void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
-{
-       bpf_uncharge_memlock(map->memory.user, pages);
-       map->memory.pages -= pages;
-}
-
 static int bpf_map_alloc_id(struct bpf_map *map)
 {
        int id;
 static void bpf_map_free_deferred(struct work_struct *work)
 {
        struct bpf_map *map = container_of(work, struct bpf_map, work);
-       struct bpf_map_memory mem;
 
-       bpf_map_charge_move(&mem, &map->memory);
        security_bpf_map_free(map);
        bpf_map_release_memcg(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
-       bpf_map_charge_finish(&mem);
 }
 
 static void bpf_map_put_uref(struct bpf_map *map)
 }
 
 #ifdef CONFIG_PROC_FS
+/* Provides an approximation of the map's memory footprint.
+ * Used only for backward compatibility, to display reasonable
+ * "memlock" info.
+ */
+static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
+{
+       unsigned long size;
+
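+       /* Estimated per-entry size: key plus value (scaled for
+        * per-CPU maps by bpf_map_value_size()), rounded up to
+        * 8-byte alignment.
+        */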
+       size = round_up(map->key_size + bpf_map_value_size(map), 8);
+
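+       /* Scale by the maximum number of entries and round up
+        * to whole pages.
+        */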
+       return round_up(map->max_entries * size, PAGE_SIZE);
+}
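+
+/* Illustrative example (hypothetical sizes): a hash map with
+ * key_size = 4, value_size = 20 and max_entries = 1000 yields
+ * round_up(4 + 20, 8) = 24 bytes per entry, so the reported
+ * footprint is round_up(24 * 1000, 4096) = 24576 bytes with
+ * 4K pages.
+ */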
+
 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 {
        const struct bpf_map *map = filp->private_data;
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
-                  "memlock:\t%llu\n"
+                  "memlock:\t%lu\n"
                   "map_id:\t%u\n"
                   "frozen:\t%u\n",
                   map->map_type,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
-                  map->memory.pages * 1ULL << PAGE_SHIFT,
+                  bpf_map_memory_footprint(map),
                   map->id,
                   READ_ONCE(map->frozen));
        if (type) {
 static int map_create(union bpf_attr *attr)
 {
        int numa_node = bpf_map_attr_numa_node(attr);
-       struct bpf_map_memory mem;
        struct bpf_map *map;
        int f_flags;
        int err;
        security_bpf_map_free(map);
 free_map:
        btf_put(map->btf);
-       bpf_map_charge_move(&mem, &map->memory);
        map->ops->map_free(map);
-       bpf_map_charge_finish(&mem);
        return err;
 }