void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
 
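The hunk above is against include/linux/bpf.h: bpf_map_inc() changes from returning void to returning the map pointer, and bpf_prog_inc() is added, so both helpers can now hand back ERR_PTR(-EBUSY) once a refcount is already at BPF_MAX_REFCNT. Callers therefore have to check the result before using it. A minimal sketch of that calling pattern, assuming kernel context; the wrapper function itself is hypothetical:

#include <linux/bpf.h>
#include <linux/err.h>

/* Hypothetical caller: takes one extra reference on a map it already holds.
 * bpf_map_inc() can now fail, so the returned pointer must be checked.
 */
static int example_hold_map(struct bpf_map *map)
{
	map = bpf_map_inc(map, false);	/* false: no extra user-visible ref */
	if (IS_ERR(map))
		return PTR_ERR(map);	/* -EBUSY once BPF_MAX_REFCNT is reached */

	/* ... use the map ... */
	bpf_map_put(map);		/* drop the reference taken above */
	return 0;
}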
@@ ... @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
        switch (type) {
        case BPF_TYPE_PROG:
-               atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+               raw = bpf_prog_inc(raw);
                break;
        case BPF_TYPE_MAP:
-               bpf_map_inc(raw, true);
+               raw = bpf_map_inc(raw, true);
                break;
        default:
                WARN_ON_ONCE(1);
@@ ... @@ static void *bpf_obj_do_get(const struct filename *pathname, enum bpf_type *type)
                goto out;
 
        raw = bpf_any_get(inode->i_private, *type);
-       touch_atime(&path);
+       if (!IS_ERR(raw))
+               touch_atime(&path);
 
        path_put(&path);
        return raw;
 
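The two fragments above are from kernel/bpf/inode.c: bpf_any_get() now propagates the error from bpf_prog_inc()/bpf_map_inc(), and bpf_obj_do_get(), which backs the BPF_OBJ_GET command for objects pinned in the BPF filesystem, only touches the atime when the reference was actually taken. From user space this means opening a pinned program or map can now fail with EBUSY. A small illustrative sketch using the raw syscall; the pin path and helper name are made up for the example:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical helper: open an object pinned in bpffs, e.g. "/sys/fs/bpf/my_map".
 * With this patch the call can fail with EBUSY if the pinned object's refcount
 * is already at BPF_MAX_REFCNT.
 */
static int example_obj_get(const char *path)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;

	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}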
@@ ... @@ struct bpf_map *__bpf_map_get(struct fd f)
 return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-       atomic_inc(&map->refcnt);
+       if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&map->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
        if (uref)
                atomic_inc(&map->usercnt);
+       return map;
 }
 
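The pattern in bpf_map_inc() above is: increment first, read the post-increment value back atomically, and undo the increment if the cap was crossed, so the counter stays bounded and a runaway number of references can never wrap it. The same idea as a standalone C11 sketch outside the kernel; the type, function name and cap value are illustrative only:

#include <errno.h>
#include <stdatomic.h>

#define EXAMPLE_MAX_REFCNT 32768	/* mirrors BPF_MAX_REFCNT */

struct example_obj {
	atomic_int refcnt;
};

/* Returns 0 on success, -EBUSY if another reference would exceed the limit.
 * atomic_fetch_add() returns the old value, so old + 1 plays the role of
 * the kernel's atomic_inc_return().
 */
static int example_obj_get(struct example_obj *obj)
{
	if (atomic_fetch_add(&obj->refcnt, 1) + 1 > EXAMPLE_MAX_REFCNT) {
		atomic_fetch_sub(&obj->refcnt, 1);	/* undo the increment */
		return -EBUSY;
	}
	return 0;
}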
@@ ... @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
        if (IS_ERR(map))
                return map;
 
-       bpf_map_inc(map, true);
+       map = bpf_map_inc(map, true);
        fdput(f);
 
        return map;
@@ ... @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
 return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+       if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&prog->aux->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
+       return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ ... @@ struct bpf_prog *bpf_prog_get(u32 ufd)
        if (IS_ERR(prog))
                return prog;
 
-       atomic_inc(&prog->aux->refcnt);
+       prog = bpf_prog_inc(prog);
        fdput(f);
 
        return prog;
 
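As the comment above notes, bpf_prog_get() is what sockets, tracing and seccomp call before attaching a program to an event, so with this change an attach can also fail with EBUSY once a program already has BPF_MAX_REFCNT references. A hedged sketch of such an attach/detach pair; the event structure and functions are hypothetical, only the bpf_prog_get()/bpf_prog_put() pairing matters:

#include <linux/bpf.h>
#include <linux/err.h>

/* Hypothetical event that owns one reference on its attached program. */
struct example_event {
	struct bpf_prog *prog;
};

static int example_attach_prog(struct example_event *ev, u32 prog_fd)
{
	struct bpf_prog *prog = bpf_prog_get(prog_fd);

	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* may now also be -EBUSY */

	ev->prog = prog;		/* the event owns the reference from here on */
	return 0;
}

static void example_detach_prog(struct example_event *ev)
{
	bpf_prog_put(ev->prog);		/* pairs with bpf_prog_get() above */
	ev->prog = NULL;
}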
@@ ... @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                return -E2BIG;
                        }
 
-                       /* remember this map */
-                       env->used_maps[env->used_map_cnt++] = map;
-
                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
-                       bpf_map_inc(map, false);
+                       map = bpf_map_inc(map, false);
+                       if (IS_ERR(map)) {
+                               fdput(f);
+                               return PTR_ERR(map);
+                       }
+                       env->used_maps[env->used_map_cnt++] = map;
+
                        fdput(f);
 next_insn:
                        insn++;
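This last hunk is in kernel/bpf/verifier.c. Recording the map in env->used_maps[] moves to after bpf_map_inc() has succeeded, and the fd is still dropped on the new error path. The ordering matters because, per the comment above, release_maps() later drops one reference for every recorded entry, so an entry must never be added unless its reference was actually taken. A simplified, hedged sketch of what that cleanup amounts to (not the exact verifier code):

#include <linux/bpf.h>

/* Simplified sketch of the verifier-side cleanup: each entry in used_maps[]
 * is assumed to own exactly one reference, which is why the patch records a
 * map only after bpf_map_inc() has succeeded.
 */
static void example_release_maps(struct bpf_map **used_maps, int used_map_cnt)
{
	int i;

	for (i = 0; i < used_map_cnt; i++)
		bpf_map_put(used_maps[i]);
}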