return ctx_type;
 }
 
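+/* Per map type bpf_map_ops, generated from the BPF_MAP_TYPE() entries in
+ * <linux/bpf_types.h>; the PROG and LINK macros are stubbed out so that
+ * only map types contribute array entries.
+ */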
+static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
+#define BPF_LINK_TYPE(_id, _name)
+#define BPF_MAP_TYPE(_id, _ops) \
+       [_id] = &_ops,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_LINK_TYPE
+#undef BPF_MAP_TYPE
+};
+
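+/* Look up the vmlinux BTF id of each map type's kernel struct, named by
+ * ops->map_btf_name, and store it in *ops->map_btf_id so the verifier can
+ * later type-check direct reads through a map pointer.
+ */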
+static int btf_vmlinux_map_ids_init(const struct btf *btf,
+                                   struct bpf_verifier_log *log)
+{
+       const struct bpf_map_ops *ops;
+       int i, btf_id;
+
+       for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
+               ops = btf_vmlinux_map_ops[i];
+               if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
+                       continue;
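+               /* map_btf_name and map_btf_id must be set together */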
+               if (!ops->map_btf_name || !ops->map_btf_id) {
+                       bpf_log(log, "map type %d is misconfigured\n", i);
+                       return -EINVAL;
+               }
+               btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
+                                              BTF_KIND_STRUCT);
+               if (btf_id < 0)
+                       return btf_id;
+               *ops->map_btf_id = btf_id;
+       }
+
+       return 0;
+}
+
 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
                                     struct btf *btf,
                                     const struct btf_type *t,
        /* btf_parse_vmlinux() runs under bpf_verifier_lock */
        bpf_ctx_convert.t = btf_type_by_id(btf, btf_id);
 
+       /* find bpf map structs for map_ptr access checking */
+       err = btf_vmlinux_map_ids_init(btf, log);
+       if (err < 0)
+               goto errout;
+
        bpf_struct_ops_init(btf, log);
 
        btf_verifier_env_free(env);
 
        __mark_reg_not_init(env, regs + regno);
 }
 
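+/* Mark the destination register of a BTF-typed load: a scalar result is
+ * marked unknown, a pointer result becomes PTR_TO_BTF_ID of btf_id.
+ */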
+static void mark_btf_ld_reg(struct bpf_verifier_env *env,
+                           struct bpf_reg_state *regs, u32 regno,
+                           enum bpf_reg_type reg_type, u32 btf_id)
+{
+       if (reg_type == SCALAR_VALUE) {
+               mark_reg_unknown(env, regs, regno);
+               return;
+       }
+       mark_reg_known_zero(env, regs, regno);
+       regs[regno].type = PTR_TO_BTF_ID;
+       regs[regno].btf_id = btf_id;
+}
+
 #define DEF_NOT_SUBREG (0)
 static void init_reg_state(struct bpf_verifier_env *env,
                           struct bpf_func_state *state)
        if (ret < 0)
                return ret;
 
-       if (atype == BPF_READ && value_regno >= 0) {
-               if (ret == SCALAR_VALUE) {
-                       mark_reg_unknown(env, regs, value_regno);
-                       return 0;
-               }
-               mark_reg_known_zero(env, regs, value_regno);
-               regs[value_regno].type = PTR_TO_BTF_ID;
-               regs[value_regno].btf_id = btf_id;
+       if (atype == BPF_READ && value_regno >= 0)
+               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
+       return 0;
+}
+
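+/* Validate a read through a CONST_PTR_TO_MAP register, i.e. direct access
+ * to a struct bpf_map field such as max_entries.
+ */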
+static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+                                  struct bpf_reg_state *regs,
+                                  int regno, int off, int size,
+                                  enum bpf_access_type atype,
+                                  int value_regno)
+{
+       struct bpf_reg_state *reg = regs + regno;
+       struct bpf_map *map = reg->map_ptr;
+       const struct btf_type *t;
+       const char *tname;
+       u32 btf_id;
+       int ret;
+
+       if (!btf_vmlinux) {
+               verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
+               return -ENOTSUPP;
+       }
+
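+       /* The map's type must have registered a BTF id for its struct */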
+       if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
+               verbose(env, "map_ptr access not supported for map type %d\n",
+                       map->map_type);
+               return -ENOTSUPP;
+       }
+
+       t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
+       tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+
+       if (!env->allow_ptr_to_map_access) {
+               verbose(env,
+                       "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
+                       tname);
+               return -EPERM;
        }
 
+       if (off < 0) {
+               verbose(env, "R%d is %s invalid negative access: off=%d\n",
+                       regno, tname, off);
+               return -EACCES;
+       }
+
+       if (atype != BPF_READ) {
+               verbose(env, "only read from %s is supported\n", tname);
+               return -EACCES;
+       }
+
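+       /* Walk the map struct's BTF to validate offset/size and learn the
+        * type of the member being read.
+        */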
+       ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+       if (ret < 0)
+               return ret;
+
+       if (value_regno >= 0)
+               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
        return 0;
 }
 
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
        } else if (reg->type == PTR_TO_BTF_ID) {
                err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
                                              value_regno);
+       } else if (reg->type == CONST_PTR_TO_MAP) {
+               err = check_ptr_to_map_access(env, regs, regno, off, size, t,
+                                             value_regno);
        } else {
                verbose(env, "R%d invalid mem access '%s'\n", regno,
                        reg_type_str[reg->type]);
                env->strict_alignment = false;
 
        env->allow_ptr_leaks = bpf_allow_ptr_leaks();
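+       /* Direct bpf_map field access requires CAP_PERFMON or CAP_SYS_ADMIN */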
+       env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
        env->bypass_spec_v1 = bpf_bypass_spec_v1();
        env->bypass_spec_v4 = bpf_bypass_spec_v4();
        env->bpf_capable = bpf_capable();