void *security;
 #endif
        struct bpf_prog_offload *offload;
+       struct btf *btf;
+       u32 type_id; /* type id for this prog/func */
        union {
                struct work_struct work;
                struct rcu_head rcu;
 };
 
 /* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+             union bpf_attr __user *uattr);
 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 
 struct bpf_subprog_info {
        u32 start; /* insn idx of function entry point */
        u16 stack_depth; /* max. stack depth used by this function */
+       u32 type_id; /* btf type_id for this subprog */
 };
 
 /* single container for all structs
 
                       struct seq_file *m);
 int btf_get_fd_by_id(u32 id);
 u32 btf_id(const struct btf *btf);
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
 
 #endif
 
                 * (context accesses, allowed helpers, etc).
                 */
                __u32           expected_attach_type;
+               __u32           prog_btf_fd;    /* fd pointing to BTF type data */
+               __u32           func_info_rec_size;     /* userspace bpf_func_info size */
+               __aligned_u64   func_info;      /* func info */
+               __u32           func_info_cnt;  /* number of bpf_func_info records */
        };
 
        struct { /* anonymous struct used by BPF_OBJ_* commands */
        __u32 nr_jited_func_lens;
        __aligned_u64 jited_ksyms;
        __aligned_u64 jited_func_lens;
+       __u32 btf_id;
+       __u32 func_info_rec_size;
+       __aligned_u64 func_info;
+       __u32 func_info_cnt;
 } __attribute__((aligned(8)));
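The new bpf_prog_info fields follow the usual two-call BPF_OBJ_GET_INFO_BY_FD pattern: a first call with func_info_cnt == 0 only reports the number of records and the record size the kernel uses, and a second call with a user buffer retrieves the records themselves (they are only copied when bpf_dump_raw_ok() permits it). Below is a minimal userspace sketch of that pattern; it is not part of this patch, sys_bpf() is a hypothetical raw bpf(2) wrapper, and prog_fd is assumed to be an already-loaded program fd.

#include <linux/bpf.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* hypothetical raw bpf(2) wrapper, for illustration only */
static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* query the bpf_func_info records of an already-loaded program */
static struct bpf_func_info *get_func_info(int prog_fd, __u32 *cnt)
{
	struct bpf_prog_info info = {};
	struct bpf_func_info *finfo;
	union bpf_attr attr = {};
	__u32 rec_size;

	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	/* 1st call: kernel fills func_info_cnt and func_info_rec_size */
	if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return NULL;

	*cnt = info.func_info_cnt;
	if (!*cnt)		/* no BTF was supplied at load time */
		return NULL;
	rec_size = info.func_info_rec_size;
	finfo = calloc(*cnt, rec_size);
	if (!finfo)
		return NULL;

	/* 2nd call: request only the func info records */
	memset(&info, 0, sizeof(info));
	info.func_info_cnt = *cnt;
	info.func_info_rec_size = rec_size;
	info.func_info = (__u64)(unsigned long)finfo;
	if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr))) {
		free(finfo);
		return NULL;
	}
	*cnt = info.func_info_cnt;	/* 0 if bpf_dump_raw_ok() denied the dump */

	return finfo;
}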
 
 struct bpf_map_info {
        };
 };
 
+struct bpf_func_info {
+       __u32   insn_offset;    /* offset (in insns) of this func within the prog */
+       __u32   type_id;        /* BTF type id (BTF_KIND_FUNC) describing this func */
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
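On the load side, the new bpf_attr fields are filled at BPF_PROG_LOAD time: prog_btf_fd refers to a BTF object previously loaded with BPF_BTF_LOAD, and func_info points to one bpf_func_info record per function, sorted by insn_offset. A hedged single-function sketch, reusing the hypothetical sys_bpf() wrapper from the previous example; btf_fd and type id 1 (assumed to be a BTF_KIND_FUNC describing the program) are placeholders that a real loader such as libbpf would derive from the object's .BTF section.

static int load_prog_with_func_info(int btf_fd)
{
	/* r0 = 0; exit; */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	/* one record: the main func starts at insn 0 and is described by
	 * the (assumed) BTF_KIND_FUNC type id 1 inside btf_fd
	 */
	struct bpf_func_info finfo = {
		.insn_offset = 0,
		.type_id = 1,
	};
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_btf_fd = btf_fd;
	attr.func_info = (__u64)(unsigned long)&finfo;
	attr.func_info_rec_size = sizeof(finfo);
	attr.func_info_cnt = 1;	/* one record per function */

	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}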
 
        return !*src;
 }
 
-static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
+const char *btf_name_by_offset(const struct btf *btf, u32 offset)
 {
        if (!offset)
                return "(anon)";
                return "(invalid-name-offset)";
 }
 
-static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
 {
        if (type_id > btf->nr_types)
                return NULL;
 
  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
+#include <uapi/linux/btf.h>
 #include <linux/filter.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/random.h>
 #include <linux/moduleloader.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/frame.h>
 #include <linux/rbtree_latch.h>
 #include <linux/kallsyms.h>
 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 {
        const char *end = sym + KSYM_NAME_LEN;
+       const struct btf_type *type;
+       const char *func_name;
 
        BUILD_BUG_ON(sizeof("bpf_prog_") +
                     sizeof(prog->tag) * 2 +
 
        sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
        sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
+
+       /* prog->aux->name will be ignored if full btf name is available */
+       if (prog->aux->btf) {
+               type = btf_type_by_id(prog->aux->btf, prog->aux->type_id);
+               func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
+               snprintf(sym, (size_t)(end - sym), "_%s", func_name);
+               return;
+       }
+
        if (prog->aux->name[0])
                snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
        else
 
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
                bpf_prog_kallsyms_del_all(prog);
+               btf_put(prog->aux->btf);
 
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
 }
 
 /* last field in 'union bpf_attr' used by this command */
-#define        BPF_PROG_LOAD_LAST_FIELD expected_attach_type
+#define        BPF_PROG_LOAD_LAST_FIELD func_info_cnt
 
-static int bpf_prog_load(union bpf_attr *attr)
+static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 {
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog;
                goto free_prog;
 
        /* run eBPF verifier */
-       err = bpf_check(&prog, attr);
+       err = bpf_check(&prog, attr, uattr);
        if (err < 0)
                goto free_used_maps;
 
                info.xlated_prog_len = 0;
                info.nr_jited_ksyms = 0;
                info.nr_jited_func_lens = 0;
+               info.func_info_cnt = 0;
                goto done;
        }
 
                }
        }
 
+       if (prog->aux->btf) {
+               u32 ucnt, urec_size;
+
+               info.btf_id = btf_id(prog->aux->btf);
+
+               ucnt = info.func_info_cnt;
+               info.func_info_cnt = prog->aux->func_cnt ? : 1;
+               urec_size = info.func_info_rec_size;
+               info.func_info_rec_size = sizeof(struct bpf_func_info);
+               if (ucnt) {
+                       /* the passed-in urec_size must match the record size the kernel uses */
+                       if (urec_size != info.func_info_rec_size)
+                               return -EINVAL;
+
+                       if (bpf_dump_raw_ok()) {
+                               struct bpf_func_info kern_finfo;
+                               char __user *user_finfo;
+                               u32 i, insn_offset;
+
+                               user_finfo = u64_to_user_ptr(info.func_info);
+                               if (prog->aux->func_cnt) {
+                                       ucnt = min_t(u32, info.func_info_cnt, ucnt);
+                                       insn_offset = 0;
+                                       for (i = 0; i < ucnt; i++) {
+                                               kern_finfo.insn_offset = insn_offset;
+                                               kern_finfo.type_id = prog->aux->func[i]->aux->type_id;
+                                               if (copy_to_user(user_finfo, &kern_finfo,
+                                                                sizeof(kern_finfo)))
+                                                       return -EFAULT;
+
+                                               /* func[i]->len is this subprog's insn count */
+                                               insn_offset += prog->aux->func[i]->len;
+                                               user_finfo += urec_size;
+                                       }
+                               } else {
+                                       kern_finfo.insn_offset = 0;
+                                       kern_finfo.type_id = prog->aux->type_id;
+                                       if (copy_to_user(user_finfo, &kern_finfo,
+                                                        sizeof(kern_finfo)))
+                                               return -EFAULT;
+                               }
+                       } else {
+                               info.func_info_cnt = 0;
+                       }
+               }
+       } else {
+               info.func_info_cnt = 0;
+       }
+
 done:
        if (copy_to_user(uinfo, &info, info_len) ||
            put_user(info_len, &uattr->info.info_len))
                err = map_get_next_key(&attr);
                break;
        case BPF_PROG_LOAD:
-               err = bpf_prog_load(&attr);
+               err = bpf_prog_load(&attr, uattr);
                break;
        case BPF_OBJ_PIN:
                err = bpf_obj_pin(&attr);
 
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  */
+#include <uapi/linux/btf.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/filter.h>
 #include <net/netlink.h>
        return ret;
 }
 
+/* The min and max supported sizes of a BTF func info record */
+#define MIN_BPF_FUNCINFO_SIZE  8
+#define MAX_FUNCINFO_REC_SIZE  252
+
+static int check_btf_func(struct bpf_prog *prog, struct bpf_verifier_env *env,
+                         union bpf_attr *attr, union bpf_attr __user *uattr)
+{
+       u32 i, nfuncs, urec_size, min_size, prev_offset;
+       u32 krec_size = sizeof(struct bpf_func_info);
+       struct bpf_func_info krecord = {};
+       const struct btf_type *type;
+       void __user *urecord;
+       struct btf *btf;
+       int ret = 0;
+
+       nfuncs = attr->func_info_cnt;
+       if (!nfuncs)
+               return 0;
+
+       if (nfuncs != env->subprog_cnt) {
+               verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
+               return -EINVAL;
+       }
+
+       urec_size = attr->func_info_rec_size;
+       if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
+           urec_size > MAX_FUNCINFO_REC_SIZE ||
+           urec_size % sizeof(u32)) {
+               verbose(env, "invalid func info rec size %u\n", urec_size);
+               return -EINVAL;
+       }
+
+       btf = btf_get_by_fd(attr->prog_btf_fd);
+       if (IS_ERR(btf)) {
+               verbose(env, "unable to get btf from fd\n");
+               return PTR_ERR(btf);
+       }
+
+       urecord = u64_to_user_ptr(attr->func_info);
+       min_size = min_t(u32, krec_size, urec_size);
+
+       for (i = 0; i < nfuncs; i++) {
+               ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
+               if (ret) {
+                       if (ret == -E2BIG) {
+                               verbose(env, "nonzero trailing record in func info");
+                               /* set the size the kernel expects so the
+                                * loader can zero out the rest of the record.
+                                */
+                               if (put_user(min_size, &uattr->func_info_rec_size))
+                                       ret = -EFAULT;
+                       }
+                       goto free_btf;
+               }
+
+               if (copy_from_user(&krecord, urecord, min_size)) {
+                       ret = -EFAULT;
+                       goto free_btf;
+               }
+
+               /* check insn_offset */
+               if (i == 0) {
+                       if (krecord.insn_offset) {
+                               verbose(env,
+                                       "nonzero insn_offset %u for the first func info record",
+                                       krecord.insn_offset);
+                               ret = -EINVAL;
+                               goto free_btf;
+                       }
+               } else if (krecord.insn_offset <= prev_offset) {
+                       verbose(env,
+                               "same or smaller insn offset (%u) than previous func info record (%u)",
+                               krecord.insn_offset, prev_offset);
+                       ret = -EINVAL;
+                       goto free_btf;
+               }
+
+               if (env->subprog_info[i].start != krecord.insn_offset) {
+                       verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
+                       ret = -EINVAL;
+                       goto free_btf;
+               }
+
+               /* check type_id */
+               type = btf_type_by_id(btf, krecord.type_id);
+               if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
+                       verbose(env, "invalid type id %d in func info",
+                               krecord.type_id);
+                       ret = -EINVAL;
+                       goto free_btf;
+               }
+
+               if (i == 0)
+                       prog->aux->type_id = krecord.type_id;
+               env->subprog_info[i].type_id = krecord.type_id;
+
+               prev_offset = krecord.insn_offset;
+               urecord += urec_size;
+       }
+
+       prog->aux->btf = btf;
+       return 0;
+
+free_btf:
+       btf_put(btf);
+       return ret;
+}
+
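To make the ordering and matching rules above concrete: the loader must pass exactly subprog_cnt records, sorted by strictly increasing insn_offset, with each offset equal to the corresponding subprog's first instruction and each type_id resolving to a BTF_KIND_FUNC in the supplied BTF. An illustrative layout for a program with one bpf-to-bpf callee (the offsets and type ids below are assumptions, not taken from a real object):

/* assumed layout:
 *   subprog 0 (main)   starts at insn 0
 *   subprog 1 (callee) starts at insn 7
 * and type ids 4 and 5 are assumed BTF_KIND_FUNC entries in prog_btf_fd
 */
static const struct bpf_func_info func_info[] = {
	{ .insn_offset = 0, .type_id = 4 },	/* record 0 must have insn_offset == 0 */
	{ .insn_offset = 7, .type_id = 5 },	/* must equal subprog_info[1].start */
};
/* attr.func_info_cnt must then be 2 (== env->subprog_cnt); any mismatch
 * makes check_btf_func() reject the load with -EINVAL
 */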
 /* check %cur's range satisfies %old's */
 static bool range_within(struct bpf_reg_state *old,
                         struct bpf_reg_state *cur)
                func[i]->aux->name[0] = 'F';
                func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
                func[i]->jit_requested = 1;
+               /* the btf is put only once, via the main prog->aux */
+               func[i]->aux->btf = prog->aux->btf;
+               func[i]->aux->type_id = env->subprog_info[i].type_id;
                func[i] = bpf_int_jit_compile(func[i]);
                if (!func[i]->jited) {
                        err = -ENOTSUPP;
        kfree(env->explored_states);
 }
 
-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+             union bpf_attr __user *uattr)
 {
        struct bpf_verifier_env *env;
        struct bpf_verifier_log *log;
        if (ret < 0)
                goto skip_full_check;
 
+       ret = check_btf_func(env->prog, env, attr, uattr);
+       if (ret < 0)
+               goto skip_full_check;
+
        ret = do_check(env);
        if (env->cur_state) {
                free_verifier_state(env->cur_state, true);