return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
 }
 
-static u32 btf_nr_types_total(const struct btf *btf)
+u32 btf_nr_types(const struct btf *btf)
 {
        u32 total = 0;
 
        const char *tname;
        u32 i, total;
 
-       total = btf_nr_types_total(btf);
+       total = btf_nr_types(btf);
        for (i = 1; i < total; i++) {
                t = btf_type_by_id(btf, i);
                if (BTF_INFO_KIND(t->info) != kind)
        return btf->kernel_btf;
 }
 
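+/* a module's BTF is any kernel BTF whose name is not "vmlinux" */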
+bool btf_is_module(const struct btf *btf)
+{
+       return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
+}
+
 static int btf_id_cmp_func(const void *a, const void *b)
 {
        const int *pa = a, *pb = b;
 
 fs_initcall(btf_module_init);
 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
+
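+/* look up the live module that owns @btf and take a reference on it;
+ * returns NULL if the module is being unloaded or if module BTFs are
+ * compiled out
+ */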
+struct module *btf_try_get_module(const struct btf *btf)
+{
+       struct module *res = NULL;
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+       struct btf_module *btf_mod, *tmp;
+
+       mutex_lock(&btf_module_mutex);
+       list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
+               if (btf_mod->btf != btf)
+                       continue;
+
+               if (try_module_get(btf_mod->module))
+                       res = btf_mod->module;
+
+               break;
+       }
+       mutex_unlock(&btf_module_mutex);
+#endif
+
+       return res;
+}
 
        return 0;
 }
 
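+/* return the BTF type ID of @btf's ".data..percpu" DATASEC, or -ENOENT */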
+static int find_btf_percpu_datasec(struct btf *btf)
+{
+       const struct btf_type *t;
+       const char *tname;
+       int i, n;
+
+       /*
+        * Both vmlinux and each module have their own ".data..percpu"
+        * DATASEC in BTF. For a module, skip the vmlinux BTF types that
+        * the module's BTF is built on top of and look only at the
+        * module's own types.
+        */
+       n = btf_nr_types(btf);
+       if (btf_is_module(btf))
+               i = btf_nr_types(btf_vmlinux);
+       else
+               i = 1;
+
+       for (; i < n; i++) {
+               t = btf_type_by_id(btf, i);
+               if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
+                       continue;
+
+               tname = btf_name_by_offset(btf, t->name_off);
+               if (!strcmp(tname, ".data..percpu"))
+                       return i;
+       }
+
+       return -ENOENT;
+}
+
 /* replace pseudo btf_id with kernel symbol address */
 static int check_pseudo_btf_id(struct bpf_verifier_env *env,
                               struct bpf_insn *insn,
 {
        const struct btf_var_secinfo *vsi;
        const struct btf_type *datasec;
+       struct btf_mod_pair *btf_mod;
        const struct btf_type *t;
        const char *sym_name;
        bool percpu = false;
        u32 type, id = insn->imm;
+       struct btf *btf;
        s32 datasec_id;
        u64 addr;
-       int i;
-
-       if (!btf_vmlinux) {
-               verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
-               return -EINVAL;
-       }
+       int i, btf_fd, err;
 
-       if (insn[1].imm != 0) {
-               verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n");
-               return -EINVAL;
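+       /* insn[1].imm, previously a reserved field, now optionally holds an
+        * FD of the module BTF object that btf_id belongs to; 0 still
+        * selects vmlinux BTF
+        */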
+       btf_fd = insn[1].imm;
+       if (btf_fd) {
+               btf = btf_get_by_fd(btf_fd);
+               if (IS_ERR(btf)) {
+                       verbose(env, "invalid module BTF object FD specified.\n");
+                       return -EINVAL;
+               }
+       } else {
+               if (!btf_vmlinux) {
+                       verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
+                       return -EINVAL;
+               }
+               btf = btf_vmlinux;
+               btf_get(btf);
        }
 
-       t = btf_type_by_id(btf_vmlinux, id);
+       t = btf_type_by_id(btf, id);
        if (!t) {
                verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
-               return -ENOENT;
+               err = -ENOENT;
+               goto err_put;
        }
 
        if (!btf_type_is_var(t)) {
-               verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n",
-                       id);
-               return -EINVAL;
+               verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
+               err = -EINVAL;
+               goto err_put;
        }
 
-       sym_name = btf_name_by_offset(btf_vmlinux, t->name_off);
+       sym_name = btf_name_by_offset(btf, t->name_off);
        addr = kallsyms_lookup_name(sym_name);
        if (!addr) {
                verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
                        sym_name);
-               return -ENOENT;
+               err = -ENOENT;
+               goto err_put;
        }
 
-       datasec_id = btf_find_by_name_kind(btf_vmlinux, ".data..percpu",
-                                          BTF_KIND_DATASEC);
+       datasec_id = find_btf_percpu_datasec(btf);
        if (datasec_id > 0) {
-               datasec = btf_type_by_id(btf_vmlinux, datasec_id);
+               datasec = btf_type_by_id(btf, datasec_id);
                for_each_vsi(i, datasec, vsi) {
                        if (vsi->type == id) {
                                percpu = true;
        insn[1].imm = addr >> 32;
 
        type = t->type;
-       t = btf_type_skip_modifiers(btf_vmlinux, type, NULL);
+       t = btf_type_skip_modifiers(btf, type, NULL);
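+       /* per-CPU vars are exposed as PTR_TO_PERCPU_BTF_ID, non-struct
+        * types as fixed-size PTR_TO_MEM, and structs as PTR_TO_BTF_ID
+        */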
        if (percpu) {
                aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
-               aux->btf_var.btf = btf_vmlinux;
+               aux->btf_var.btf = btf;
                aux->btf_var.btf_id = type;
        } else if (!btf_type_is_struct(t)) {
                const struct btf_type *ret;
                u32 tsize;
 
                /* resolve the type size of ksym. */
-               ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+               ret = btf_resolve_size(btf, t, &tsize);
                if (IS_ERR(ret)) {
-                       tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+                       tname = btf_name_by_offset(btf, t->name_off);
                        verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
                                tname, PTR_ERR(ret));
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto err_put;
                }
                aux->btf_var.reg_type = PTR_TO_MEM;
                aux->btf_var.mem_size = tsize;
        } else {
                aux->btf_var.reg_type = PTR_TO_BTF_ID;
-               aux->btf_var.btf = btf_vmlinux;
+               aux->btf_var.btf = btf;
                aux->btf_var.btf_id = type;
        }
+
+       /* check whether we recorded this BTF (and maybe module) already */
+       for (i = 0; i < env->used_btf_cnt; i++) {
+               if (env->used_btfs[i].btf == btf) {
+                       btf_put(btf);
+                       return 0;
+               }
+       }
+
+       if (env->used_btf_cnt >= MAX_USED_BTFS) {
+               err = -E2BIG;
+               goto err_put;
+       }
+
+       btf_mod = &env->used_btfs[env->used_btf_cnt];
+       btf_mod->btf = btf;
+       btf_mod->module = NULL;
+
+       /* if we reference variables from a kernel module, bump its refcount */
+       if (btf_is_module(btf)) {
+               btf_mod->module = btf_try_get_module(btf);
+               if (!btf_mod->module) {
+                       err = -ENXIO;
+                       goto err_put;
+               }
+       }
+
+       env->used_btf_cnt++;
+
        return 0;
+err_put:
+       btf_put(btf);
+       return err;
 }
 
 static int check_map_prealloc(struct bpf_map *map)
                             env->used_map_cnt);
 }
 
+/* drop refcnt of btfs used by the rejected program */
+static void release_btfs(struct bpf_verifier_env *env)
+{
+       __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
+                            env->used_btf_cnt);
+}
+
 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
 {
                goto err_release_maps;
        }
 
-       if (ret == 0 && env->used_map_cnt) {
+       if (ret)
+               goto err_release_maps;
+
+       if (env->used_map_cnt) {
                /* if program passed verifier, update used_maps in bpf_prog_info */
                env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
                                                          sizeof(env->used_maps[0]),
                memcpy(env->prog->aux->used_maps, env->used_maps,
                       sizeof(env->used_maps[0]) * env->used_map_cnt);
                env->prog->aux->used_map_cnt = env->used_map_cnt;
+       }
+       if (env->used_btf_cnt) {
+               /* if program passed verifier, update used_btfs in bpf_prog_aux */
+               env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
+                                                         sizeof(env->used_btfs[0]),
+                                                         GFP_KERNEL);
+               if (!env->prog->aux->used_btfs) {
+                       ret = -ENOMEM;
+                       goto err_release_maps;
+               }
 
+               memcpy(env->prog->aux->used_btfs, env->used_btfs,
+                      sizeof(env->used_btfs[0]) * env->used_btf_cnt);
+               env->prog->aux->used_btf_cnt = env->used_btf_cnt;
+       }
+       if (env->used_map_cnt || env->used_btf_cnt) {
                /* program is valid. Convert pseudo bpf_ld_imm64 into generic
                 * bpf_ld_imm64 instructions
                 */
                convert_pseudo_ld_imm64(env);
        }
 
-       if (ret == 0)
-               adjust_btf_func(env);
+       adjust_btf_func(env);
 
 err_release_maps:
        if (!env->prog->aux->used_maps)
                /* if we didn't copy map pointers into bpf_prog_info, release
                 * them now. Otherwise free_used_maps() will release them.
                 */
                release_maps(env);
+       if (!env->prog->aux->used_btfs)
+               release_btfs(env);
 
        /* extension progs temporarily inherit the attach_type of their targets
           for verification purposes, so set it back to zero before returning