 #include <linux/file.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
+#include <linux/idr.h>
 #include <linux/bpf_verifier.h>
 #include <linux/btf.h>
 
             i < btf_type_vlen(struct_type);                            \
             i++, member++)
 
+static DEFINE_IDR(btf_idr);
+static DEFINE_SPINLOCK(btf_idr_lock);
+
 struct btf {
        union {
                struct btf_header *hdr;
        u32 types_size;
        u32 data_size;
        refcount_t refcnt;
+       u32 id;
+       struct rcu_head rcu;
 };
 
 enum verifier_phase {
        return 0;
 }
 
+static int btf_alloc_id(struct btf *btf)
+{
+       int id;
+
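+       /* idr_preload(GFP_KERNEL) fills the per-cpu preload cache while
+        * sleeping is still allowed, so the GFP_ATOMIC allocation under
+        * the non-sleepable spinlock below can draw from that cache.
+        */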
+       idr_preload(GFP_KERNEL);
+       spin_lock_bh(&btf_idr_lock);
+       id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
+       if (id > 0)
+               btf->id = id;
+       spin_unlock_bh(&btf_idr_lock);
+       idr_preload_end();
+
+       if (WARN_ON_ONCE(!id))
+               return -ENOSPC;
+
+       return id > 0 ? 0 : id;
+}
+
+static void btf_free_id(struct btf *btf)
+{
+       unsigned long flags;
+
+       /*
+        * In map-in-map, calling map_delete_elem() on outer
+        * map will call bpf_map_put on the inner map.
+        * It will then eventually call btf_free_id()
+        * on the inner map.  Some of the map_delete_elem()
+        * implementations may have irq disabled, so
+        * we need to use the _irqsave() version instead
+        * of the _bh() version.
+        */
+       spin_lock_irqsave(&btf_idr_lock, flags);
+       idr_remove(&btf_idr, btf->id);
+       spin_unlock_irqrestore(&btf_idr_lock, flags);
+}
+
 static void btf_free(struct btf *btf)
 {
        kvfree(btf->types);
        kfree(btf);
 }
 
-static void btf_get(struct btf *btf)
+static void btf_free_rcu(struct rcu_head *rcu)
 {
-       refcount_inc(&btf->refcnt);
+       struct btf *btf = container_of(rcu, struct btf, rcu);
+
+       btf_free(btf);
 }
 
 void btf_put(struct btf *btf)
 {
-       if (btf && refcount_dec_and_test(&btf->refcnt))
-               btf_free(btf);
+       if (btf && refcount_dec_and_test(&btf->refcnt)) {
+               btf_free_id(btf);
+               call_rcu(&btf->rcu, btf_free_rcu);
+       }
 }
 
 static int env_resolve_init(struct btf_verifier_env *env)
        .release        = btf_release,
 };
 
+static int __btf_new_fd(struct btf *btf)
+{
+       return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
+}
+
 int btf_new_fd(const union bpf_attr *attr)
 {
        struct btf *btf;
-       int fd;
+       int ret;
 
        btf = btf_parse(u64_to_user_ptr(attr->btf),
                        attr->btf_size, attr->btf_log_level,
        if (IS_ERR(btf))
                return PTR_ERR(btf);
 
-       fd = anon_inode_getfd("btf", &btf_fops, btf,
-                             O_RDONLY | O_CLOEXEC);
-       if (fd < 0)
+       ret = btf_alloc_id(btf);
+       if (ret) {
+               btf_free(btf);
+               return ret;
+       }
+
+       /*
+        * The BTF ID is published to userspace.
+        * From this point on, every BTF free must go
+        * through call_rcu() (i.e. free only by calling
+        * btf_put()).
+        */
+
+       ret = __btf_new_fd(btf);
+       if (ret < 0)
                btf_put(btf);
 
-       return fd;
+       return ret;
 }
 
 struct btf *btf_get_by_fd(int fd)
        }
 
        btf = f.file->private_data;
-       btf_get(btf);
+       refcount_inc(&btf->refcnt);
        fdput(f);
 
        return btf;
 
        return 0;
 }
+
+int btf_get_fd_by_id(u32 id)
+{
+       struct btf *btf;
+       int fd;
+
+       rcu_read_lock();
+       btf = idr_find(&btf_idr, id);
+       if (!btf || !refcount_inc_not_zero(&btf->refcnt))
+               btf = ERR_PTR(-ENOENT);
+       rcu_read_unlock();
+
+       if (IS_ERR(btf))
+               return PTR_ERR(btf);
+
+       fd = __btf_new_fd(btf);
+       if (fd < 0)
+               btf_put(btf);
+
+       return fd;
+}
+
+u32 btf_id(const struct btf *btf)
+{
+       return btf->id;
+}
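
The ordering inside btf_put() above is what makes the idr_find()-based lookup
in btf_get_fd_by_id() safe: the ID is unpublished with btf_free_id() before
the memory is handed to call_rcu(), so a reader that found the btf under
rcu_read_lock() can still safely probe its refcount. A sketch of the
interleaving this protects against (illustration only, not part of the diff):

/*
 * CPU 0: btf_get_fd_by_id()              CPU 1: last btf_put()
 *
 * rcu_read_lock()
 * btf = idr_find(&btf_idr, id)
 *                                        refcount_dec_and_test() -> true
 *                                        btf_free_id(btf)
 *                                        call_rcu(&btf->rcu, btf_free_rcu)
 * refcount_inc_not_zero(&btf->refcnt)
 *     -> fails, lookup returns -ENOENT
 * rcu_read_unlock()
 *                                        btf_free_rcu() runs only after the
 *                                        grace period, i.e. after CPU 0's
 *                                        rcu_read_unlock(), so the pointer
 *                                        obtained from idr_find() never
 *                                        refers to freed memory.
 */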
 
 
        bpf_map_uncharge_memlock(map);
        security_bpf_map_free(map);
-       btf_put(map->btf);
        /* implementation dependent freeing */
        map->ops->map_free(map);
 }
        if (atomic_dec_and_test(&map->refcnt)) {
                /* bpf_map_free_id() must be called first */
                bpf_map_free_id(map, do_idr_lock);
+               btf_put(map->btf);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
        info.map_flags = map->map_flags;
        memcpy(info.name, map->name, sizeof(map->name));
 
+       if (map->btf) {
+               info.btf_id = btf_id(map->btf);
+               info.btf_key_id = map->btf_key_id;
+               info.btf_value_id = map->btf_value_id;
+       }
+
        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_info_fill(&info, map);
                if (err)
        return btf_new_fd(attr);
 }
 
+#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
+
+static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
+{
+       if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return btf_get_fd_by_id(attr->btf_id);
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
        union bpf_attr attr = {};
        case BPF_BTF_LOAD:
                err = bpf_btf_load(&attr);
                break;
+       case BPF_BTF_GET_FD_BY_ID:
+               err = bpf_btf_get_fd_by_id(&attr);
+               break;
        default:
                err = -EINVAL;
                break;
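
For reference, a minimal userspace sketch of driving the new command
(btf_fd_from_id() is a hypothetical helper name; the sketch assumes the uapi
side of this patch, i.e. the BPF_BTF_GET_FD_BY_ID command number and the
btf_id field in union bpf_attr):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: exchange a published BTF ID for a new fd.
 * Needs CAP_SYS_ADMIN; fails with errno == ENOENT once the ID has
 * been freed by the last btf_put().
 */
static int btf_fd_from_id(__u32 id)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.btf_id = id;

        /* On success the kernel hands back an O_RDONLY | O_CLOEXEC
         * anon-inode fd, the same kind btf_new_fd() produces.
         */
        return syscall(__NR_bpf, BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
}

Combined with the btf_id now reported in bpf_map_info (hunk above), an
introspection tool can walk map fd -> info.btf_id -> BTF fd without having
been the process that loaded the BTF in the first place.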