#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
-#include <linux/idr.h>
#include <linux/sort.h>
+#include <linux/xarray.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
i < btf_type_vlen(struct_type); \
i++, member++)
-static DEFINE_IDR(btf_idr);
-static DEFINE_SPINLOCK(btf_idr_lock);
+static DEFINE_XARRAY_FLAGS(btf_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
+static u32 btf_next_id;
struct btf {
void *data;
static int btf_alloc_id(struct btf *btf)
{
-	int id;
-
-	idr_preload(GFP_KERNEL);
-	spin_lock_bh(&btf_idr_lock);
-	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
-	if (id > 0)
-		btf->id = id;
-	spin_unlock_bh(&btf_idr_lock);
-	idr_preload_end();
-
-	if (WARN_ON_ONCE(!id))
-		return -ENOSPC;
-
-	return id > 0 ? 0 : id;
+	int ret;
+
+	/*
+	 * XA_FLAGS_ALLOC1 keeps index 0 permanently busy, so IDs are
+	 * allocated from 1 upward exactly as the IDR code did.  The xarray
+	 * drops its internal lock around the allocation, which is why
+	 * GFP_KERNEL is safe here even though the lock is a _bh one.
+	 */
+	ret = xa_alloc_cyclic_bh(&btf_xa, &btf->id, btf, xa_limit_31b,
+				 &btf_next_id, GFP_KERNEL);
+
+	/*
+	 * xa_alloc_cyclic_bh() returns 1 when the allocation succeeded
+	 * after wrapping around; normalize that to 0 so every caller keeps
+	 * the historical "0 on success / negative errno" contract.
+	 */
+	return ret < 0 ? ret : 0;
}
static void btf_free_id(struct btf *btf)
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
-	spin_lock_irqsave(&btf_idr_lock, flags);
-	idr_remove(&btf_idr, btf->id);
-	spin_unlock_irqrestore(&btf_idr_lock, flags);
+	/*
+	 * The xarray lock is taken manually here (irqsave rather than the
+	 * array's default _bh flavour — see the comment above), so the
+	 * non-locking __xa_erase() must be used instead of xa_erase().
+	 */
+	xa_lock_irqsave(&btf_xa, flags);
+	__xa_erase(&btf_xa, btf->id);
+	xa_unlock_irqrestore(&btf_xa, flags);
}
static void btf_free(struct btf *btf)
return PTR_ERR(btf);
ret = btf_alloc_id(btf);
- if (ret) {
+ if (ret < 0) {
btf_free(btf);
return ret;
}
int fd;
rcu_read_lock();
- btf = idr_find(&btf_idr, id);
- if (!btf || !refcount_inc_not_zero(&btf->refcnt))
- btf = ERR_PTR(-ENOENT);
+ btf = xa_load(&btf_xa, id);
+ if (btf && !refcount_inc_not_zero(&btf->refcnt))
+ btf = NULL;
rcu_read_unlock();
- if (IS_ERR(btf))
- return PTR_ERR(btf);
+ if (!btf)
+ return -ENOENT;
fd = __btf_new_fd(btf);
if (fd < 0)