#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
DEFINE_PER_CPU(int, bpf_prog_active);
-static DEFINE_IDR(prog_idr);
-static DEFINE_SPINLOCK(prog_idr_lock);
-static DEFINE_IDR(map_idr);
-static DEFINE_SPINLOCK(map_idr_lock);
+static DEFINE_XARRAY_FLAGS(prog_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
+static u32 prog_xa_next; /* cyclic allocation cursor for prog IDs */
+static DEFINE_XARRAY_FLAGS(map_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
+static u32 map_xa_next; /* cyclic allocation cursor for map IDs */
int sysctl_unprivileged_bpf_disabled __read_mostly;
static int bpf_map_alloc_id(struct bpf_map *map)
{
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&map_idr_lock);
- id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
- if (id > 0)
- map->id = id;
- spin_unlock_bh(&map_idr_lock);
- idr_preload_end();
-
- if (WARN_ON_ONCE(!id))
- return -ENOSPC;
-
- return id > 0 ? 0 : id;
+ return xa_alloc_cyclic_bh(&map_xa, &map->id, map, xa_limit_31b,
+ &map_xa_next, GFP_KERNEL);
}
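xa_alloc_cyclic_bh() folds the old idr_preload()/idr_alloc_cyclic()/idr_preload_end() sequence into a single call: it takes the XArray's internal spinlock with the _bh variant (which is what XA_FLAGS_LOCK_BH declares, so the internal retry path re-takes it correctly), drops the lock if it has to allocate nodes with GFP_KERNEL, and hands out IDs from [1, INT_MAX] because XA_FLAGS_ALLOC1 keeps index 0 reserved. It returns 0 on success, 1 if the cyclic cursor wrapped, or a negative errno, which is why the callers below now check err < 0 rather than err. A minimal sketch of the same pattern, built around a hypothetical struct foo rather than anything in this patch:

#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(foo_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
static u32 foo_xa_next;		/* cyclic allocation cursor */

struct foo {
	u32 id;			/* 0 means "not in foo_xa" */
};

static int foo_alloc_id(struct foo *foo)
{
	int err;

	/* Store foo at a free index in [1, INT_MAX] and write that
	 * index into foo->id.
	 */
	err = xa_alloc_cyclic_bh(&foo_xa, &foo->id, foo, xa_limit_31b,
				 &foo_xa_next, GFP_KERNEL);

	/* 0: allocated; 1: allocated after the cursor wrapped;
	 * negative: -EBUSY (no free index left) or -ENOMEM.
	 */
	return err < 0 ? err : 0;
}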
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map, bool unlocked)
{
unsigned long flags;
- /* Offloaded maps are removed from the IDR store when their device
+ /* Offloaded maps are removed from the XArray when their device
* disappears - even if someone holds an fd to them they are unusable,
* the memory is gone, all ops will fail; they are simply waiting for
* refcnt to drop to be freed.
*/
if (!map->id)
return;
- if (do_idr_lock)
- spin_lock_irqsave(&map_idr_lock, flags);
+ if (unlocked)
+ xa_lock_irqsave(&map_xa, flags);
else
- __acquire(&map_idr_lock);
-
- idr_remove(&map_idr, map->id);
+ __acquire(&map_xa.xa_lock);
+ __xa_erase(&map_xa, map->id);
map->id = 0;
-
- if (do_idr_lock)
- spin_unlock_irqrestore(&map_idr_lock, flags);
+ if (unlocked)
+ xa_unlock_irqrestore(&map_xa, flags);
else
- __release(&map_idr_lock);
+ __release(&map_xa.xa_lock);
}
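__xa_erase() is the caller-holds-the-lock flavour of xa_erase(), so the function above only takes xa_lock itself when unlocked is true; in the already-locked case the __acquire()/__release() annotations exist purely so sparse sees a balanced lock. The map side keeps the irqsave variant that the original spinlock used, while the sketch below sticks to _bh for brevity. Continuing the hypothetical foo example, a caller that already holds the lock looks roughly like this (foo_free_id and foo_zap_locked are illustrative names only):

static void foo_free_id(struct foo *foo, bool unlocked)
{
	if (unlocked)
		xa_lock_bh(&foo_xa);
	else
		__acquire(&foo_xa.xa_lock);	/* caller already holds it */

	__xa_erase(&foo_xa, foo->id);		/* needs xa_lock held */
	foo->id = 0;

	if (unlocked)
		xa_unlock_bh(&foo_xa);
	else
		__release(&foo_xa.xa_lock);
}

static void foo_zap_locked(struct foo *foo)
{
	xa_lock_bh(&foo_xa);
	foo_free_id(foo, false);	/* share one critical section */
	/* ... other updates under the same lock ... */
	xa_unlock_bh(&foo_xa);
}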
/* called from workqueue */
/* decrement map refcnt and schedule it for freeing via workqueue
* (underlying map implementation ops->map_free() might sleep)
*/
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+static void __bpf_map_put(struct bpf_map *map, bool unlocked)
{
if (atomic_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
- bpf_map_free_id(map, do_idr_lock);
+ bpf_map_free_id(map, unlocked);
btf_put(map->btf);
INIT_WORK(&map->work, bpf_map_free_deferred);
schedule_work(&map->work);
goto free_map;
err = bpf_map_alloc_id(map);
- if (err)
+ if (err < 0)
goto free_map_sec;
err = bpf_map_new_fd(map, f_flags);
return map;
}
-/* map_idr_lock should have been held */
+/* map_xa's lock must be held by the caller */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
bool uref)
{
static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
- int id;
-
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&prog_idr_lock);
- id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
- if (id > 0)
- prog->aux->id = id;
- spin_unlock_bh(&prog_idr_lock);
- idr_preload_end();
-
- /* id is in [1, INT_MAX) */
- if (WARN_ON_ONCE(!id))
- return -ENOSPC;
-
- return id > 0 ? 0 : id;
+ return xa_alloc_cyclic_bh(&prog_xa, &prog->aux->id, prog, xa_limit_31b,
+ &prog_xa_next, GFP_KERNEL);
}
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool unlocked)
{
- /* cBPF to eBPF migrations are currently not in the idr store.
+ /* cBPF to eBPF migrations are currently not in the XArray.
* Offloaded programs are removed from the store when their device
* disappears - even if someone grabs an fd to them they are unusable,
* simply waiting for refcnt to drop to be freed.
*/
if (!prog->aux->id)
return;
- if (do_idr_lock)
- spin_lock_bh(&prog_idr_lock);
+ if (unlocked)
+ xa_lock_bh(&prog_xa);
else
- __acquire(&prog_idr_lock);
+ __acquire(&prog_xa.xa_lock);
- idr_remove(&prog_idr, prog->aux->id);
+ __xa_erase(&prog_xa, prog->aux->id);
prog->aux->id = 0;
- if (do_idr_lock)
- spin_unlock_bh(&prog_idr_lock);
+ if (unlocked)
+ xa_unlock_bh(&prog_xa);
else
- __release(&prog_idr_lock);
+ __release(&prog_xa.xa_lock);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
bpf_prog_free(aux->prog);
}
-static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
+static void __bpf_prog_put(struct bpf_prog *prog, bool unlocked)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
/* bpf_prog_free_id() must be called first */
- bpf_prog_free_id(prog, do_idr_lock);
+ bpf_prog_free_id(prog, unlocked);
bpf_prog_kallsyms_del_all(prog);
btf_put(prog->aux->btf);
kvfree(prog->aux->func_info);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
-/* prog_idr_lock should have been held */
+/* prog_xa's lock must be held by the caller */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
int refold;
goto free_used_maps;
err = bpf_prog_alloc_id(prog);
- if (err)
+ if (err < 0)
goto free_used_maps;
err = bpf_prog_new_fd(prog);
static int bpf_obj_get_next_id(const union bpf_attr *attr,
union bpf_attr __user *uattr,
- struct idr *idr,
- spinlock_t *lock)
+ struct xarray *xa)
{
- u32 next_id = attr->start_id;
+ unsigned long next_id = attr->start_id;
int err = 0;
if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- next_id++;
- spin_lock_bh(lock);
- if (!idr_get_next(idr, &next_id))
+ if (!xa_find_after(xa, &next_id, INT_MAX, XA_PRESENT))
err = -ENOENT;
- spin_unlock_bh(lock);
if (!err)
err = put_user(next_id, &uattr->next_id);
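xa_find_after() returns the first present entry at an index strictly greater than *next_id (up to the given max) and updates *next_id to that index, which is why both the explicit next_id++ and the spin_lock_bh()/spin_unlock_bh() pair could go: the walk takes the RCU read lock internally. A sketch of the same lookup against the hypothetical foo_xa from the earlier examples (foo_get_next_id is an illustrative name):

static int foo_get_next_id(u32 start_id, u32 *ret_id)
{
	unsigned long id = start_id;

	/* Find the first present entry with index > start_id;
	 * 'id' is updated to that index. No external lock needed.
	 */
	if (!xa_find_after(&foo_xa, &id, INT_MAX, XA_PRESENT))
		return -ENOENT;

	*ret_id = id;
	return 0;
}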
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- spin_lock_bh(&prog_idr_lock);
- prog = idr_find(&prog_idr, id);
+ xa_lock_bh(&prog_xa);
+ prog = xa_load(&prog_xa, id);
if (prog)
prog = bpf_prog_inc_not_zero(prog);
else
prog = ERR_PTR(-ENOENT);
- spin_unlock_bh(&prog_idr_lock);
+ xa_unlock_bh(&prog_xa);
if (IS_ERR(prog))
return PTR_ERR(prog);
if (f_flags < 0)
return f_flags;
- spin_lock_bh(&map_idr_lock);
- map = idr_find(&map_idr, id);
+ xa_lock_bh(&map_xa);
+ map = xa_load(&map_xa, id);
if (map)
map = bpf_map_inc_not_zero(map, true);
else
map = ERR_PTR(-ENOENT);
- spin_unlock_bh(&map_idr_lock);
+ xa_unlock_bh(&map_xa);
if (IS_ERR(map))
return PTR_ERR(map);
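In the *_GET_FD_BY_ID paths the xa_lock is still taken explicitly: xa_load() on its own only needs RCU, but holding the lock across the lookup and the refcount bump is what stops bpf_map_free_id()/bpf_prog_free_id() from erasing and freeing the object between those two steps. A sketch of that look-up-then-take-a-reference shape, assuming the hypothetical struct foo from above also carried a refcount_t refcnt (the BPF code itself uses its own refcount helpers):

static struct foo *foo_get_by_id(u32 id)
{
	struct foo *foo;

	xa_lock_bh(&foo_xa);
	foo = xa_load(&foo_xa, id);
	/* Only take a reference if the object is still live; holding
	 * xa_lock keeps foo_free_id() from racing with us here.
	 */
	if (foo && !refcount_inc_not_zero(&foo->refcnt))
		foo = NULL;
	xa_unlock_bh(&foo_xa);

	return foo ? foo : ERR_PTR(-ENOENT);
}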
err = bpf_prog_test_run(&attr, uattr);
break;
case BPF_PROG_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
- &prog_idr, &prog_idr_lock);
+ err = bpf_obj_get_next_id(&attr, uattr, &prog_xa);
break;
case BPF_MAP_GET_NEXT_ID:
- err = bpf_obj_get_next_id(&attr, uattr,
- &map_idr, &map_idr_lock);
+ err = bpf_obj_get_next_id(&attr, uattr, &map_xa);
break;
case BPF_PROG_GET_FD_BY_ID:
err = bpf_prog_get_fd_by_id(&attr);