#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;
-/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
-static DEFINE_IDR(cgroup_hierarchy_idr);
+/* hierarchy ID allocation and mapping, protected by the xarray's internal lock */
+static DEFINE_XARRAY_ALLOC(cgroup_hierarchy);
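+/* cyclic allocation cursor, advanced by xa_alloc_cyclic() under the xarray lock */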
+static u32 cgroup_hierarchy_next;
/*
* Assign a monotonically increasing serial number to csses. It guarantees
static int cgroup_init_root_id(struct cgroup_root *root)
{
- int id;
-
- lockdep_assert_held(&cgroup_mutex);
-
- id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- root->hierarchy_id = id;
- return 0;
+ u32 id;
+ int ret;
+
+ /* xa_alloc_cyclic() stores the new ID through a u32 *, so use a local */
+ ret = xa_alloc_cyclic(&cgroup_hierarchy, &id, root, xa_limit_32b,
+ &cgroup_hierarchy_next, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ /* a return of 1 only means the cursor wrapped; that is still success */
+ root->hierarchy_id = id;
+ return 0;
}
static void cgroup_exit_root_id(struct cgroup_root *root)
{
- lockdep_assert_held(&cgroup_mutex);
-
- idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
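+ /* xa_erase() takes the xarray's internal lock, so no external locking is needed */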
+ xa_erase(&cgroup_hierarchy, root->hierarchy_id);
}
void cgroup_free_root(struct cgroup_root *root)
goto cancel_ref;
ret = cgroup_init_root_id(root);
- if (ret)
+ if (ret < 0)
goto cancel_ref;
kf_sops = root == &cgrp_dfl_root ?
{
struct cgroup_root *root;
struct cgroup *cgrp;
- int hierarchy_id = 1;
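+ /* xa_find() takes an unsigned long index and updates it in place */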
+ unsigned long hierarchy_id = 1;
int ret;
- mutex_lock(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
- root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
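+ /* hold the xarray lock so the root cannot be erased while the path is built */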
+ xa_lock(&cgroup_hierarchy);
+ root = xa_find(&cgroup_hierarchy, &hierarchy_id, ULONG_MAX, XA_PRESENT);
if (root) {
cgrp = task_cgroup_from_root(task, root);
ret = strlcpy(buf, "/", buflen);
}
+ xa_unlock(&cgroup_hierarchy);
spin_unlock_irq(&css_set_lock);
- mutex_unlock(&cgroup_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);