char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
bool cgroup_debug __read_mostly;
-/*
- * Protects css_idr so that IDs can be released without
- * grabbing cgroup_mutex.
- */
-static DEFINE_SPINLOCK(cgroup_idr_lock);
-
/*
* Protects cgroup_file->kn for !self csses. It synchronizes notifications
* against file removal/re-creation across css hiding.
return cgrp->root == &cgrp_dfl_root;
}
-/* IDR wrappers which synchronize using cgroup_idr_lock */
-static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
- gfp_t gfp_mask)
-{
- int ret;
-
- idr_preload(gfp_mask);
- spin_lock_bh(&cgroup_idr_lock);
- ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
- spin_unlock_bh(&cgroup_idr_lock);
- idr_preload_end();
- return ret;
-}
-
-static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
-{
- void *ret;
-
- spin_lock_bh(&cgroup_idr_lock);
- ret = idr_replace(idr, ptr, id);
- spin_unlock_bh(&cgroup_idr_lock);
- return ret;
-}
-
-static void cgroup_idr_remove(struct idr *idr, int id)
-{
- spin_lock_bh(&cgroup_idr_lock);
- idr_remove(idr, id);
- spin_unlock_bh(&cgroup_idr_lock);
-}
-
static bool cgroup_has_tasks(struct cgroup *cgrp)
{
return cgrp->nr_populated_csets;
int id = css->id;
ss->css_free(css);
- cgroup_idr_remove(&ss->css_idr, id);
+ xa_erase_bh(&ss->css_xa, id);
cgroup_put(cgrp);
if (parent)
list_del_rcu(&css->rstat_css_node);
}
- cgroup_idr_replace(&ss->css_idr, NULL, css->id);
+ xa_store_bh(&ss->css_xa, css->id, NULL, 0);
if (ss->css_released)
ss->css_released(css);
} else {
if (err)
goto err_free_css;
- err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
+ err = xa_alloc_bh(&ss->css_xa, &css->id, NULL, xa_limit_31b,
+ GFP_KERNEL);
if (err < 0)
goto err_free_css;
- css->id = err;
+ BUG_ON(css->id < 2);
/* @css is ready to be brought online now, make it visible */
list_add_tail_rcu(&css->sibling, &parent_css->children);
- cgroup_idr_replace(&ss->css_idr, css, css->id);
+ xa_store_bh(&ss->css_xa, css->id, css, 0);
err = online_css(css);
if (err)
mutex_lock(&cgroup_mutex);
- idr_init(&ss->css_idr);
+ xa_init_flags(&ss->css_xa, XA_FLAGS_ALLOC1 | XA_FLAGS_LOCK_BH);
INIT_LIST_HEAD(&ss->cfts);
/* Create the root cgroup state for this subsystem */
* percpu_ref during early init. Disable refcnting.
*/
css->flags |= CSS_NO_REF;
+ css->id = 1;
- if (early) {
+ if (!early) {
/* allocation can't be done safely during early init */
- css->id = 1;
- } else {
- css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
- BUG_ON(css->id < 0);
+ void *old = xa_store_bh(&ss->css_xa, 1, css, GFP_KERNEL);
+ BUG_ON(old);
}
/* Update the init_css_set to contain a subsys
struct cgroup_subsys_state *css =
init_css_set.subsys[ss->id];
- css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
- GFP_KERNEL);
- BUG_ON(css->id < 0);
+ void *p = xa_store_bh(&ss->css_xa, 1, css, GFP_KERNEL);
+ BUG_ON(p != NULL);
} else {
cgroup_init_subsys(ss, false);
}
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
-	return idr_find(&ss->css_idr, id);
+	/* xa_load() is safe under rcu_read_lock(), like idr_find() before it */
+	return xa_load(&ss->css_xa, id);
}
/**