};
/*
- * Private memory cgroup IDR
+ * Private memory cgroup XArray
*
* Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bits, keeping those references compact.
*/
-static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_XARRAY_ALLOC1(mem_cgroups);
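DEFINE_XARRAY_ALLOC1() defines an XArray that tracks free entries and never allocates index 0, so IDs are handed out starting at 1. That preserves the convention that memcg ID 0 means "no cgroup" without the explicit start argument of 1 that idr_alloc() needed. A minimal sketch of that behavior (the names here are illustrative, not part of the patch):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(demo_ids);

	static int demo_get_id(void *entry)
	{
		u32 id;
		int err;

		/* Index 0 is pre-reserved by the ALLOC1 variant, so the
		 * first successful allocation yields id == 1 even though
		 * the limit below starts at 0. */
		err = xa_alloc(&demo_ids, &id, entry,
			       XA_LIMIT(0, 15), GFP_KERNEL);
		return err ? err : id;
	}

This also explains the limit used later in the patch: XA_LIMIT() is inclusive on both ends, so XA_LIMIT(0, MEM_CGROUP_ID_MAX - 1) covers the same IDs as idr_alloc()'s half-open range [1, MEM_CGROUP_ID_MAX).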
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
- idr_remove(&mem_cgroup_idr, memcg->id.id);
+ xa_erase(&mem_cgroups, memcg->id.id);
memcg->id.id = 0;
}
}
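One behavioral difference worth noting: idr_remove() depends on the caller to serialize modifications, while xa_erase() takes the array's internal spinlock itself and returns whatever entry was removed (NULL if the index was already empty). A hedged sketch, again with illustrative names:

	#include <linux/xarray.h>

	static void demo_put_id(struct xarray *xa, unsigned long id)
	{
		/* xa_erase() locks internally; erasing an empty index
		 * is a harmless no-op that returns NULL. */
		void *old = xa_erase(xa, id);

		WARN_ON_ONCE(!old);
	}

The `id.id > 0` guard above is unchanged: ID 0 is never allocated, so it doubles as the "no ID assigned" sentinel.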
/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return idr_find(&mem_cgroup_idr, id);
+ return xa_load(&mem_cgroups, id);
}
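xa_load() takes rcu_read_lock() internally for its tree walk, which is presumably why the WARN_ON_ONCE(!rcu_read_lock_held()) assertion can go away. Callers that dereference the returned memcg still need their own RCU read-side section (or a reference) to keep it alive, along the lines of this hypothetical caller:

	#include <linux/rcupdate.h>

	/* Hypothetical usage, not from the patch. */
	static bool demo_id_is_live(unsigned short id)
	{
		bool live;

		rcu_read_lock();
		live = mem_cgroup_from_id(id) != NULL;
		rcu_read_unlock();
		return live;
	}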
static struct mem_cgroup *mem_cgroup_alloc(void)
if (!memcg)
return NULL;
- memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
- 1, MEM_CGROUP_ID_MAX,
- GFP_KERNEL);
- if (memcg->id.id < 0)
+ if (xa_alloc(&mem_cgroups, &memcg->id.id, NULL,
+ XA_LIMIT(0, MEM_CGROUP_ID_MAX - 1), GFP_KERNEL))
goto fail;
memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
- idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+ xa_store(&mem_cgroups, memcg->id.id, memcg, GFP_KERNEL);
return memcg;
fail:
mem_cgroup_id_remove(memcg);
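The two-step allocation keeps the shape of the old idr_alloc(NULL)/idr_replace() dance: xa_alloc() with a NULL entry stores XA_ZERO_ENTRY, which reserves the ID but reads back as NULL from xa_load(), and xa_store() later publishes the pointer once the memcg is fully initialized. A self-contained sketch of the reserve-then-publish pattern (types and names are illustrative):

	#include <linux/slab.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(demo_objs);

	struct demo_obj {
		u32 id;
	};

	static struct demo_obj *demo_create(void)
	{
		struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		/* Reserve an ID: storing NULL leaves XA_ZERO_ENTRY in the
		 * slot, so concurrent xa_load() callers see NULL until the
		 * object is published below. */
		if (xa_alloc(&demo_objs, &obj->id, NULL,
			     xa_limit_16b, GFP_KERNEL))
			goto free;
		/* ... finish initialization before anyone can see obj ... */
		xa_store(&demo_objs, obj->id, obj, GFP_KERNEL);
		return obj;
	free:
		kfree(obj);
		return NULL;
	}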