www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
memcg: replace memcg ID idr with xarray
Author: Shakeel Butt <shakeel.butt@linux.dev>
Fri, 9 Aug 2024 17:26:18 +0000 (10:26 -0700)
Committer: Andrew Morton <akpm@linux-foundation.org>
Sat, 17 Aug 2024 00:53:05 +0000 (17:53 -0700)
At the moment memcg IDs are managed through IDR which requires external
synchronization mechanisms and makes the allocation code a bit awkward.
Let's switch to xarray and make the code simpler.

Link: https://lkml.kernel.org/r/20240809172618.2946790-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index 8971d3473a7bfc4e100d50088f1395616a935c58..d4622e600e1535a58c8c5ec249f19f135c7b2c04 100644 (file)
@@ -3363,29 +3363,12 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
  */
 
 #define MEM_CGROUP_ID_MAX      ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
-static DEFINE_IDR(mem_cgroup_idr);
-static DEFINE_SPINLOCK(memcg_idr_lock);
-
-static int mem_cgroup_alloc_id(void)
-{
-       int ret;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock(&memcg_idr_lock);
-       ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
-                       GFP_NOWAIT);
-       spin_unlock(&memcg_idr_lock);
-       idr_preload_end();
-       return ret;
-}
+static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
 
 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
 {
        if (memcg->id.id > 0) {
-               spin_lock(&memcg_idr_lock);
-               idr_remove(&mem_cgroup_idr, memcg->id.id);
-               spin_unlock(&memcg_idr_lock);
-
+               xa_erase(&mem_cgroup_ids, memcg->id.id);
                memcg->id.id = 0;
        }
 }
@@ -3420,7 +3403,7 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
-       return idr_find(&mem_cgroup_idr, id);
+       return xa_load(&mem_cgroup_ids, id);
 }
 
 #ifdef CONFIG_SHRINKER_DEBUG
@@ -3519,11 +3502,10 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
        if (!memcg)
                return ERR_PTR(error);
 
-       memcg->id.id = mem_cgroup_alloc_id();
-       if (memcg->id.id < 0) {
-               error = memcg->id.id;
+       error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
+                        XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
+       if (error)
                goto fail;
-       }
 
        memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
                                 GFP_KERNEL_ACCOUNT);
@@ -3664,9 +3646,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
         * publish it here at the end of onlining. This matches the
         * regular ID destruction during offlining.
         */
-       spin_lock(&memcg_idr_lock);
-       idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
-       spin_unlock(&memcg_idr_lock);
+       xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
 
        return 0;
 offline_kmem: