mm: list_lru: fix UAF for memory cgroup
author    Muchun Song <songmuchun@bytedance.com>
          Thu, 18 Jul 2024 08:36:07 +0000 (16:36 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 8 Aug 2024 01:33:56 +0000 (18:33 -0700)
mem_cgroup_from_slab_obj() is supposed to be called under the RCU read
lock, cgroup_mutex, or some other mechanism that prevents the returned
memcg from being freed.  Otherwise, the memcg can be freed by a
concurrent cgroup removal and the subsequent dereference is a
use-after-free.  Fix it by taking the missing RCU read lock around the
lookup in list_lru_add_obj() and list_lru_del_obj().

Found by code inspection.
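
To illustrate the window being closed, here is a minimal sketch.  The
caller function is hypothetical; mem_cgroup_from_slab_obj(),
list_lru_add() and the RCU primitives are the kernel APIs touched by
this patch:

#include <linux/list_lru.h>
#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

/* Hypothetical caller, for illustration only; not part of the patch. */
static bool my_cache_insert(struct list_lru *lru, struct list_head *item,
			    int nid)
{
	struct mem_cgroup *memcg;
	bool ret;

	/*
	 * Pre-patch pattern: nothing pins the memcg returned by
	 * mem_cgroup_from_slab_obj(), so a concurrent cgroup removal
	 * can free it before list_lru_add() dereferences it:
	 *
	 *	memcg = mem_cgroup_from_slab_obj(item);
	 *	return list_lru_add(lru, item, nid, memcg);	// UAF
	 *
	 * Post-patch pattern: the lookup and the use sit in one RCU
	 * read-side critical section, so the memcg cannot be freed
	 * until we are done with it.
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(item);
	ret = list_lru_add(lru, item, nid, memcg);
	rcu_read_unlock();

	return ret;
}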

[songmuchun@bytedance.com: only grab rcu lock when necessary, per Vlastimil]
Link: https://lkml.kernel.org/r/20240801024603.1865-1-songmuchun@bytedance.com
Link: https://lkml.kernel.org/r/20240718083607.42068-1-songmuchun@bytedance.com
Fixes: 0a97c01cd20b ("list_lru: allow explicit memcg and NUMA node selection")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/list_lru.c b/mm/list_lru.c
index a29d96929d7c7e17138995319ca511e231d1b582..9b7ff06e9d326bb0da9f486da6c05a56c77d786c 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
                    struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+       bool ret;
        int nid = page_to_nid(virt_to_page(item));
-       struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-               mem_cgroup_from_slab_obj(item) : NULL;
 
-       return list_lru_add(lru, item, nid, memcg);
+       if (list_lru_memcg_aware(lru)) {
+               rcu_read_lock();
+               ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+               rcu_read_unlock();
+       } else {
+               ret = list_lru_add(lru, item, nid, NULL);
+       }
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
                    struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+       bool ret;
        int nid = page_to_nid(virt_to_page(item));
-       struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-               mem_cgroup_from_slab_obj(item) : NULL;
 
-       return list_lru_del(lru, item, nid, memcg);
+       if (list_lru_memcg_aware(lru)) {
+               rcu_read_lock();
+               ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+               rcu_read_unlock();
+       } else {
+               ret = list_lru_del(lru, item, nid, NULL);
+       }
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);
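
For context, a minimal usage sketch with a hypothetical caller (the
struct and function names below are invented; only the list_lru_*()
calls are the real API): after this patch, list_lru_add_obj() and
list_lru_del_obj() take the RCU read lock internally for memcg-aware
LRUs, so their callers need no memcg lifetime handling of their own.
Only direct callers of list_lru_add()/list_lru_del() must ensure the
memcg lifetime themselves, per the new comments.

#include <linux/list_lru.h>

/* Hypothetical object embedding an LRU list head; illustrative only. */
struct my_object {
	struct list_head lru;
	/* ... */
};

static struct list_lru my_lru;	/* assume list_lru_init_memcg() succeeded */

static void my_object_make_reclaimable(struct my_object *obj)
{
	/*
	 * No rcu_read_lock() needed here: list_lru_add_obj() derives
	 * the memcg from the slab object and pins it internally when
	 * the LRU is memcg aware.
	 */
	list_lru_add_obj(&my_lru, &obj->lru);
}

static void my_object_mark_in_use(struct my_object *obj)
{
	/* Likewise for removal. */
	list_lru_del_obj(&my_lru, &obj->lru);
}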