www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: thp: introduce folio_split_queue_lock and its variants
author: Muchun Song <songmuchun@bytedance.com>
Wed, 15 Oct 2025 06:35:31 +0000 (14:35 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 22 Oct 2025 01:51:39 +0000 (18:51 -0700)
In future memcg removal, the binding between a folio and a memcg may
change, making the split lock within the memcg unstable when held.

A new approach is required to reparent the split queue to its parent.
This patch starts introducing a unified way to acquire the split lock for
future work.

It's a code-only refactoring with no functional changes.

Link: https://lkml.kernel.org/r/77069514656ea81a82969369f24da25ea1304e9c.1760509767.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memcontrol.h
mm/huge_memory.c

index 873e510d6f8d9451566d2688941033385bef47a3..0b2d4ec79adfe28e63c4eb61de37ccf7fb3e00b4 100644 (file)
@@ -1674,6 +1674,11 @@ int alloc_shrinker_info(struct mem_cgroup *memcg);
 void free_shrinker_info(struct mem_cgroup *memcg);
 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+       return shrinker->id;
+}
 #else
 #define mem_cgroup_sockets_enabled 0
 
@@ -1705,6 +1710,11 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg,
                                    int nid, int shrinker_id)
 {
 }
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+       return -1;
+}
 #endif
 
 #ifdef CONFIG_MEMCG
index 49f3463a81f566e5380568989cb2fa3a51d15e8c..77edb51a6f12965b7a4ac968b35f0770f8592b6b 100644 (file)
@@ -1076,28 +1076,86 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        return pmd;
 }
 
+static struct deferred_split *split_queue_node(int nid)
+{
+       struct pglist_data *pgdata = NODE_DATA(nid);
+
+       return &pgdata->deferred_split_queue;
+}
+
 #ifdef CONFIG_MEMCG
 static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+                                          struct deferred_split *queue)
 {
-       struct mem_cgroup *memcg = folio_memcg(folio);
-       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
+       if (mem_cgroup_disabled())
+               return NULL;
+       if (split_queue_node(folio_nid(folio)) == queue)
+               return NULL;
+       return container_of(queue, struct mem_cgroup, deferred_split_queue);
+}
 
-       if (memcg)
-               return &memcg->deferred_split_queue;
-       else
-               return &pgdat->deferred_split_queue;
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+       return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
 }
 #else
 static inline
-struct deferred_split *get_deferred_split_queue(struct folio *folio)
+struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
+                                          struct deferred_split *queue)
 {
-       struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
+       return NULL;
+}
 
-       return &pgdat->deferred_split_queue;
+static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
+{
+       return split_queue_node(nid);
 }
 #endif
 
+static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
+{
+       struct deferred_split *queue;
+
+       queue = memcg_split_queue(nid, memcg);
+       spin_lock(&queue->split_queue_lock);
+
+       return queue;
+}
+
+static struct deferred_split *
+split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
+{
+       struct deferred_split *queue;
+
+       queue = memcg_split_queue(nid, memcg);
+       spin_lock_irqsave(&queue->split_queue_lock, *flags);
+
+       return queue;
+}
+
+static struct deferred_split *folio_split_queue_lock(struct folio *folio)
+{
+       return split_queue_lock(folio_nid(folio), folio_memcg(folio));
+}
+
+static struct deferred_split *
+folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
+{
+       return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
+}
+
+static inline void split_queue_unlock(struct deferred_split *queue)
+{
+       spin_unlock(&queue->split_queue_lock);
+}
+
+static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
+                                                unsigned long flags)
+{
+       spin_unlock_irqrestore(&queue->split_queue_lock, flags);
+}
+
 static inline bool is_transparent_hugepage(const struct folio *folio)
 {
        if (!folio_test_large(folio))
@@ -3688,7 +3746,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                struct page *split_at, struct page *lock_at,
                struct list_head *list, bool uniform_split, bool unmapped)
 {
-       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+       struct deferred_split *ds_queue;
        XA_STATE(xas, &folio->mapping->i_pages, folio->index);
        struct folio *end_folio = folio_next(folio);
        bool is_anon = folio_test_anon(folio);
@@ -3828,7 +3886,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
        }
 
        /* Prevent deferred_split_scan() touching ->_refcount */
-       spin_lock(&ds_queue->split_queue_lock);
+       ds_queue = folio_split_queue_lock(folio);
        if (folio_ref_freeze(folio, 1 + extra_pins)) {
                struct swap_cluster_info *ci = NULL;
                struct lruvec *lruvec;
@@ -3850,7 +3908,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                         */
                        list_del_init(&folio->_deferred_list);
                }
-               spin_unlock(&ds_queue->split_queue_lock);
+               split_queue_unlock(ds_queue);
                if (mapping) {
                        int nr = folio_nr_pages(folio);
 
@@ -3950,7 +4008,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                if (ci)
                        swap_cluster_unlock(ci);
        } else {
-               spin_unlock(&ds_queue->split_queue_lock);
+               split_queue_unlock(ds_queue);
                ret = -EAGAIN;
        }
 fail:
@@ -4131,8 +4189,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
        WARN_ON_ONCE(folio_ref_count(folio));
        WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
 
-       ds_queue = get_deferred_split_queue(folio);
-       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+       ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
        if (!list_empty(&folio->_deferred_list)) {
                ds_queue->split_queue_len--;
                if (folio_test_partially_mapped(folio)) {
@@ -4143,7 +4200,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
                list_del_init(&folio->_deferred_list);
                unqueued = true;
        }
-       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+       split_queue_unlock_irqrestore(ds_queue, flags);
 
        return unqueued;        /* useful for debug warnings */
 }
@@ -4151,10 +4208,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
 void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
-       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
-#ifdef CONFIG_MEMCG
-       struct mem_cgroup *memcg = folio_memcg(folio);
-#endif
+       struct deferred_split *ds_queue;
        unsigned long flags;
 
        /*
@@ -4177,7 +4231,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
        if (folio_test_swapcache(folio))
                return;
 
-       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+       ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
        if (partially_mapped) {
                if (!folio_test_partially_mapped(folio)) {
                        folio_set_partially_mapped(folio);
@@ -4192,15 +4246,16 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
                VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
        }
        if (list_empty(&folio->_deferred_list)) {
+               struct mem_cgroup *memcg;
+
+               memcg = folio_split_queue_memcg(folio, ds_queue);
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
-#ifdef CONFIG_MEMCG
                if (memcg)
                        set_shrinker_bit(memcg, folio_nid(folio),
-                                        deferred_split_shrinker->id);
-#endif
+                                        shrinker_id(deferred_split_shrinker));
        }
-       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+       split_queue_unlock_irqrestore(ds_queue, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
@@ -4246,19 +4301,13 @@ static bool thp_underused(struct folio *folio)
 static unsigned long deferred_split_scan(struct shrinker *shrink,
                struct shrink_control *sc)
 {
-       struct pglist_data *pgdata = NODE_DATA(sc->nid);
-       struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
+       struct deferred_split *ds_queue;
        unsigned long flags;
        LIST_HEAD(list);
        struct folio *folio, *next, *prev = NULL;
        int split = 0, removed = 0;
 
-#ifdef CONFIG_MEMCG
-       if (sc->memcg)
-               ds_queue = &sc->memcg->deferred_split_queue;
-#endif
-
-       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+       ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
        /* Take pin on all head pages to avoid freeing them under us */
        list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
                                                        _deferred_list) {
@@ -4277,7 +4326,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                if (!--sc->nr_to_scan)
                        break;
        }
-       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+       split_queue_unlock_irqrestore(ds_queue, flags);
 
        list_for_each_entry_safe(folio, next, &list, _deferred_list) {
                bool did_split = false;