mm: thp: use folio_batch to handle THP splitting in deferred_split_scan()
author	Muchun Song <songmuchun@bytedance.com>
	Wed, 15 Oct 2025 06:35:32 +0000 (14:35 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
	Wed, 22 Oct 2025 01:51:39 +0000 (18:51 -0700)
The maintenance of folio->_deferred_list is intricate because the same
list head is reused to link the folio into a local on-stack list.

Here are some peculiarities:

   1) When a folio is removed from its split queue and added to a local
      on-stack list in deferred_split_scan(), the ->split_queue_len isn't
      updated, leading to an inconsistency between it and the actual
      number of folios in the split queue.

   2) When such a folio is split via split_folio() later, it is removed
      from the local list while still holding the split queue lock, even
      though the lock protects nothing at that point.

   3) To handle the race with a third party freeing or migrating the
      preceding folio, we must keep one safe folio (with a raised
      refcount) earlier in the local list by delaying its folio_put();
      see the sketch after this list.  More details can be found in
      commit e66f3185fa04 ("mm/thp: fix deferred split queue not
      partially_mapped").  It's rather tricky.
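
For illustration, here is a condensed sketch of that delayed-put trick as
it existed before this patch (taken from the code removed below; the
split/requeue work inside the loop is elided):

	struct folio *folio, *next, *prev = NULL;

	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
		/* ... try to split the folio, maybe requeue it ... */

		/*
		 * After the swap, prev pins the current folio, while folio
		 * holds the entry from the previous iteration, whose
		 * reference can now be dropped: one refcount-raised folio
		 * always separates the unlocked walk from earlier folios
		 * that a third party may concurrently unqueue.
		 */
		swap(folio, prev);
		if (folio)
			folio_put(folio);
	}
	if (prev)
		folio_put(prev);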

We can use the folio_batch infrastructure to handle this cleanly, as
sketched below.  In this case, ->split_queue_len stays consistent with the
real number of folios in the split queue, and if
list_empty(&folio->_deferred_list) returns false, the folio must still be
in its split queue (never on a local list).
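
A rough sketch of the folio_batch pattern (condensed from the new
deferred_split_scan() below; the locking helpers, statistics and the
actual split work are elided, and try_to_split() is just a placeholder):

	struct folio_batch fbatch;
	struct folio *folio, *next;
	int i;

	folio_batch_init(&fbatch);

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
				 _deferred_list) {
		if (folio_try_get(folio))	/* pin it for the unlocked phase */
			folio_batch_add(&fbatch, folio);
		/* dequeue either way, so ->split_queue_len stays exact */
		list_del_init(&folio->_deferred_list);
		ds_queue->split_queue_len--;
		if (!folio_batch_space(&fbatch))
			break;			/* batch full, go process it */
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/* every batched folio holds a reference; no delayed-put trick needed */
	for (i = 0; i < folio_batch_count(&fbatch); i++)
		try_to_split(fbatch.folios[i]);

	folios_put(&fbatch);	/* drops the references and reinits the batch */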

In the future, we will reparent LRU folios during memcg offline to
eliminate dying memory cgroups, which requires reparenting the split queue
to its parent first.  This patch therefore prepares for that by switching
to folio_split_queue_lock_irqsave(), since the folio's memcg (and hence
its split queue) may change; a purely hypothetical sketch of such a helper
follows.
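
The caller-visible shape of that helper can be seen in the diff below; a
purely hypothetical sketch of how it might look (folio_split_queue() is an
assumed lookup helper, not taken from this patch):

	static struct deferred_split *
	folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
	{
		struct deferred_split *queue;

		/*
		 * Assumed helper: return the folio's memcg split queue if it
		 * has one, otherwise the per-node queue.  Resolving the queue
		 * from the folio itself, rather than from a cached nid/memcg
		 * pair, is what lets a reparented folio be requeued on the
		 * queue that currently owns it.
		 */
		queue = folio_split_queue(folio);
		spin_lock_irqsave(&queue->split_queue_lock, *flags);

		return queue;
	}

A real implementation would additionally have to guard against the folio
being reparented between the lookup and taking the lock.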

Link: https://lkml.kernel.org/r/4f5d7a321c72dfe65e0e19a3f89180d5988eae2e.1760509767.git.zhengqi.arch@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 77edb51a6f12965b7a4ac968b35f0770f8592b6b..920475659defe823cd080bc84f2a69f3ef3fdb55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3892,9 +3892,18 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                struct lruvec *lruvec;
                int expected_refs;
 
-               if (old_order > 1 &&
-                   !list_empty(&folio->_deferred_list)) {
-                       ds_queue->split_queue_len--;
+               if (old_order > 1) {
+                       if (!list_empty(&folio->_deferred_list)) {
+                               ds_queue->split_queue_len--;
+                               /*
+                                * Reinitialize page_deferred_list after
+                                * removing the page from the split_queue,
+                                * otherwise a subsequent split will see list
+                                * corruption when checking the
+                                * page_deferred_list.
+                                */
+                               list_del_init(&folio->_deferred_list);
+                       }
                        if (folio_test_partially_mapped(folio)) {
                                folio_clear_partially_mapped(folio);
                                mod_mthp_stat(old_order,
@@ -4303,35 +4312,40 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 {
        struct deferred_split *ds_queue;
        unsigned long flags;
-       LIST_HEAD(list);
-       struct folio *folio, *next, *prev = NULL;
-       int split = 0, removed = 0;
+       struct folio *folio, *next;
+       int split = 0, i;
+       struct folio_batch fbatch;
+
+       folio_batch_init(&fbatch);
 
+retry:
        ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
        /* Take pin on all head pages to avoid freeing them under us */
        list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
                                                        _deferred_list) {
                if (folio_try_get(folio)) {
-                       list_move(&folio->_deferred_list, &list);
-               } else {
+                       folio_batch_add(&fbatch, folio);
+               } else if (folio_test_partially_mapped(folio)) {
                        /* We lost race with folio_put() */
-                       if (folio_test_partially_mapped(folio)) {
-                               folio_clear_partially_mapped(folio);
-                               mod_mthp_stat(folio_order(folio),
-                                             MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-                       }
-                       list_del_init(&folio->_deferred_list);
-                       ds_queue->split_queue_len--;
+                       folio_clear_partially_mapped(folio);
+                       mod_mthp_stat(folio_order(folio),
+                                     MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
                }
+               list_del_init(&folio->_deferred_list);
+               ds_queue->split_queue_len--;
                if (!--sc->nr_to_scan)
                        break;
+               if (!folio_batch_space(&fbatch))
+                       break;
        }
        split_queue_unlock_irqrestore(ds_queue, flags);
 
-       list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+       for (i = 0; i < folio_batch_count(&fbatch); i++) {
                bool did_split = false;
                bool underused = false;
+               struct deferred_split *fqueue;
 
+               folio = fbatch.folios[i];
                if (!folio_test_partially_mapped(folio)) {
                        /*
                         * See try_to_map_unused_to_zeropage(): we cannot
@@ -4354,38 +4368,27 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                }
                folio_unlock(folio);
 next:
+               if (did_split || !folio_test_partially_mapped(folio))
+                       continue;
                /*
-                * split_folio() removes folio from list on success.
                 * Only add back to the queue if folio is partially mapped.
                 * If thp_underused returns false, or if split_folio fails
                 * in the case it was underused, then consider it used and
                 * don't add it back to split_queue.
                 */
-               if (did_split) {
-                       ; /* folio already removed from list */
-               } else if (!folio_test_partially_mapped(folio)) {
-                       list_del_init(&folio->_deferred_list);
-                       removed++;
-               } else {
-                       /*
-                        * That unlocked list_del_init() above would be unsafe,
-                        * unless its folio is separated from any earlier folios
-                        * left on the list (which may be concurrently unqueued)
-                        * by one safe folio with refcount still raised.
-                        */
-                       swap(folio, prev);
+               fqueue = folio_split_queue_lock_irqsave(folio, &flags);
+               if (list_empty(&folio->_deferred_list)) {
+                       list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
+                       fqueue->split_queue_len++;
                }
-               if (folio)
-                       folio_put(folio);
+               split_queue_unlock_irqrestore(fqueue, flags);
        }
+       folios_put(&fbatch);
 
-       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       list_splice_tail(&list, &ds_queue->split_queue);
-       ds_queue->split_queue_len -= removed;
-       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
-
-       if (prev)
-               folio_put(prev);
+       if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
+               cond_resched();
+               goto retry;
+       }
 
        /*
         * Stop shrinker if we didn't split any page, but the queue is empty.