mm: vmscan: rework move_pages_to_lru()
author     Muchun Song <songmuchun@bytedance.com>
           Tue, 21 Jun 2022 12:56:52 +0000 (20:56 +0800)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>
           Wed, 20 Jul 2022 00:15:10 +0000 (20:15 -0400)
In a later patch, we will reparent the LRU pages.  Pages that
move_pages_to_lru() moves to the appropriate LRU list can be reparented
while the move is in progress, so it is wrong for the caller to hold a
single lruvec lock across the whole operation.  Instead, use the more
general interface folio_lruvec_relock_irq() to acquire the correct
lruvec lock for each folio.  As a result, the callers no longer take
lru_lock themselves; they only disable interrupts around the vmstat and
memcg counter updates.
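
For reference, a minimal sketch of the relock pattern (hypothetical
helper name; the real folio_lruvec_relock_irq() additionally handles
memcg RCU protection and lockdep annotations):

	static struct lruvec *relock_sketch(struct folio *folio,
					    struct lruvec *locked)
	{
		/* lruvec the folio currently belongs to */
		struct lruvec *lruvec = folio_lruvec(folio);

		/* Common case: same lruvec as the last folio, keep the lock. */
		if (locked == lruvec)
			return lruvec;

		/* Otherwise drop the stale lock and take the correct one. */
		if (locked)
			spin_unlock_irq(&locked->lru_lock);
		spin_lock_irq(&lruvec->lru_lock);
		return lruvec;
	}

Threading the previously locked lruvec through each iteration lets a
run of folios from the same lruvec reuse a single lock acquisition,
while a folio belonging to another lruvec (e.g. after reparenting)
transparently switches locks.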

Link: https://lkml.kernel.org/r/20220621125658.64935-6-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index 6a554712ef5d8cb796264be99341ce06d8bf2a6e..69765615143195d8d4050530643fe195a58159cb 100644
@@ -2312,23 +2312,26 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  * move_pages_to_lru() moves folios from private @list to appropriate LRU list.
  * On return, @list is reused as a list of folios to be freed by the caller.
  *
- * Returns the number of pages moved to the given lruvec.
+ * Returns the number of pages moved to the appropriate LRU list.
+ *
+ * Note: The caller must not hold any lruvec lock.
  */
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
-                                     struct list_head *list)
+static unsigned int move_pages_to_lru(struct list_head *list)
 {
        int nr_pages, nr_moved = 0;
+       struct lruvec *lruvec = NULL;
        LIST_HEAD(folios_to_free);
 
        while (!list_empty(list)) {
                struct folio *folio = lru_to_folio(list);
 
+               lruvec = folio_lruvec_relock_irq(folio, lruvec);
                VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
                list_del(&folio->lru);
                if (unlikely(!folio_evictable(folio))) {
-                       spin_unlock_irq(&lruvec->lru_lock);
+                       lruvec_unlock_irq(lruvec);
                        folio_putback_lru(folio);
-                       spin_lock_irq(&lruvec->lru_lock);
+                       lruvec = NULL;
                        continue;
                }
 
@@ -2349,19 +2352,15 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
                        __folio_clear_lru_flags(folio);
 
                        if (unlikely(folio_test_large(folio))) {
-                               spin_unlock_irq(&lruvec->lru_lock);
+                               lruvec_unlock_irq(lruvec);
                                destroy_large_folio(folio);
-                               spin_lock_irq(&lruvec->lru_lock);
+                               lruvec = NULL;
                        } else
                                list_add(&folio->lru, &folios_to_free);
 
                        continue;
                }
 
-               /*
-                * All pages were isolated from the same lruvec (and isolation
-                * inhibits memcg migration).
-                */
                VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
                lruvec_add_folio(lruvec, folio);
                nr_pages = folio_nr_pages(folio);
@@ -2370,6 +2369,8 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
                        workingset_age_nonresident(lruvec, nr_pages);
        }
 
+       if (lruvec)
+               lruvec_unlock_irq(lruvec);
        /*
         * To save our caller's stack, now use input list for pages to free.
         */
@@ -2440,16 +2441,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
 
-       spin_lock_irq(&lruvec->lru_lock);
-       move_pages_to_lru(lruvec, &page_list);
+       move_pages_to_lru(&page_list);
 
+       local_irq_disable();
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
        __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed);
-       spin_unlock_irq(&lruvec->lru_lock);
+       local_irq_enable();
 
        lru_note_cost(lruvec, file, stat.nr_pageout);
        mem_cgroup_uncharge_list(&page_list);
@@ -2578,18 +2579,16 @@ static void shrink_active_list(unsigned long nr_to_scan,
        /*
         * Move folios back to the lru list.
         */
-       spin_lock_irq(&lruvec->lru_lock);
-
-       nr_activate = move_pages_to_lru(lruvec, &l_active);
-       nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+       nr_activate = move_pages_to_lru(&l_active);
+       nr_deactivate = move_pages_to_lru(&l_inactive);
        /* Keep all free folios in l_active list */
        list_splice(&l_inactive, &l_active);
 
+       local_irq_disable();
        __count_vm_events(PGDEACTIVATE, nr_deactivate);
        __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
-
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
-       spin_unlock_irq(&lruvec->lru_lock);
+       local_irq_enable();
 
        mem_cgroup_uncharge_list(&l_active);
        free_unref_page_list(&l_active);