www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: lru: add VM_WARN_ON_ONCE_FOLIO to lru maintenance function
author: Muchun Song <songmuchun@bytedance.com>
Tue, 21 Jun 2022 12:56:57 +0000 (20:56 +0800)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Wed, 20 Jul 2022 00:15:11 +0000 (20:15 -0400)
We need to make sure that the page is deleted from or added to the correct
lruvec list.  So add a VM_WARN_ON_ONCE_FOLIO() to catch invalid users.
Then the VM_BUG_ON_PAGE() in move_pages_to_lru() could be removed since
add_page_to_lru_list() will check that.

Link: https://lkml.kernel.org/r/20220621125658.64935-11-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm_inline.h
mm/vmscan.c

index 7b25b53c474a7f17d6ce5cc378a8f1d226afdc00..6585198b19e28830b76ac2bb324c4d31d2ba8f03 100644 (file)
@@ -99,6 +99,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
 {
        enum lru_list lru = folio_lru_list(folio);
 
+       VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
@@ -116,6 +118,8 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
 {
        enum lru_list lru = folio_lru_list(folio);
 
+       VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
@@ -133,6 +137,8 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
 {
        enum lru_list lru = folio_lru_list(folio);
 
+       VM_WARN_ON_ONCE_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
+
        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
index 69765615143195d8d4050530643fe195a58159cb..51b1607c81e4551436926eb392704a6a3a527914 100644 (file)
@@ -2361,7 +2361,6 @@ static unsigned int move_pages_to_lru(struct list_head *list)
                        continue;
                }
 
-               VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
                lruvec_add_folio(lruvec, folio);
                nr_pages = folio_nr_pages(folio);
                nr_moved += nr_pages;