mm/memcg: Add folio_lruvec_relock_irq() and folio_lruvec_relock_irqsave()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 30 Jun 2021 02:27:31 +0000 (22:27 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Sat, 14 Aug 2021 04:35:20 +0000 (00:35 -0400)
These are the folio equivalents of relock_page_lruvec_irq() and
relock_page_lruvec_irqsave().  Also convert page_matches_lruvec()
to folio_matches_lruvec().
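
For illustration, a minimal caller sketch (not part of this patch; it mirrors
the __pagevec_lru_add conversion in mm/swap.c below): look up the folio once
per page and let the relock helper drop and re-take the lru lock only when the
folio belongs to a different lruvec than the one already held:

	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct folio *folio = page_folio(pvec->pages[i]);

		/* Drops and re-takes the lock only if folio's lruvec differs. */
		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		/* ... operate on the folio under the lruvec lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);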

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/linux/memcontrol.h
mm/mlock.c
mm/swap.c
mm/vmscan.c

index 8c24f5b8638b48c60e3b8fc8752990816cd1d05b..5394c69ef8e2dd59e23e5f1fc719348c7791c6ab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1528,19 +1528,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 }
 
 /* Test requires a stable page->memcg binding, see page_memcg() */
-static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+static inline bool folio_matches_lruvec(struct folio *folio,
+               struct lruvec *lruvec)
 {
-       return lruvec_pgdat(lruvec) == page_pgdat(page) &&
-              lruvec_memcg(lruvec) == page_memcg(page);
+       return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+              lruvec_memcg(lruvec) == folio_memcg(folio);
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                struct lruvec *locked_lruvec)
 {
-       struct folio *folio = page_folio(page);
        if (locked_lruvec) {
-               if (page_matches_lruvec(page, locked_lruvec))
+               if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;
 
                unlock_page_lruvec_irq(locked_lruvec);
@@ -1550,12 +1550,11 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
                struct lruvec *locked_lruvec, unsigned long *flags)
 {
-       struct folio *folio = page_folio(page);
        if (locked_lruvec) {
-               if (page_matches_lruvec(page, locked_lruvec))
+               if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;
 
                unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
index 16d2ee160d43c572d5a19ed989b7a8d70df8e9ca..e263d62ae2d09bddf14006ffec64926b60fd6eb5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -271,6 +271,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
        /* Phase 1: page isolation */
        for (i = 0; i < nr; i++) {
                struct page *page = pvec->pages[i];
+               struct folio *folio = page_folio(page);
 
                if (TestClearPageMlocked(page)) {
                        /*
@@ -278,7 +279,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                         * so we can spare the get_page() here.
                         */
                        if (TestClearPageLRU(page)) {
-                               lruvec = relock_page_lruvec_irq(page, lruvec);
+                               lruvec = folio_lruvec_relock_irq(folio, lruvec);
                                del_page_from_lru_list(page, lruvec);
                                continue;
                        } else
index 6d0d2bfca48e9626a674100a936d758d4e29605b..83ffb9a03ce198e95e79a86c98c65329ca4fd9ac 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -211,12 +211,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
+               struct folio *folio = page_folio(page);
 
                /* block memcg migration during page moving between lru */
                if (!TestClearPageLRU(page))
                        continue;
 
-               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+               lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                (*move_fn)(page, lruvec);
 
                SetPageLRU(page);
@@ -902,11 +903,12 @@ void release_pages(struct page **pages, int nr)
        int i;
        LIST_HEAD(pages_to_free);
        struct lruvec *lruvec = NULL;
-       unsigned long flags;
+       unsigned long flags = 0;
        unsigned int lock_batch;
 
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
+               struct folio *folio = page_folio(page);
 
                /*
                 * Make sure the IRQ-safe lock-holding time does not get
@@ -918,7 +920,7 @@ void release_pages(struct page **pages, int nr)
                        lruvec = NULL;
                }
 
-               page = compound_head(page);
+               page = &folio->page;
                if (is_huge_zero_page(page))
                        continue;
 
@@ -957,7 +959,7 @@ void release_pages(struct page **pages, int nr)
                if (PageLRU(page)) {
                        struct lruvec *prev_lruvec = lruvec;
 
-                       lruvec = relock_page_lruvec_irqsave(page, lruvec,
+                       lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
                                                                        &flags);
                        if (prev_lruvec != lruvec)
                                lock_batch = 0;
@@ -1061,8 +1063,9 @@ void __pagevec_lru_add(struct pagevec *pvec)
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
+               struct folio *folio = page_folio(page);
 
-               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+               lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                __pagevec_lru_add_fn(page, lruvec);
        }
        if (lruvec)
index 0d48306d37dc89bb7bd2631b125b2017fe0020a6..7a2f25b904d9b9f014fe373d6ba1472a39872edc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2075,7 +2075,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
                 * All pages were isolated from the same lruvec (and isolation
                 * inhibits memcg migration).
                 */
-               VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
+               VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
                add_page_to_lru_list(page, lruvec);
                nr_pages = thp_nr_pages(page);
                nr_moved += nr_pages;
@@ -4514,6 +4514,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
        for (i = 0; i < pvec->nr; i++) {
                struct page *page = pvec->pages[i];
+               struct folio *folio = page_folio(page);
                int nr_pages;
 
                if (PageTransTail(page))
@@ -4526,7 +4527,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
                if (!TestClearPageLRU(page))
                        continue;
 
-               lruvec = relock_page_lruvec_irq(page, lruvec);
+               lruvec = folio_lruvec_relock_irq(folio, lruvec);
                if (page_evictable(page) && PageUnevictable(page)) {
                        del_page_from_lru_list(page, lruvec);
                        ClearPageUnevictable(page);