khugepaged: Convert collapse_file() to use folios more
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 04:52:56 +0000 (23:52 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 14:00:15 +0000 (09:00 -0500)
The newly allocated hpage is now always referred to as a folio.
This removes a number of calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
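
For illustration only (this sketch is not part of the patch): the page-based
stat helper used before this change accepts any struct page and resolves it to
its folio internally (via page_folio(), i.e. compound_head()) before updating
the counters, while the folio-based helper skips that lookup because a folio is
never a tail page. The two wrapper functions below are hypothetical, written
only to contrast the calling styles; the stat helpers themselves are the ones
swapped in the diff.

#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Before: the helper computes compound_head(hpage) on every call. */
static void thp_stat_add_page(struct page *hpage, int nr)
{
        __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
}

/* After: the caller already holds the folio, so no head-page lookup is needed. */
static void thp_stat_add_folio(struct folio *folio, int nr)
{
        __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
}

The same pattern applies to the flag helpers touched by the patch:
__SetPageLocked() and __SetPageSwapBacked() go through compound_head(), while
__folio_set_locked() and __folio_set_swapbacked() operate on the folio
directly.
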
mm/khugepaged.c

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1fcab4e6f36f4b2f45e40884526890488575b9f2..2574d6647746381250dce0e39e48d8329a912b6e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1742,7 +1742,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 {
        struct address_space *mapping = file->f_mapping;
        struct folio *newf;
-       struct page *hpage;
        pgoff_t index = 0, end = start + HPAGE_PMD_NR;
        LIST_HEAD(pagelist);
        XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1756,7 +1755,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
        result = alloc_charge_hpage(&newf, mm, cc);
        if (result != SCAN_SUCCEED)
                goto out;
-       hpage = &newf->page;
 
        /*
         * Ensure we have slots for all the pages in the range.  This is
@@ -1774,14 +1772,14 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                }
        } while (1);
 
-       __SetPageLocked(hpage);
+       __folio_set_locked(newf);
        if (is_shmem)
-               __SetPageSwapBacked(hpage);
-       hpage->index = start;
-       hpage->mapping = mapping;
+               __folio_set_swapbacked(newf);
+       newf->index = start;
+       newf->mapping = mapping;
 
        /*
-        * At this point the hpage is locked and not up-to-date.
+        * At this point the folio is locked and not up-to-date.
         * It's safe to insert it into the page cache, because nobody would
         * be able to map it or use it in another way until we unlock it.
         */
@@ -1810,7 +1808,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                                        result = SCAN_FAIL;
                                        goto xa_locked;
                                }
-                               xas_store(&xas, hpage);
+                               xas_store(&xas, newf);
                                nr_none++;
                                continue;
                        }
@@ -1961,20 +1959,20 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
                 */
                list_add_tail(&page->lru, &pagelist);
 
-               /* Finally, replace with the new page. */
-               xas_store(&xas, hpage);
+               /* Finally, replace with the new folio */
+               xas_store(&xas, newf);
                continue;
 out_unlock:
                unlock_page(page);
                put_page(page);
                goto xa_unlocked;
        }
-       nr = thp_nr_pages(hpage);
+       nr = folio_nr_pages(newf);
 
        if (is_shmem)
-               __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+               __lruvec_stat_mod_folio(newf, NR_SHMEM_THPS, nr);
        else {
-               __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+               __lruvec_stat_mod_folio(newf, NR_FILE_THPS, nr);
                filemap_nr_thps_inc(mapping);
                /*
                 * Paired with smp_mb() in do_dentry_open() to ensure
@@ -1985,21 +1983,21 @@ out_unlock:
                smp_mb();
                if (inode_is_open_for_write(mapping->host)) {
                        result = SCAN_FAIL;
-                       __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
+                       __lruvec_stat_mod_folio(newf, NR_FILE_THPS, -nr);
                        filemap_nr_thps_dec(mapping);
                        goto xa_locked;
                }
        }
 
        if (nr_none) {
-               __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+               __lruvec_stat_mod_folio(newf, NR_FILE_PAGES, nr_none);
                /* nr_none is always 0 for non-shmem. */
-               __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
+               __lruvec_stat_mod_folio(newf, NR_SHMEM, nr_none);
        }
 
        /* Join all the small entries into a single multi-index entry */
        xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-       xas_store(&xas, hpage);
+       xas_store(&xas, newf);
 xa_locked:
        xas_unlock_irq(&xas);
 xa_unlocked:
@@ -2013,7 +2011,6 @@ xa_unlocked:
 
        if (result == SCAN_SUCCEED) {
                struct page *page, *tmp;
-               struct folio *folio;
 
                /*
                 * Replacing old pages with new one has succeeded, now we
@@ -2021,12 +2018,14 @@ xa_unlocked:
                 */
                index = start;
                list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+                       struct page *new_page = folio_page(newf,
+                                                       index % HPAGE_PMD_NR);
                        while (index < page->index) {
-                               clear_highpage(hpage + (index % HPAGE_PMD_NR));
+                               clear_highpage(new_page);
                                index++;
+                               new_page++;
                        }
-                       copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
-                                     page);
+                       copy_highpage(new_page, page);
                        list_del(&page->lru);
                        page->mapping = NULL;
                        page_ref_unfreeze(page, 1);
@@ -2037,25 +2036,23 @@ xa_unlocked:
                        index++;
                }
                while (index < end) {
-                       clear_highpage(hpage + (index % HPAGE_PMD_NR));
+                       clear_highpage(folio_page(newf, index % HPAGE_PMD_NR));
                        index++;
                }
 
-               folio = page_folio(hpage);
-               folio_mark_uptodate(folio);
-               folio_ref_add(folio, HPAGE_PMD_NR - 1);
+               folio_mark_uptodate(newf);
+               folio_ref_add(newf, HPAGE_PMD_NR - 1);
 
                if (is_shmem)
-                       folio_mark_dirty(folio);
-               folio_add_lru(folio);
+                       folio_mark_dirty(newf);
+               folio_add_lru(newf);
 
                /*
                 * Remove pte page tables, so we can re-fault the page as huge.
                 */
-               result = retract_page_tables(mapping, start, mm, addr, hpage,
-                                            cc);
-               unlock_page(hpage);
-               hpage = NULL;
+               result = retract_page_tables(mapping, start, mm, addr,
+                                               &newf->page, cc);
+               folio_unlock(newf);
        } else {
                struct page *page;
 
@@ -2094,7 +2091,7 @@ xa_unlocked:
                VM_BUG_ON(nr_none);
                xas_unlock_irq(&xas);
 
-               hpage->mapping = NULL;
+               newf->mapping = NULL;
        }
 
        if (newf)