www.infradead.org Git - users/willy/pagecache.git/commitdiff
mm: Convert page_remove_rmap() to use a folio internally
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 30 Dec 2022 18:48:39 +0000 (13:48 -0500)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 3 Jan 2023 04:00:55 +0000 (23:00 -0500)
The API for page_remove_rmap() needs to be page-based, because we can
remove mappings of pages individually.  But inside the function, we want
to only call compound_head() once and then use the folio APIs instead
of the page APIs that each call compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/rmap.c

index 17984eb9f9906c0f475a626138002bc3a91ff516..f17967db296063c1c91c6b737971a88757bc0b63 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1378,42 +1378,42 @@ void page_add_file_rmap(struct page *page,
  *
  * The caller needs to hold the pte lock.
  */
-void page_remove_rmap(struct page *page,
-       struct vm_area_struct *vma, bool compound)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
+               bool compound)
 {
-       atomic_t *mapped;
+       struct folio *folio = page_folio(page);
+       atomic_t *mapped = &folio->_nr_pages_mapped;
        int nr = 0, nr_pmdmapped = 0;
        bool last;
+       enum node_stat_item idx;
 
        VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 
        /* Hugetlb pages are not counted in NR_*MAPPED */
-       if (unlikely(PageHuge(page))) {
+       if (unlikely(folio_test_hugetlb(folio))) {
                /* hugetlb pages are always mapped with pmds */
-               atomic_dec(compound_mapcount_ptr(page));
+               atomic_dec(&folio->_entire_mapcount);
                return;
        }
 
-       lock_page_memcg(page);
+       folio_memcg_lock(folio);
 
        /* Is page being unmapped by PTE? Is this its last map to be removed? */
        if (likely(!compound)) {
                last = atomic_add_negative(-1, &page->_mapcount);
                nr = last;
-               if (last && PageCompound(page)) {
-                       mapped = subpages_mapcount_ptr(compound_head(page));
+               if (last && folio_test_large(folio)) {
                        nr = atomic_dec_return_relaxed(mapped);
                        nr = (nr < COMPOUND_MAPPED);
                }
-       } else if (PageTransHuge(page)) {
+       } else if (folio_test_pmd_mappable(folio)) {
                /* That test is redundant: it's for safety or to optimize out */
 
-               last = atomic_add_negative(-1, compound_mapcount_ptr(page));
+               last = atomic_add_negative(-1, &folio->_entire_mapcount);
                if (last) {
-                       mapped = subpages_mapcount_ptr(page);
                        nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
                        if (likely(nr < COMPOUND_MAPPED)) {
-                               nr_pmdmapped = thp_nr_pages(page);
+                               nr_pmdmapped = folio_nr_pages(folio);
                                nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
                                /* Raced ahead of another remove and an add? */
                                if (unlikely(nr < 0))
@@ -1426,21 +1426,26 @@ void page_remove_rmap(struct page *page,
        }
 
        if (nr_pmdmapped) {
-               __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
-                               (PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
-                               NR_FILE_PMDMAPPED), -nr_pmdmapped);
+               if (folio_test_anon(folio))
+                       idx = NR_ANON_THPS;
+               else if (folio_test_swapbacked(folio))
+                       idx = NR_SHMEM_PMDMAPPED;
+               else
+                       idx = NR_FILE_PMDMAPPED;
+               __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
        }
        if (nr) {
-               __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_MAPPED :
-                               NR_FILE_MAPPED, -nr);
+               idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
+               __lruvec_stat_mod_folio(folio, idx, -nr);
+
                /*
-                * Queue anon THP for deferred split if at least one small
-                * page of the compound page is unmapped, but at least one
-                * small page is still mapped.
+                * Queue anon THP for deferred split if at least one
+                * page of the folio is unmapped, but at least one
+                * page is still mapped.
                 */
-               if (PageTransCompound(page) && PageAnon(page))
+               if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
                        if (!compound || nr < nr_pmdmapped)
-                               deferred_split_huge_page(compound_head(page));
+                               deferred_split_huge_page(&folio->page);
        }
 
        /*
@@ -1451,7 +1456,7 @@ void page_remove_rmap(struct page *page,
         * and remember that it's only reliable while mapped.
         */
 
-       unlock_page_memcg(page);
+       folio_memcg_unlock(folio);
 
        munlock_vma_page(page, vma, compound);
 }