mm: add pmd_folio()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 26 Mar 2024 20:28:23 +0000 (20:28 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:19 +0000 (20:56 -0700)
Convert directly from a pmd to a folio without going through another
representation first.  For now this is just a slightly shorter way to
write it, but it might end up being more efficient later.
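
[Editorial sketch, not part of the commit: assuming a pmd_t value "pmd" known to map a present huge page, the conversion replaces either of the two older spellings with the new helper.]

	struct folio *folio;

	/* Old spellings: go through struct page or a pfn first. */
	folio = page_folio(pmd_page(pmd));
	folio = pfn_folio(pmd_pfn(pmd));

	/* New helper added by this commit. */
	folio = pmd_folio(pmd);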

Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pgtable.h
mm/huge_memory.c
mm/madvise.c
mm/mempolicy.c
mm/mlock.c
mm/userfaultfd.c

include/linux/pgtable.h
index 600e17d036599be32fd94a00bb4ecd7e335bfeb0..09c85c7bf9c2c6df4b5b913658bb2e9954daeb9d 100644 (file)
@@ -50,6 +50,8 @@
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #endif
 
+#define pmd_folio(pmd) page_folio(pmd_page(pmd))
+
 /*
  * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
  *
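
[Editorial sketch, not part of the commit: the macro above composes two existing helpers; an inline-function spelling would be equivalent. The macro form matches the neighbouring pmd_pgtable() definition. The name pmd_folio_equiv() is illustrative only, not kernel API.]

	/* Hypothetical static-inline equivalent of the pmd_folio() macro. */
	static inline struct folio *pmd_folio_equiv(pmd_t pmd)
	{
		return page_folio(pmd_page(pmd));
	}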
mm/huge_memory.c
index 5c043c7b506244697be921cb65d6f6b7e035f4f7..712263e3b1f6e7256fe4cc2426f8b094f9171d0c 100644 (file)
@@ -1816,7 +1816,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                goto out;
        }
 
-       folio = pfn_folio(pmd_pfn(orig_pmd));
+       folio = pmd_folio(orig_pmd);
        /*
         * If other processes are mapping this folio, we couldn't discard
         * the folio unless they all do MADV_FREE so let's skip the folio.
@@ -2086,7 +2086,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                if (pmd_protnone(*pmd))
                        goto unlock;
 
-               folio = page_folio(pmd_page(*pmd));
+               folio = pmd_folio(*pmd);
                toptier = node_is_toptier(folio_nid(folio));
                /*
                 * Skip scanning top tier node if normal numa
@@ -2663,7 +2663,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                 * It's safe to call pmd_page when folio is set because it's
                 * guaranteed that pmd is present.
                 */
-               if (folio && folio != page_folio(pmd_page(*pmd)))
+               if (folio && folio != pmd_folio(*pmd))
                        goto out;
                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
        }
mm/madvise.c
index 7625830d6ae91144a84c4391cccfb219b1b1ffcf..1f77a51baaac29785aa54d4ace21aafe3142e1e1 100644 (file)
@@ -363,7 +363,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                        goto huge_unlock;
                }
 
-               folio = pfn_folio(pmd_pfn(orig_pmd));
+               folio = pmd_folio(orig_pmd);
 
                /* Do not interfere with other mappings of this folio */
                if (folio_likely_mapped_shared(folio))
mm/mempolicy.c
index 5743028a63a511b66e8e066708a23d56bcc57d93..aec756ae56377996e41e41e06b8659023dab1c9e 100644 (file)
@@ -509,7 +509,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
                qp->nr_failed++;
                return;
        }
-       folio = pfn_folio(pmd_pfn(*pmd));
+       folio = pmd_folio(*pmd);
        if (is_huge_zero_folio(folio)) {
                walk->action = ACTION_CONTINUE;
                return;
mm/mlock.c
index 1ed2f2ab37cd18a08fb1b4069c4aee372a30b934..30b51cdea89decc236249867bd984475e75d58e2 100644 (file)
@@ -378,7 +378,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
                        goto out;
                if (is_huge_zero_pmd(*pmd))
                        goto out;
-               folio = page_folio(pmd_page(*pmd));
+               folio = pmd_folio(*pmd);
                if (vma->vm_flags & VM_LOCKED)
                        mlock_folio(folio);
                else
mm/userfaultfd.c
index a0ec14553fbeaabda75c2816ec6acc3f90cdf699..b70618e8dcd2486821303fa9ae6360baa40fbb62 100644 (file)
@@ -1662,7 +1662,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
                        /* Check if we can move the pmd without splitting it. */
                        if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
                            !pmd_none(dst_pmdval)) {
-                               struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+                               struct folio *folio = pmd_folio(*src_pmd);
 
                                if (!folio || (!is_huge_zero_folio(folio) &&
                                               !PageAnonExclusive(&folio->page))) {