mm: Add folio_mk_pmd()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Wed, 19 Feb 2025 21:36:48 +0000 (16:36 -0500)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
	Tue, 25 Feb 2025 19:47:20 +0000 (14:47 -0500)
Removes five conversions from folio to page.  Also removes both callers
of mk_pmd() that aren't part of mk_huge_pmd(), getting us a step closer to
removing the confusion between mk_pmd(), mk_huge_pmd() and pmd_mkhuge().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
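
A minimal sketch (not part of the patch) of the call-site conversion this
commit performs; the wrapper function and variable names below are
hypothetical, and locking/pagetable-deposit handling is omitted:

	static void sketch_map_folio_pmd(struct vm_area_struct *vma,
					 struct folio *folio, pmd_t *pmd,
					 unsigned long haddr)
	{
		pmd_t entry;

		/* Old pattern: build a PMD from the folio's first page,
		 * then mark it huge:
		 *
		 *	entry = pmd_mkhuge(mk_pmd(&folio->page,
		 *				  vma->vm_page_prot));
		 */

		/* New pattern: build the huge PMD from the folio directly. */
		entry = folio_mk_pmd(folio, vma->vm_page_prot);
		set_pmd_at(vma->vm_mm, haddr, pmd, entry);
	}

folio_mk_pmd() folds the pmd_mkhuge() step into the helper, so callers no
longer need to reach for &folio->page.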
fs/dax.c
include/linux/mm.h
mm/huge_memory.c
mm/khugepaged.c
mm/memory.c

index 21b47402b3dca4684f5caff34d68cfd0df993080..22efc6c44539f530279de15276b31088c61a0a60 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1237,8 +1237,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                mm_inc_nr_ptes(vma->vm_mm);
        }
-       pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
-       pmd_entry = pmd_mkhuge(pmd_entry);
+       pmd_entry = folio_mk_pmd(zero_folio, vmf->vma->vm_page_prot);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
        spin_unlock(ptl);
        trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
index b1e311bae6b712ab4da36c9b81f9f25eae3ffa62..5c883c619fa41dd607c8609f2cdc5041bbd1afed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1936,7 +1936,24 @@ static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
 {
        return pfn_pte(folio_pfn(folio), pgprot);
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * folio_mk_pmd - Create a PMD for this folio
+ * @folio: The folio to create a PMD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pmd_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
+{
+       return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
+}
 #endif
+#endif /* CONFIG_MMU */
 
 /**
  * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
index 3d3ebdc002d59734755ddaf66489e93e12eee7df..95ed5dd9622b8db12b4b2a39b9144f11673265bb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1203,7 +1203,7 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
 {
        pmd_t entry;
 
-       entry = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+       entry = folio_mk_pmd(folio, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
        folio_add_lru_vma(folio, vma);
@@ -1311,8 +1311,7 @@ static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
        pmd_t entry;
        if (!pmd_none(*pmd))
                return;
-       entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
-       entry = pmd_mkhuge(entry);
+       entry = folio_mk_pmd(zero_folio, vma->vm_page_prot);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
@@ -2570,12 +2569,12 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
                folio_move_anon_rmap(src_folio, dst_vma);
                src_folio->index = linear_page_index(dst_vma, dst_addr);
 
-               _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+               _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
                /* Follow mremap() behavior and treat the entry dirty after the move */
                _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
        } else {
                src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
-               _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
+               _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
        }
        set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
 
@@ -4306,7 +4305,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 
        entry = pmd_to_swp_entry(*pvmw->pmd);
        folio_get(folio);
-       pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
+       pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
index 5f0be134141e83666c7465af5d90b72fd5b6ec95..4f85597a7f645fbc3e357c6c6e1110be101f3f03 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1239,7 +1239,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        __folio_mark_uptodate(folio);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+       _pmd = folio_mk_pmd(folio, vma->vm_page_prot);
        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
 
        spin_lock(pmd_ptl);
index ea5a58db76ddf1b5f4ee1c93e8a98e1b4e69ab54..6d1a1185c34cfe581aa101f421d18e59d0c85e82 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5078,7 +5078,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 
        flush_icache_pages(vma, page, HPAGE_PMD_NR);
 
-       entry = mk_huge_pmd(page, vma->vm_page_prot);
+       entry = folio_mk_pmd(folio, vma->vm_page_prot);
        if (write)
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);