hugetlb: Simplify make_huge_pte()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Wed, 19 Feb 2025 04:38:06 +0000 (23:38 -0500)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
	Tue, 25 Feb 2025 19:47:20 +0000 (14:47 -0500)
mk_huge_pte() is a bad API.  Despite its name, it creates a normal
PTE which is later transformed into a huge PTE by arch_make_huge_pte().
So replace the page argument with a folio argument and call folio_mk_pte()
instead.  Then, because we now know this is a regular PTE rather than a
huge one, use pte_mkdirty() instead of huge_pte_mkdirty() (and similar
functions).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
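
For context, a minimal sketch of the helper this patch switches to, assuming
the folio_mk_pte() definition added elsewhere in this series (it is not part
of the diff below).  It builds an ordinary PTE from the folio's PFN; the
huge transformation still happens later in arch_make_huge_pte():

	/* Sketch, assuming the folio_mk_pte() added in the same series. */
	static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
	{
		return pfn_pte(folio_pfn(folio), pgprot);
	}

Because the entry is still a regular PTE at this point, the generic
pte_mkdirty(), pte_mkwrite_novma() and pte_wrprotect() helpers are enough,
as the message above explains; the entry only becomes a huge PTE once
arch_make_huge_pte() has been applied.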
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 163190e89ea16450026496c020b544877db147d1..1ea42dd0101200203336caf08ce365f5560ed85b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5162,18 +5162,16 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .pagesize = hugetlb_vm_op_pagesize,
 };
 
-static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
                bool try_mkwrite)
 {
-       pte_t entry;
+       pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
        unsigned int shift = huge_page_shift(hstate_vma(vma));
 
        if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
-               entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
-                                        vma->vm_page_prot)));
+               entry = pte_mkwrite_novma(pte_mkdirty(entry));
        } else {
-               entry = huge_pte_wrprotect(mk_huge_pte(page,
-                                          vma->vm_page_prot));
+               entry = pte_wrprotect(entry);
        }
        entry = pte_mkyoung(entry);
        entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
@@ -5228,7 +5226,7 @@ static void
 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
                      struct folio *new_folio, pte_t old, unsigned long sz)
 {
-       pte_t newpte = make_huge_pte(vma, &new_folio->page, true);
+       pte_t newpte = make_huge_pte(vma, new_folio, true);
 
        __folio_mark_uptodate(new_folio);
        hugetlb_add_new_anon_rmap(new_folio, vma, addr);
@@ -5978,7 +5976,7 @@ retry_avoidcopy:
        spin_lock(vmf->ptl);
        vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
        if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
-               pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
+               pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
 
                /* Break COW or unshare */
                huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
@@ -6258,7 +6256,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
                hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
        else
                hugetlb_add_file_rmap(folio);
-       new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED);
+       new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
        /*
         * If this pte was previously wr-protected, keep it wr-protected even
         * if populated.
@@ -6743,7 +6741,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
         * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
         * with wp flag set, don't set pte write bit.
         */
-       _dst_pte = make_huge_pte(dst_vma, &folio->page,
+       _dst_pte = make_huge_pte(dst_vma, folio,
                                 !wp_enabled && !(is_continue && !vm_shared));
        /*
         * Always mark UFFDIO_COPY page dirty; note that this may not be