www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm/khugepaged: unify pmd folio installation with map_anon_folio_pmd()
authorWei Yang <richard.weiyang@gmail.com>
Wed, 8 Oct 2025 09:54:53 +0000 (09:54 +0000)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:41 +0000 (21:28 -0700)
Currently we install a pmd folio with map_anon_folio_pmd() in
__do_huge_pmd_anonymous_page() and do_huge_zero_wp_pmd().  Meanwhile, in
collapse_huge_page(), the same installation is done with identical code
except for the statistics adjustment.

Unify the process by using map_anon_folio_pmd() to install the pmd folio.
Split it into map_anon_folio_pmd_pf() and map_anon_folio_pmd_nopf(), for
use in page-fault and non-page-fault contexts, respectively.

No functional change is intended.

Link: https://lkml.kernel.org/r/20251008095453.18772-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Lance Yang <lance.yang@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/khugepaged.c

index 80fee248b5918c3062320d2663c97f09249d2a5a..9ef4f7cc47a64aad475061099a4be4eb13e28e60 100644 (file)
@@ -548,6 +548,8 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmd, bool freeze);
 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
                           pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+               struct vm_area_struct *vma, unsigned long haddr);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -638,6 +640,11 @@ static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
        return false;
 }
 
+static inline void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+               struct vm_area_struct *vma, unsigned long haddr)
+{
+}
+
 #define split_huge_pud(__vma, __pmd, __address)        \
        do { } while (0)
 
index 49f2ff9de1cad5a0a2a13d8d20158d95239fbf60..2764613a9b3dc03fda7b004d27c1fba18c487c2e 100644 (file)
@@ -1217,7 +1217,7 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
        return folio;
 }
 
-static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
                struct vm_area_struct *vma, unsigned long haddr)
 {
        pmd_t entry;
@@ -1228,11 +1228,17 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
        folio_add_lru_vma(folio, vma);
        set_pmd_at(vma->vm_mm, haddr, pmd, entry);
        update_mmu_cache_pmd(vma, haddr, pmd);
+       deferred_split_folio(folio, false);
+}
+
+static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
+               struct vm_area_struct *vma, unsigned long haddr)
+{
+       map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
        add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
        count_vm_event(THP_FAULT_ALLOC);
        count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
        count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
-       deferred_split_folio(folio, false);
 }
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
@@ -1271,7 +1277,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        return ret;
                }
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-               map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+               map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
                mm_inc_nr_ptes(vma->vm_mm);
                spin_unlock(vmf->ptl);
        }
@@ -1943,7 +1949,7 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
        if (ret)
                goto release;
        (void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
-       map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+       map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
        goto unlock;
 release:
        folio_put(folio);
index abe54f0043c730e7eb8459e108205d75f018fbf5..e947b96e144354630f71685aba08b6c32db061e7 100644 (file)
@@ -1224,17 +1224,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
        __folio_mark_uptodate(folio);
        pgtable = pmd_pgtable(_pmd);
 
-       _pmd = folio_mk_pmd(folio, vma->vm_page_prot);
-       _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
-       folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
-       folio_add_lru_vma(folio, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
-       set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache_pmd(vma, address, pmd);
-       deferred_split_folio(folio, false);
+       map_anon_folio_pmd_nopf(folio, pmd, vma, address);
        spin_unlock(pmd_ptl);
 
        folio = NULL;