From 7216db147f571746e584614ed313892a26cba8ef Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Tue, 4 Jun 2024 12:00:45 -0400
Subject: [PATCH] Revert "mm/madvise: optimize lazyfreeing with mTHP in
 madvise_free"

This reverts commit dce7d10be4bbd31412c4bedd3a8bb2d25b96e025.
---
 mm/madvise.c | 85 +++++++++++++++++++++++++---------------------------
 1 file changed, 41 insertions(+), 44 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index a77893462b92..284abcf07eb4 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -643,7 +643,6 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 
 {
-	const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
 	struct mmu_gather *tlb = walk->private;
 	struct mm_struct *mm = tlb->mm;
 	struct vm_area_struct *vma = walk->vma;
@@ -698,57 +697,44 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 
 		/*
-		 * If we encounter a large folio, only split it if it is not
-		 * fully mapped within the range we are operating on. Otherwise
-		 * leave it as is so that it can be marked as lazyfree. If we
-		 * fail to split a folio, leave it in place and advance to the
-		 * next pte in the range.
+		 * If pmd isn't transhuge but the folio is large and
+		 * is owned by only this process, split it and
+		 * deactivate all pages.
 		 */
 		if (folio_test_large(folio)) {
-			bool any_young, any_dirty;
-
-			nr = madvise_folio_pte_batch(addr, end, folio, pte,
-						     ptent, &any_young, &any_dirty);
-
-			if (nr < folio_nr_pages(folio)) {
-				int err;
-
-				if (folio_likely_mapped_shared(folio))
-					continue;
-				if (!folio_trylock(folio))
-					continue;
-				folio_get(folio);
-				arch_leave_lazy_mmu_mode();
-				pte_unmap_unlock(start_pte, ptl);
-				start_pte = NULL;
-				err = split_folio(folio);
-				folio_unlock(folio);
-				folio_put(folio);
-				pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-				start_pte = pte;
-				if (!start_pte)
-					break;
-				arch_enter_lazy_mmu_mode();
-				if (!err)
-					nr = 0;
-				continue;
-			}
+			int err;
 
-			if (any_young)
-				ptent = pte_mkyoung(ptent);
-			if (any_dirty)
-				ptent = pte_mkdirty(ptent);
+			if (folio_likely_mapped_shared(folio))
+				break;
+			if (!folio_trylock(folio))
+				break;
+			folio_get(folio);
+			arch_leave_lazy_mmu_mode();
+			pte_unmap_unlock(start_pte, ptl);
+			start_pte = NULL;
+			err = split_folio(folio);
+			folio_unlock(folio);
+			folio_put(folio);
+			if (err)
+				break;
+			start_pte = pte =
+				pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (!start_pte)
+				break;
+			arch_enter_lazy_mmu_mode();
+			pte--;
+			addr -= PAGE_SIZE;
+			continue;
 		}
 
 		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
 			if (!folio_trylock(folio))
 				continue;
 			/*
-			 * If we have a large folio at this point, we know it is
-			 * fully mapped so if its mapcount is the same as its
-			 * number of pages, it must be exclusive.
+			 * If folio is shared with others, we mustn't clear
+			 * the folio's dirty flag.
 			 */
-			if (folio_mapcount(folio) != folio_nr_pages(folio)) {
+			if (folio_mapcount(folio) != 1) {
 				folio_unlock(folio);
 				continue;
 			}
@@ -764,8 +750,19 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 		}
 
 		if (pte_young(ptent) || pte_dirty(ptent)) {
-			clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
-			tlb_remove_tlb_entries(tlb, pte, nr, addr);
+			/*
+			 * Some of architecture(ex, PPC) don't update TLB
+			 * with set_pte_at and tlb_remove_tlb_entry so for
+			 * the portability, remap the pte with old|clean
+			 * after pte clearing.
+			 */
+			ptent = ptep_get_and_clear_full(mm, addr, pte,
+							tlb->fullmm);
+
+			ptent = pte_mkold(ptent);
+			ptent = pte_mkclean(ptent);
+			set_pte_at(mm, addr, pte, ptent);
+			tlb_remove_tlb_entry(tlb, pte, addr);
 		}
 		folio_mark_lazyfree(folio);
 	}
-- 
2.49.0