www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Revert "mm/madvise: optimize lazyfreeing with mTHP in madvise_free"
author: Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 4 Jun 2024 16:00:45 +0000 (12:00 -0400)
committer: Liam R. Howlett <Liam.Howlett@oracle.com>
Tue, 4 Jun 2024 16:31:56 +0000 (12:31 -0400)
This reverts commit dce7d10be4bbd31412c4bedd3a8bb2d25b96e025.

mm/madvise.c

index a77893462b92449c29616aa7553ba172c8b9836f..284abcf07eb4396e909d9d36b7521f092737109b 100644 (file)
@@ -643,7 +643,6 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 
 {
-       const cydp_t cydp_flags = CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY;
        struct mmu_gather *tlb = walk->private;
        struct mm_struct *mm = tlb->mm;
        struct vm_area_struct *vma = walk->vma;
@@ -698,57 +697,44 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                        continue;
 
                /*
-                * If we encounter a large folio, only split it if it is not
-                * fully mapped within the range we are operating on. Otherwise
-                * leave it as is so that it can be marked as lazyfree. If we
-                * fail to split a folio, leave it in place and advance to the
-                * next pte in the range.
+                * If pmd isn't transhuge but the folio is large and
+                * is owned by only this process, split it and
+                * deactivate all pages.
                 */
                if (folio_test_large(folio)) {
-                       bool any_young, any_dirty;
-
-                       nr = madvise_folio_pte_batch(addr, end, folio, pte,
-                                                    ptent, &any_young, &any_dirty);
-
-                       if (nr < folio_nr_pages(folio)) {
-                               int err;
-
-                               if (folio_likely_mapped_shared(folio))
-                                       continue;
-                               if (!folio_trylock(folio))
-                                       continue;
-                               folio_get(folio);
-                               arch_leave_lazy_mmu_mode();
-                               pte_unmap_unlock(start_pte, ptl);
-                               start_pte = NULL;
-                               err = split_folio(folio);
-                               folio_unlock(folio);
-                               folio_put(folio);
-                               pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-                               start_pte = pte;
-                               if (!start_pte)
-                                       break;
-                               arch_enter_lazy_mmu_mode();
-                               if (!err)
-                                       nr = 0;
-                               continue;
-                       }
+                       int err;
 
-                       if (any_young)
-                               ptent = pte_mkyoung(ptent);
-                       if (any_dirty)
-                               ptent = pte_mkdirty(ptent);
+                       if (folio_likely_mapped_shared(folio))
+                               break;
+                       if (!folio_trylock(folio))
+                               break;
+                       folio_get(folio);
+                       arch_leave_lazy_mmu_mode();
+                       pte_unmap_unlock(start_pte, ptl);
+                       start_pte = NULL;
+                       err = split_folio(folio);
+                       folio_unlock(folio);
+                       folio_put(folio);
+                       if (err)
+                               break;
+                       start_pte = pte =
+                               pte_offset_map_lock(mm, pmd, addr, &ptl);
+                       if (!start_pte)
+                               break;
+                       arch_enter_lazy_mmu_mode();
+                       pte--;
+                       addr -= PAGE_SIZE;
+                       continue;
                }
 
                if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
                        if (!folio_trylock(folio))
                                continue;
                        /*
-                        * If we have a large folio at this point, we know it is
-                        * fully mapped so if its mapcount is the same as its
-                        * number of pages, it must be exclusive.
+                        * If folio is shared with others, we mustn't clear
+                        * the folio's dirty flag.
                         */
-                       if (folio_mapcount(folio) != folio_nr_pages(folio)) {
+                       if (folio_mapcount(folio) != 1) {
                                folio_unlock(folio);
                                continue;
                        }
@@ -764,8 +750,19 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                }
 
                if (pte_young(ptent) || pte_dirty(ptent)) {
-                       clear_young_dirty_ptes(vma, addr, pte, nr, cydp_flags);
-                       tlb_remove_tlb_entries(tlb, pte, nr, addr);
+                       /*
+                        * Some of architecture(ex, PPC) don't update TLB
+                        * with set_pte_at and tlb_remove_tlb_entry so for
+                        * the portability, remap the pte with old|clean
+                        * after pte clearing.
+                        */
+                       ptent = ptep_get_and_clear_full(mm, addr, pte,
+                                                       tlb->fullmm);
+
+                       ptent = pte_mkold(ptent);
+                       ptent = pte_mkclean(ptent);
+                       set_pte_at(mm, addr, pte, ptent);
+                       tlb_remove_tlb_entry(tlb, pte, addr);
                }
                folio_mark_lazyfree(folio);
        }