mm: rmap: simplify the hugetlb handling when unmapping or migrating
author    Baolin Wang <baolin.wang@linux.alibaba.com>
          Mon, 20 Jun 2022 11:47:15 +0000 (19:47 +0800)
committer Liam R. Howlett <Liam.Howlett@oracle.com>
          Wed, 20 Jul 2022 00:15:11 +0000 (20:15 -0400)
As discussed previously [1], the hugetlb handling in try_to_unmap_one() and
try_to_migrate_one() requires too many levels of indentation.  We can
combine the folio_test_anon() and huge_pmd_unshare() checks to save one
level of indentation, by adding a local variable and moving the
VM_BUG_ON() slightly earlier.
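
In outline, the change applied in both functions is the following (a
condensed sketch of the diff below; the elided statements are unchanged):

    Before:

	if (!folio_test_anon(folio)) {
		VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));

		if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
			...
		}
	}

    After:

	bool anon = folio_test_anon(folio);

	VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
	if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
		...
	}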

No functional changes intended in this patch.

[1] https://lore.kernel.org/all/0b986dc4-5843-3e2d-c2df-5a2e9f13e6ab@oracle.com/

Link: https://lkml.kernel.org/r/28414b1b96f095e838c1e548074f8e0fc70d78cf.1655724713.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index d28a0ab725b67581aa10d7357a93d7082a9e1e7e..5099b0023472cd3b20fbe4843552171451672450 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1537,6 +1537,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                 PageAnonExclusive(subpage);
 
                if (folio_test_hugetlb(folio)) {
+                       bool anon = folio_test_anon(folio);
+
                        /*
                         * The try_to_unmap() is only passed a hugetlb page
                         * in the case where the hugetlb page is poisoned.
@@ -1551,31 +1553,28 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         */
                        flush_cache_range(vma, range.start, range.end);
 
-                       if (!folio_test_anon(folio)) {
+                       /*
+                        * To call huge_pmd_unshare, i_mmap_rwsem must be
+                        * held in write mode.  Caller needs to explicitly
+                        * do this outside rmap routines.
+                        */
+                       VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+                       if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                               flush_tlb_range(vma, range.start, range.end);
+                               mmu_notifier_invalidate_range(mm, range.start,
+                                                             range.end);
+
                                /*
-                                * To call huge_pmd_unshare, i_mmap_rwsem must be
-                                * held in write mode.  Caller needs to explicitly
-                                * do this outside rmap routines.
+                                * The ref count of the PMD page was dropped
+                                * which is part of the way map counting
+                                * is done for shared PMDs.  Return 'true'
+                                * here.  When there is no other sharing,
+                                * huge_pmd_unshare returns false and we will
+                                * unmap the actual page and drop map count
+                                * to zero.
                                 */
-                               VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-                               if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-                                       flush_tlb_range(vma, range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm, range.start,
-                                                                     range.end);
-
-                                       /*
-                                        * The ref count of the PMD page was dropped
-                                        * which is part of the way map counting
-                                        * is done for shared PMDs.  Return 'true'
-                                        * here.  When there is no other sharing,
-                                        * huge_pmd_unshare returns false and we will
-                                        * unmap the actual page and drop map count
-                                        * to zero.
-                                        */
-                                       page_vma_mapped_walk_done(&pvmw);
-                                       break;
-                               }
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
                        }
                        pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
                } else {
@@ -1921,6 +1920,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                 PageAnonExclusive(subpage);
 
                if (folio_test_hugetlb(folio)) {
+                       bool anon = folio_test_anon(folio);
+
                        /*
                         * huge_pmd_unshare may unmap an entire PMD page.
                         * There is no way of knowing exactly which PMDs may
@@ -1930,31 +1931,28 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         */
                        flush_cache_range(vma, range.start, range.end);
 
-                       if (!folio_test_anon(folio)) {
+                       /*
+                        * To call huge_pmd_unshare, i_mmap_rwsem must be
+                        * held in write mode.  Caller needs to explicitly
+                        * do this outside rmap routines.
+                        */
+                       VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+                       if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                               flush_tlb_range(vma, range.start, range.end);
+                               mmu_notifier_invalidate_range(mm, range.start,
+                                                             range.end);
+
                                /*
-                                * To call huge_pmd_unshare, i_mmap_rwsem must be
-                                * held in write mode.  Caller needs to explicitly
-                                * do this outside rmap routines.
+                                * The ref count of the PMD page was dropped
+                                * which is part of the way map counting
+                                * is done for shared PMDs.  Return 'true'
+                                * here.  When there is no other sharing,
+                                * huge_pmd_unshare returns false and we will
+                                * unmap the actual page and drop map count
+                                * to zero.
                                 */
-                               VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-                               if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-                                       flush_tlb_range(vma, range.start, range.end);
-                                       mmu_notifier_invalidate_range(mm, range.start,
-                                                                     range.end);
-
-                                       /*
-                                        * The ref count of the PMD page was dropped
-                                        * which is part of the way map counting
-                                        * is done for shared PMDs.  Return 'true'
-                                        * here.  When there is no other sharing,
-                                        * huge_pmd_unshare returns false and we will
-                                        * unmap the actual page and drop map count
-                                        * to zero.
-                                        */
-                                       page_vma_mapped_walk_done(&pvmw);
-                                       break;
-                               }
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
                        }
 
                        /* Nuke the hugetlb page table entry */