hugetlb: do not update address in huge_pmd_unshare
author     Mike Kravetz <mike.kravetz@oracle.com>
           Tue, 21 Jun 2022 23:56:19 +0000 (16:56 -0700)
committer  Liam R. Howlett <Liam.Howlett@oracle.com>
           Wed, 20 Jul 2022 00:15:11 +0000 (20:15 -0400)
As an optimization for loops sequentially processing hugetlb address
ranges, huge_pmd_unshare would update a passed address if it unshared a
pmd.  Updating a loop control variable outside the loop like this is
generally a bad idea.  These loops are now using hugetlb_mask_last_page to
optimize scanning when non-present ptes are discovered.  The same can be
done when huge_pmd_unshare returns 1 indicating a pmd was unshared.

Remove the address update from huge_pmd_unshare.  Change the passed argument
type and update all callers.  In loops that sequentially process addresses,
use hugetlb_mask_last_page to update the address when a pmd is unshared.
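
The skip-ahead idiom the callers adopt is easy to see in isolation.  Below is
a hedged, userspace-only sketch, not kernel code: PMD_SIZE and PUD_SIZE use
the x86-64 4K-page values, and fake_mask_last_page()/fake_huge_pmd_unshare()
are invented stand-ins for hugetlb_mask_last_page() and huge_pmd_unshare();
real locking and page table handling are omitted.  It only shows how OR-ing
last_addr_mask into the loop cursor lets the loop increment step over the
whole PUD-sized area that an unshare just unmapped.

/*
 * Standalone sketch (not kernel code) of the caller pattern this patch
 * introduces.
 */
#include <stdio.h>

#define PMD_SIZE (2UL << 20)	/* 2 MiB huge page */
#define PUD_SIZE (1UL << 30)	/* 1 GiB: area covered by one shared PMD page */

/* Mimics hugetlb_mask_last_page() for a PMD_SIZE hstate with pmd sharing. */
static unsigned long fake_mask_last_page(void)
{
	return PUD_SIZE - PMD_SIZE;
}

/* Pretend the first PUD-sized region of the range had a shared PMD page. */
static int fake_huge_pmd_unshare(unsigned long addr)
{
	return addr < PUD_SIZE;
}

int main(void)
{
	unsigned long start = 0, end = 2 * PUD_SIZE;
	unsigned long last_addr_mask = fake_mask_last_page();
	unsigned long address, iterations = 0;

	for (address = start; address < end; address += PMD_SIZE) {
		iterations++;
		if (fake_huge_pmd_unshare(address)) {
			/*
			 * A whole PUD_SIZE area was just unmapped: jump to
			 * its last huge page so the loop increment moves
			 * straight past it, as the updated callers do.
			 */
			address |= last_addr_mask;
			continue;
		}
		/* ... per-huge-page work would happen here ... */
	}
	/* Prints 513 (1 for the shared area + 512 after it) instead of 1024. */
	printf("loop iterations: %lu\n", iterations);
	return 0;
}

With these values last_addr_mask is PUD_SIZE - PMD_SIZE == 0x3fe00000, so a
single unshare saves up to 511 iterations, the same effect the removed
"*addr |= PUD_SIZE - PMD_SIZE;" in huge_pmd_unshare() used to provide from
inside the callee.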

Link: https://lkml.kernel.org/r/20220621235620.291305-4-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Will Deacon <will@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/rmap.c

index e37465e830fe8fb6b0fd2da88f7130ff40295a73..ee9a28ef26ee24cbbf9c451d06f981850bf6c935 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -199,7 +199,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
 unsigned long hugetlb_mask_last_page(struct hstate *h);
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-                               unsigned long *addr, pte_t *ptep);
+                               unsigned long addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -246,7 +246,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
 
 static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
-                                       unsigned long *addr, pte_t *ptep)
+                                       unsigned long addr, pte_t *ptep)
 {
        return 0;
 }
index 4338cb574af0533552b8e7f5c823caa8e75c94c2..b8d2dbf5b9109028513d387dcbad6482730aa56b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4945,7 +4945,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_end = old_addr + len;
        unsigned long last_addr_mask;
-       unsigned long old_addr_copy;
        pte_t *src_pte, *dst_pte;
        struct mmu_notifier_range range;
        bool shared_pmd = false;
@@ -4973,14 +4972,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
                if (huge_pte_none(huge_ptep_get(src_pte)))
                        continue;
 
-               /* old_addr arg to huge_pmd_unshare() is a pointer and so the
-                * arg may be modified. Pass a copy instead to preserve the
-                * value in old_addr.
-                */
-               old_addr_copy = old_addr;
-
-               if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+               if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
                        shared_pmd = true;
+                       old_addr |= last_addr_mask;
+                       new_addr |= last_addr_mask;
                        continue;
                }
 
@@ -5045,10 +5040,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
                }
 
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, address, ptep)) {
                        spin_unlock(ptl);
                        tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
                        force_flush = true;
+                       address |= last_addr_mask;
                        continue;
                }
 
@@ -6337,7 +6333,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                }
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, address, ptep)) {
                        /*
                         * When uffd-wp is enabled on the vma, unshare
                         * shouldn't happen at all.  Warn about it if it
@@ -6347,6 +6343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        pages++;
                        spin_unlock(ptl);
                        shared_pmd = true;
+                       address |= last_addr_mask;
                        continue;
                }
                pte = huge_ptep_get(ptep);
@@ -6770,11 +6767,11 @@ out:
  *         0 the underlying pte page is not shared, or it is the last user
  */
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-                                       unsigned long *addr, pte_t *ptep)
+                                       unsigned long addr, pte_t *ptep)
 {
-       pgd_t *pgd = pgd_offset(mm, *addr);
-       p4d_t *p4d = p4d_offset(pgd, *addr);
-       pud_t *pud = pud_offset(p4d, *addr);
+       pgd_t *pgd = pgd_offset(mm, addr);
+       p4d_t *p4d = p4d_offset(pgd, addr);
+       pud_t *pud = pud_offset(p4d, addr);
 
        i_mmap_assert_write_locked(vma->vm_file->f_mapping);
        BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6784,14 +6781,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
        pud_clear(pud);
        put_page(virt_to_page(ptep));
        mm_dec_nr_pmds(mm);
-       /*
-        * This update of passed address optimizes loops sequentially
-        * processing addresses in increments of huge page size (PMD_SIZE
-        * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
-        * Update address to the 'last page' in the cleared area so that
-        * calling loop can move to first page past this area.
-        */
-       *addr |= PUD_SIZE - PMD_SIZE;
        return 1;
 }
 
@@ -6803,7 +6792,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-                               unsigned long *addr, pte_t *ptep)
+                               unsigned long addr, pte_t *ptep)
 {
        return 0;
 }
@@ -6910,6 +6899,12 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above.  Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
+       unsigned long hp_size = huge_page_size(h);
+
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+       if (hp_size == PMD_SIZE)
+               return PUD_SIZE - PMD_SIZE;
+#endif
        return 0UL;
 }
 
@@ -7136,14 +7131,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
        mmu_notifier_invalidate_range_start(&range);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (address = start; address < end; address += PUD_SIZE) {
-               unsigned long tmp = address;
-
                ptep = huge_pte_offset(mm, address, sz);
                if (!ptep)
                        continue;
                ptl = huge_pte_lock(h, mm, ptep);
-               /* We don't want 'address' to be changed */
-               huge_pmd_unshare(mm, vma, &tmp, ptep);
+               huge_pmd_unshare(mm, vma, address, ptep);
                spin_unlock(ptl);
        }
        flush_hugetlb_tlb_range(vma, start, end);
index 5099b0023472cd3b20fbe4843552171451672450..12897a1fa4fd59e13070da3eba784b8646ccaa75 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1559,7 +1559,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         * do this outside rmap routines.
                         */
                        VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-                       if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                       if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
                                flush_tlb_range(vma, range.start, range.end);
                                mmu_notifier_invalidate_range(mm, range.start,
                                                              range.end);
@@ -1937,7 +1937,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                         * do this outside rmap routines.
                         */
                        VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-                       if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                       if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
                                flush_tlb_range(vma, range.start, range.end);
                                mmu_notifier_invalidate_range(mm, range.start,
                                                              range.end);