mm/mremap: move TLB flush outside page table lock
author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
          Wed, 2 Jun 2021 03:52:30 +0000 (13:52 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 2 Jun 2021 03:52:30 +0000 (13:52 +1000)
Move the TLB flush outside the page table lock so that the kernel does less
work with the page table lock held.  Releasing the ptl while the old TLB
contents are still valid is acceptable: accesses through those stale
translations behave as if they happened before the level-3 or level-2 entry
update.

Link: https://lkml.kernel.org/r/20210422054323.150993-8-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
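
For illustration, a minimal, self-contained userspace C sketch (not kernel code) of the ordering the patch adopts: the entry is updated while the lock is held, the lock is dropped, and only then is the expensive flush issued.  The names ptl, entry, flush_range() and move_entry() are stand-ins assumed for this sketch only, playing the roles of the kernel's pmd/pud lock, the page table entry, flush_pte_tlb_pwc_range() and move_normal_pmd()/move_normal_pud().

/*
 * Userspace sketch only: "update under lock, flush after unlock",
 * so the costly flush no longer runs with the lock held.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for the page table lock */
static unsigned long entry;                               /* stand-in for a pmd/pud entry */

static void flush_range(unsigned long addr, unsigned long size)
{
	/* placeholder for the costly TLB flush over [addr, addr + size) */
	printf("flush [%#lx, %#lx)\n", addr, addr + size);
}

static void move_entry(unsigned long old_addr, unsigned long new_addr, unsigned long size)
{
	pthread_mutex_lock(&ptl);
	entry = new_addr;                /* update the translation under the lock */
	pthread_mutex_unlock(&ptl);      /* drop the lock first ... */

	flush_range(old_addr, size);     /* ... then flush, outside the lock */
}

int main(void)
{
	move_entry(0x100000, 0x200000, 0x200000);
	return 0;
}

In the diff below, the same reordering moves flush_pte_tlb_pwc_range() from before spin_unlock(old_ptl) to after it, in both the PMD and PUD paths.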
diff --git a/mm/mremap.c b/mm/mremap.c
index cb25c248a717f35298a42c53e5dcce728a4fb608..82e76e85532d813ef77baddde3d79363cffe02e1 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -258,7 +258,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_lock prevents deadlock.
         */
-       old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+       old_ptl = pmd_lock(mm, old_pmd);
        new_ptl = pmd_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -270,11 +270,11 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
        VM_BUG_ON(!pmd_none(*new_pmd));
        pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
 
-       flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        spin_unlock(old_ptl);
 
+       flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PMD_SIZE);
        return true;
 }
 #else
@@ -305,7 +305,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_lock prevents deadlock.
         */
-       old_ptl = pud_lock(vma->vm_mm, old_pud);
+       old_ptl = pud_lock(mm, old_pud);
        new_ptl = pud_lockptr(mm, new_pud);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -317,11 +317,11 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
        VM_BUG_ON(!pud_none(*new_pud));
 
        pud_populate(mm, new_pud, (pmd_t *)pud_page_vaddr(pud));
-       flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PUD_SIZE);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        spin_unlock(old_ptl);
 
+       flush_pte_tlb_pwc_range(vma, old_addr, old_addr + PUD_SIZE);
        return true;
 }
 #else