fs/proc/task_mmu: reduce scope of lazy mmu region
author Ryan Roberts <ryan.roberts@arm.com>
Mon, 3 Mar 2025 14:15:36 +0000 (14:15 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 4 Mar 2025 05:50:47 +0000 (21:50 -0800)
Update the way arch_[enter|leave]_lazy_mmu_mode() is called in
pagemap_scan_pmd_entry() so that it follows the normal pattern of holding
the ptl for user space mappings.  As a result, the scope is reduced to only
the pte table, but that is where most of the performance win is.
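
For reference, the "normal pattern" referred to above looks roughly like the
sketch below: take the pte lock, enter lazy MMU mode, walk the ptes, then
leave lazy MMU mode before unlocking.  This is only an illustration; the
function name and loop body are hypothetical, not the actual
pagemap_scan_pmd_entry() code.

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical helper showing the usual lazy MMU scope for user mappings. */
static int example_scan_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				  unsigned long start, unsigned long end)
{
	pte_t *start_pte, *pte;
	unsigned long addr;
	spinlock_t *ptl;

	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
	if (!pte)
		return -EAGAIN;		/* pte table gone; caller retries */

	arch_enter_lazy_mmu_mode();	/* scope: just this pte table */

	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
		pte_t ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/* ... inspect or modify the pte; updates may be batched ... */
	}

	arch_leave_lazy_mmu_mode();	/* flush any batched updates */
	pte_unmap_unlock(start_pte, ptl);
	return 0;
}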

While I believe there wasn't technically a bug here, the original scope made
it easier to accidentally nest lazy mmu regions or, worse, to accidentally
call something like kmap(), which expects a pte modification to take effect
immediately but would instead have it deferred.
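
To make that hazard concrete, the hypothetical snippet below shows the sort
of call that becomes risky when the whole handler runs inside a lazy region;
the helper name is made up, and whether the deferral actually bites depends
on the architecture's lazy MMU implementation.

#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/string.h>

/* Hypothetical: a wide lazy region wraps a helper that maps and touches a page. */
static void example_broad_lazy_region(struct page *page)
{
	void *kaddr;

	arch_enter_lazy_mmu_mode();

	/*
	 * kmap_local_page() installs a temporary pte and the caller then
	 * dereferences the new mapping right away.  If that pte write were
	 * deferred by the surrounding lazy region, the access could hit a
	 * stale mapping, which is why the tighter scope is safer.
	 */
	kaddr = kmap_local_page(page);
	memset(kaddr, 0, PAGE_SIZE);
	kunmap_local(kaddr);

	arch_leave_lazy_mmu_mode();
}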

Link: https://lkml.kernel.org/r/20250303141542.3371656-3-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c

index 061f16b7671185e78fa015dfbafbf6c66c2e78a3..994cde10e3f4d327b349e98a1a30519872f90f4e 100644
@@ -2479,22 +2479,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
        spinlock_t *ptl;
        int ret;
 
-       arch_enter_lazy_mmu_mode();
-
        ret = pagemap_scan_thp_entry(pmd, start, end, walk);
-       if (ret != -ENOENT) {
-               arch_leave_lazy_mmu_mode();
+       if (ret != -ENOENT)
                return ret;
-       }
 
        ret = 0;
        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
        if (!pte) {
-               arch_leave_lazy_mmu_mode();
                walk->action = ACTION_AGAIN;
                return 0;
        }
 
+       arch_enter_lazy_mmu_mode();
+
        if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
                /* Fast path for performing exclusive WP */
                for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
@@ -2563,8 +2560,8 @@ flush_and_return:
        if (flush_end)
                flush_tlb_range(vma, start, addr);
 
-       pte_unmap_unlock(start_pte, ptl);
        arch_leave_lazy_mmu_mode();
+       pte_unmap_unlock(start_pte, ptl);
 
        cond_resched();
        return ret;