mm: mprotect: avoid unnecessary struct page accessing if pte_protnone()
author	Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 14 Oct 2025 11:33:48 +0000 (19:33 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:56 +0000 (21:28 -0700)
If pte_protnone() is true, we can avoid the unnecessary struct page
access and reduce the cache footprint when scanning page tables for prot
numa.  The pmbench memory-accessing benchmark should benefit from this;
see commit a818f5363a0e ("autonuma: reduce cache footprint when scanning
page tables") for more background.

Link: https://lkml.kernel.org/r/20251014113349.2618158-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mprotect.c

index 056986d9076a846a72314832f76bc09031f6a500..c998cd226c71e89f99fb29282ff3e3c54061bfc6 100644
@@ -118,18 +118,13 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
        return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
-                          pte_t oldpte, pte_t *pte, int target_node,
-                          struct folio *folio)
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
+               struct folio *folio)
 {
        bool ret = true;
        bool toptier;
        int nid;
 
-       /* Avoid TLB flush if possible */
-       if (pte_protnone(oldpte))
-               goto skip;
-
        if (!folio)
                goto skip;
 
@@ -307,23 +302,23 @@ static long change_pte_range(struct mmu_gather *tlb,
                        struct page *page;
                        pte_t ptent;
 
+                       /* Already in the desired state. */
+                       if (prot_numa && pte_protnone(oldpte))
+                               continue;
+
                        page = vm_normal_page(vma, addr, oldpte);
                        if (page)
                                folio = page_folio(page);
+
                        /*
                         * Avoid trapping faults against the zero or KSM
                         * pages. See similar comment in change_huge_pmd.
                         */
-                       if (prot_numa) {
-                               int ret = prot_numa_skip(vma, addr, oldpte, pte,
-                                                        target_node, folio);
-                               if (ret) {
-
-                                       /* determine batch to skip */
-                                       nr_ptes = mprotect_folio_pte_batch(folio,
-                                                 pte, oldpte, max_nr_ptes, /* flags = */ 0);
-                                       continue;
-                               }
+                       if (prot_numa && prot_numa_skip(vma, target_node, folio)) {
+                               /* determine batch to skip */
+                               nr_ptes = mprotect_folio_pte_batch(folio,
+                                         pte, oldpte, max_nr_ptes, /* flags = */ 0);
+                               continue;
                        }
 
                        nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);