www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: replace READ_ONCE() with standard page table accessors
author: Anshuman Khandual <anshuman.khandual@arm.com>
Tue, 7 Oct 2025 06:31:00 +0000 (07:31 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Wed, 15 Oct 2025 04:28:30 +0000 (21:28 -0700)
Replace all READ_ONCE() uses with the standard page table accessors, i.e.
pxdp_get(), which default to READ_ONCE() in cases where the platform does
not override them.

Link: https://lkml.kernel.org/r/20251007063100.2396936-1-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <lance.yang@linux.dev>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Dev Jain <dev.jain@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/gup.c
mm/hmm.c
mm/memory.c
mm/mprotect.c
mm/sparse-vmemmap.c
mm/vmscan.c

index d2524fe09338fd56e2229714ffb2c58873dea028..95d948c8e86c92dd74d781d8cdf0d049143ffc95 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -950,7 +950,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
 
        pudp = pud_offset(p4dp, address);
-       pud = READ_ONCE(*pudp);
+       pud = pudp_get(pudp);
        if (!pud_present(pud))
                return no_page_table(vma, flags, address);
        if (pud_leaf(pud)) {
@@ -975,7 +975,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
        p4d_t *p4dp, p4d;
 
        p4dp = p4d_offset(pgdp, address);
-       p4d = READ_ONCE(*p4dp);
+       p4d = p4dp_get(p4dp);
        BUILD_BUG_ON(p4d_leaf(p4d));
 
        if (!p4d_present(p4d) || p4d_bad(p4d))
@@ -3060,7 +3060,7 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
 
        pudp = pud_offset_lockless(p4dp, p4d, addr);
        do {
-               pud_t pud = READ_ONCE(*pudp);
+               pud_t pud = pudp_get(pudp);
 
                next = pud_addr_end(addr, end);
                if (unlikely(!pud_present(pud)))
@@ -3086,7 +3086,7 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 
        p4dp = p4d_offset_lockless(pgdp, pgd, addr);
        do {
-               p4d_t p4d = READ_ONCE(*p4dp);
+               p4d_t p4d = p4dp_get(p4dp);
 
                next = p4d_addr_end(addr, end);
                if (!p4d_present(p4d))
@@ -3108,7 +3108,7 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 
        pgdp = pgd_offset(current->mm, addr);
        do {
-               pgd_t pgd = READ_ONCE(*pgdp);
+               pgd_t pgd = pgdp_get(pgdp);
 
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
index 87562914670a1f86634de6745433e434f3d56d74..a56081d67ad691d81728586ba7a4927034e949e5 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -491,7 +491,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;
 
-       pud = READ_ONCE(*pudp);
+       pud = pudp_get(pudp);
        if (!pud_present(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
index d350c7a6301b2a1205d210266c21a0ef18f65226..62e70829d1bdec9baa317af36b7b828278e2d71f 100644 (file)
@@ -6716,12 +6716,12 @@ retry:
                goto out;
 
        p4dp = p4d_offset(pgdp, address);
-       p4d = READ_ONCE(*p4dp);
+       p4d = p4dp_get(p4dp);
        if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
                goto out;
 
        pudp = pud_offset(p4dp, address);
-       pud = READ_ONCE(*pudp);
+       pud = pudp_get(pudp);
        if (pud_none(pud))
                goto out;
        if (pud_leaf(pud)) {
index 113b489858341ff1d68af3596c1a695453493551..988c366137d506ba24a716943175917447ff9ce8 100644 (file)
@@ -599,7 +599,7 @@ again:
                        break;
                }
 
-               pud = READ_ONCE(*pudp);
+               pud = pudp_get(pudp);
                if (pud_none(pud))
                        continue;
 
index dbd8daccade28c99eb1e336e01da097939cc895f..37522d6cb3988514f3e1f644647532d26b778b45 100644 (file)
@@ -439,7 +439,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
                        return -ENOMEM;
 
                pmd = pmd_offset(pud, addr);
-               if (pmd_none(READ_ONCE(*pmd))) {
+               if (pmd_none(pmdp_get(pmd))) {
                        void *p;
 
                        p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
index aadbee50a851a207b369bfe4708981b3bf67bcf8..a306b96e6515af0d40663ebf2c650d0a8e474329 100644 (file)
@@ -3751,7 +3751,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
        pud = pud_offset(p4d, start & P4D_MASK);
 restart:
        for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
-               pud_t val = READ_ONCE(pud[i]);
+               pud_t val = pudp_get(pud + i);
 
                next = pud_addr_end(addr, end);