mm/treewide: replace pmd_large() with pmd_leaf()
Author:     Peter Xu <peterx@redhat.com>
AuthorDate: Tue, 5 Mar 2024 04:37:47 +0000 (12:37 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 6 Mar 2024 21:04:19 +0000 (13:04 -0800)
pmd_large() is always defined as pmd_leaf().  Merge their usages.  pmd_leaf()
was chosen because it is a global API, while pmd_large() is not.
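
For context, pmd_leaf() reports whether a PMD entry directly maps a huge
page (a leaf of the page-table tree) rather than pointing to a lower-level
page table.  A minimal sketch of the walker pattern shared by the converted
call sites below (illustrative only: pmd_none()/pmd_leaf() are the real
kernel APIs, but the walker and its callbacks are hypothetical and not part
of this commit):

	static void walk_one_pmd(pmd_t *pmd, unsigned long addr)
	{
		if (pmd_none(*pmd))
			return;				/* nothing mapped here */
		if (pmd_leaf(*pmd)) {
			/* entry maps a huge page; there is no PTE table */
			note_huge_mapping(addr);	/* hypothetical */
			return;
		}
		/* entry points to a PTE table; descend one level */
		walk_pte_table(pmd, addr);		/* hypothetical */
	}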

Link: https://lkml.kernel.org/r/20240305043750.93762-8-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
25 files changed:
arch/arm/mm/dump.c
arch/powerpc/mm/book3s64/pgtable.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/mm/pgtable_64.c
arch/s390/boot/vmem.c
arch/s390/include/asm/pgtable.h
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgtable.c
arch/s390/mm/vmem.c
arch/sparc/mm/init_64.c
arch/x86/boot/compressed/ident_map_64.c
arch/x86/kvm/mmu/mmu.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/mm/pat/set_memory.c
arch/x86/mm/pgtable.c
arch/x86/mm/pti.c
arch/x86/power/hibernate.c
arch/x86/xen/mmu_pv.c
drivers/misc/sgi-gru/grufault.c

diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index a9381095ab3662cc6f2e7fdcbedb387c12efeed2..cd032522d902fff4cf8adc13f6a83d00f344fb12 100644
@@ -349,12 +349,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                domain = get_domain_name(pmd);
-               if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+               if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
                        note_page(st, addr, 4, pmd_val(*pmd), domain);
                else
                        walk_pte(st, pmd, addr, domain);
 
-               if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+               if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
                        addr += SECTION_SIZE;
                        pmd++;
                        domain = get_domain_name(pmd);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 3438ab72c346b8c09ed2340ca3792f6b85c5b172..45f526547b277c73b1535e9180c69dbed4e8c5cb 100644
@@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
-       WARN_ON(!(pmd_large(pmd)));
+       WARN_ON(!(pmd_leaf(pmd)));
 #endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 1f8db10693e32586c1a46fa16e5a387002105466..5cc4008329be79d521c961d277df3ae79fed9093 100644
@@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                                unsigned long addr, unsigned long next)
 {
-       int large = pmd_large(*pmdp);
+       int large = pmd_leaf(*pmdp);
 
        if (large)
                vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 386c6b06eab7819466ec680701243e663d7154c1..9b99113cb51a8f3df10c5ec103fa52fbb8ba8737 100644
@@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd)
                 * enabled so these checks can't be used.
                 */
                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
-                       VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+                       VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index e3a4500a5a75714370285f46f8c5a97518b6a31d..348ab02b10282c3f035086681acd31068275e643 100644
@@ -333,7 +333,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
                        }
                        pte = boot_pte_alloc();
                        pmd_populate(&init_mm, pmd, pte);
-               } else if (pmd_large(*pmd)) {
+               } else if (pmd_leaf(*pmd)) {
                        continue;
                }
                pgtable_pte_populate(pmd, addr, next, mode);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4b91e65c85d97a585eed74e8619f17aa0be40310..431d03d5116bccaea6d01f9e8165ac6607e4d846 100644
@@ -721,7 +721,7 @@ static inline int pmd_large(pmd_t pmd)
 
 static inline int pmd_bad(pmd_t pmd)
 {
-       if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+       if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
                return 1;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
@@ -820,8 +820,8 @@ static inline int pte_protnone(pte_t pte)
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-       /* pmd_large(pmd) implies pmd_present(pmd) */
-       return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+       /* pmd_leaf(pmd) implies pmd_present(pmd) */
+       return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
 }
 #endif
 
@@ -1385,7 +1385,7 @@ static inline unsigned long pmd_deref(pmd_t pmd)
        unsigned long origin_mask;
 
        origin_mask = _SEGMENT_ENTRY_ORIGIN;
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pmd_val(pmd) & origin_mask);
 }
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index e43a5a3befd49e321998fb72669cdfc26b4aa7ec..767c6f077b53cfd28b759da2112c5ae94592122c 100644
@@ -603,7 +603,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* Are we allowed to use huge pages? */
-       if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
+       if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
@@ -615,7 +615,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc) {
-                       if (pmd_large(*pmd)) {
+                       if (pmd_leaf(*pmd)) {
                                *table = (pmd_val(*pmd) &
                                          _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
                                        | _SEGMENT_ENTRY_GMAP_UC;
@@ -945,7 +945,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
        }
 
        /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
-       if (!pmd_large(*pmdp))
+       if (!pmd_leaf(*pmdp))
                spin_unlock(&gmap->guest_table_lock);
        return pmdp;
 }
@@ -957,7 +957,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
  */
 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
 {
-       if (pmd_large(*pmdp))
+       if (pmd_leaf(*pmdp))
                spin_unlock(&gmap->guest_table_lock);
 }
 
@@ -1068,7 +1068,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                rc = -EAGAIN;
                pmdp = gmap_pmd_op_walk(gmap, gaddr);
                if (pmdp) {
-                       if (!pmd_large(*pmdp)) {
+                       if (!pmd_leaf(*pmdp)) {
                                rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
                                                      bits);
                                if (!rc) {
@@ -2500,7 +2500,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
        if (!pmdp)
                return;
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
                        bitmap_fill(bitmap, _PAGE_ENTRIES);
        } else {
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 297a6d897d5a0c0e2e00f271ae23d918c4c6862a..1ccb5b40fe92528b2da91216681b431d7ffe6d4e 100644
@@ -235,7 +235,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 
 int pmd_huge(pmd_t pmd)
 {
-       return pmd_large(pmd);
+       return pmd_leaf(pmd);
 }
 
 int pud_huge(pud_t pud)
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 631e3a4ee2de82f66d4d97232e84734522cfe666..9f55d5a3210c09559d3be0494772164dd26cf99f 100644
@@ -185,7 +185,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                if (pmd_none(*pmdp))
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
-               if (pmd_large(*pmdp)) {
+               if (pmd_leaf(*pmdp)) {
                        need_split  = !!(flags & SET_MEMORY_4K);
                        need_split |= !!(addr & ~PMD_MASK);
                        need_split |= !!(addr + PMD_SIZE > next);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b71432b15d665c7e496fa74fd182e06f7f215fbc..9ac66304d77640268f07941645659101527735c1 100644
@@ -827,7 +827,7 @@ again:
                return key ? -EFAULT : 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                /*
@@ -938,7 +938,7 @@ again:
                return 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                cc = page_reset_referenced(paddr);
@@ -1002,7 +1002,7 @@ again:
                return 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                *key = page_get_storage_key(paddr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index eb100479f7bec4c8078675552be4badd95e3b978..afe5edf2a60447b9cc993b962cf4289c9258af12 100644
@@ -236,7 +236,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
                if (!add) {
                        if (pmd_none(*pmd))
                                continue;
-                       if (pmd_large(*pmd)) {
+                       if (pmd_leaf(*pmd)) {
                                if (IS_ALIGNED(addr, PMD_SIZE) &&
                                    IS_ALIGNED(next, PMD_SIZE)) {
                                        if (!direct)
@@ -281,7 +281,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
                        if (!pte)
                                goto out;
                        pmd_populate(&init_mm, pmd, pte);
-               } else if (pmd_large(*pmd)) {
+               } else if (pmd_leaf(*pmd)) {
                        if (!direct)
                                vmemmap_use_sub_pmd(addr, next);
                        continue;
@@ -610,7 +610,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
                if (!pte)
                        goto out;
                pmd_populate(&init_mm, pmd, pte);
-       } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+       } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
                goto out;
        }
        ptep = pte_offset_kernel(pmd, addr);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f83017992eaaeb79e757adebf7cb2923e4859b0b..5e067b6a44643b17aac963edf03090ae4abe9d1c 100644
@@ -1672,7 +1672,7 @@ bool kern_addr_valid(unsigned long addr)
        if (pmd_none(*pmd))
                return false;
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return pfn_valid(pmd_pfn(*pmd));
 
        pte = pte_offset_kernel(pmd, addr);
@@ -2968,7 +2968,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
        struct mm_struct *mm;
        pmd_t entry = *pmd;
 
-       if (!pmd_large(entry) || !pmd_young(entry))
+       if (!pmd_leaf(entry) || !pmd_young(entry))
                return;
 
        pte = pmd_val(entry);
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index d040080d7edbd6ba53b9556102ac732a4f90e8e3..71c6e2fdcec776532264eb14cc89c64e8196c1cd 100644
@@ -284,7 +284,7 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
        pudp = pud_offset(p4dp, address);
        pmdp = pmd_offset(pudp, address);
 
-       if (pmd_large(*pmdp))
+       if (pmd_leaf(*pmdp))
                ptep = split_large_pmd(info, pmdp, address);
        else
                ptep = pte_offset_kernel(pmdp, address);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2d6cdeab1f8a3e78306148d44a4665a1d51d8b1e..c15123248c526a622372e46904b184659d687fe8 100644
@@ -3135,7 +3135,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
        if (pmd_none(pmd) || !pmd_present(pmd))
                goto out;
 
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                level = PG_LEVEL_2M;
 
 out:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8b69ce3f411542f0f3080d78fed97c17afd51bdc..09417f95034331c432f585f803dbc236b0e803c1 100644
@@ -250,7 +250,7 @@ static noinline int vmalloc_fault(unsigned long address)
        if (!pmd_k)
                return -1;
 
-       if (pmd_large(*pmd_k))
+       if (pmd_leaf(*pmd_k))
                return 0;
 
        pte_k = pte_offset_kernel(pmd_k, address);
@@ -319,7 +319,7 @@ static void dump_pagetable(unsigned long address)
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
-       if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+       if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
                goto out;
 
        pte = pte_offset_kernel(pmd, address);
@@ -384,7 +384,7 @@ static void dump_pagetable(unsigned long address)
                goto bad;
 
        pr_cont("PMD %lx ", pmd_val(*pmd));
-       if (!pmd_present(*pmd) || pmd_large(*pmd))
+       if (!pmd_present(*pmd) || pmd_leaf(*pmd))
                goto out;
 
        pte = pte_offset_kernel(pmd, address);
@@ -1053,7 +1053,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
        if (!pmd_present(*pmd))
                return 0;
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
 
        pte = pte_offset_kernel(pmd, address);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5c736b707caea07bd41c1c904ee548b51399c1d2..ac41b1e0940d47ca647dc2006c40b6005c2dae4f 100644
@@ -463,7 +463,7 @@ void __init native_pagetable_init(void)
                        break;
 
                /* should not be large page here */
-               if (pmd_large(*pmd)) {
+               if (pmd_leaf(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d691e7992a9ab795568811bb9c0f0ade084e1d9b..2c5490e58f41cc67f6d9f43efa4294f45dd42701 100644
@@ -530,7 +530,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
                }
 
                if (!pmd_none(*pmd)) {
-                       if (!pmd_large(*pmd)) {
+                       if (!pmd_leaf(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
@@ -1114,7 +1114,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
                if (!pmd_present(*pmd))
                        continue;
 
-               if (pmd_large(*pmd)) {
+               if (pmd_leaf(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
@@ -1520,9 +1520,9 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                unsigned long addr, unsigned long next)
 {
-       int large = pmd_large(*pmd);
+       int large = pmd_leaf(*pmd);
 
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                vmemmap_verify((pte_t *)pmd, node, addr, next);
                vmemmap_use_sub_pmd(addr, next);
        }
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0302491d799d1b2227826eab5a01f76403e75edc..f41d26bc91611371ce4dd3b82e7cebf3c78b4040 100644
@@ -95,7 +95,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (!pmd_large(*pmd))
+               if (!pmd_leaf(*pmd))
                        kasan_populate_pmd(pmd, addr, next, nid);
        } while (pmd++, addr = next, addr != end);
 }
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index d73aeb16417fcfbb99607c3bc2241962e3590948..bca4fea8057913b188c9bf3a6ec6aaecb47b8092 100644
@@ -161,7 +161,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
                return;
 
        pmd = pmd_offset(pud, ppd->vaddr);
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return;
 
        set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
@@ -185,7 +185,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
                set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
        }
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return;
 
        pte = pte_offset_kernel(pmd, ppd->vaddr);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 5359a9c8809976297321b54fafc50572a5374a8f..b4037fe08eed39577e4b8bffa7a879c27e52704f 100644
@@ -692,7 +692,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                return NULL;
 
        *level = PG_LEVEL_2M;
-       if (pmd_large(*pmd) || !pmd_present(*pmd))
+       if (pmd_leaf(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;
 
        *level = PG_LEVEL_4K;
@@ -1229,7 +1229,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
         * Try to unmap in 2M chunks.
         */
        while (end - start >= PMD_SIZE) {
-               if (pmd_large(*pmd))
+               if (pmd_leaf(*pmd))
                        pmd_clear(pmd);
                else
                        __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 0cbc1b8e8e3d10185e4e63ff3fab4e0057417f99..d05dd86ceb41616c35329719a350b92e6e9cd41e 100644
@@ -792,7 +792,7 @@ int pud_clear_huge(pud_t *pud)
  */
 int pmd_clear_huge(pmd_t *pmd)
 {
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index c17aab24c1b3a63a6611e24b0dbe784a10e358b1..0442e8f479a65afb1d38a644b3088502f0852549 100644
@@ -252,7 +252,7 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
                return NULL;
 
        /* We can't do anything sensible if we hit a large mapping. */
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                WARN_ON(1);
                return NULL;
        }
@@ -341,7 +341,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
                        continue;
                }
 
-               if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+               if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
                        target_pmd = pti_user_pagetable_walk_pmd(addr);
                        if (WARN_ON(!target_pmd))
                                return;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 28153789f87396302e525e5b77a7a60066e2bf4f..277eaf610e0e0df7ca7546ae0aaa9e98d09ca7da 100644
@@ -175,7 +175,7 @@ int relocate_restore_code(void)
                goto out;
        }
        pmd = pmd_offset(pud, relocated_restore_code);
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                goto out;
        }
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 12a43a4abebfb5f733127de60379d3eb9ad5e3d1..dde551bbd2316fbefa552863255b93390988970d 100644
@@ -1059,7 +1059,7 @@ static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
        pte_t *pte_tbl;
        int i;
 
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, PMD_SIZE);
                return;
@@ -1871,7 +1871,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
        if (!pmd_present(pmd))
                return 0;
        pa = pmd_val(pmd) & PTE_PFN_MASK;
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                return pa + (vaddr & ~PMD_MASK);
 
        pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 629edb6486deaef514ba15a52176b84fc17465e7..3557d78ee47a27bb61d6248bfa7c2133a19bb917 100644
@@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        if (unlikely(pmd_none(*pmdp)))
                goto err;
 #ifdef CONFIG_X86_64
-       if (unlikely(pmd_large(*pmdp)))
+       if (unlikely(pmd_leaf(*pmdp)))
                pte = ptep_get((pte_t *)pmdp);
        else
 #endif