        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-
-       /*
-        * If debug_page_alloc is enabled we must map the linear map
-        * using pages. However, other mappings created by
-        * create_mapping_noalloc must use sections in some cases. Allow
-        * sections to be used in those cases, where no pgtable_alloc
-        * function is provided.
-        */
-       return !pgtable_alloc || !debug_pagealloc_enabled();
-}
-#else
-static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
-{
-       return true;
-}
-#endif
-
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 phys_addr_t (*pgtable_alloc)(void))
+                                 phys_addr_t (*pgtable_alloc)(void),
+                                 bool allow_block_mappings)
 {
        pmd_t *pmd;
        unsigned long next;
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-                     block_mappings_allowed(pgtable_alloc)) {
+                     allow_block_mappings) {
                        pmd_t old_pmd =*pmd;
                        pmd_set_huge(pmd, phys, prot);
                        /*
 
 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 phys_addr_t (*pgtable_alloc)(void))
+                                 phys_addr_t (*pgtable_alloc)(void),
+                                 bool allow_block_mappings)
 {
        pud_t *pud;
        unsigned long next;
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys) &&
-                   block_mappings_allowed(pgtable_alloc)) {
+               if (use_1G_block(addr, next, phys) && allow_block_mappings) {
                        pud_t old_pud = *pud;
                        pud_set_huge(pud, phys, prot);
 
                        }
                } else {
                        alloc_init_pmd(pud, addr, next, phys, prot,
-                                      pgtable_alloc);
+                                      pgtable_alloc, allow_block_mappings);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
  */
 static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
-                                   phys_addr_t (*pgtable_alloc)(void))
+                                   phys_addr_t (*pgtable_alloc)(void),
+                                   bool allow_block_mappings)
 {
        unsigned long addr, length, end, next;
 
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
+               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+                              allow_block_mappings);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
 }
 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
-                                phys_addr_t (*alloc)(void))
+                                phys_addr_t (*alloc)(void),
+                                bool allow_block_mappings)
 {
-       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
+       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc,
+                allow_block_mappings);
 }
 
 /*
                        &phys, virt);
                return;
        }
-       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-                            NULL);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
-                              pgprot_t prot)
+                              pgprot_t prot, bool allow_block_mappings)
 {
        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
-                            late_pgtable_alloc);
+                            late_pgtable_alloc, allow_block_mappings);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
        }
 
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-                            late_pgtable_alloc);
+                            late_pgtable_alloc, !debug_pagealloc_enabled());
 }
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
        if (end < kernel_start || start >= kernel_end) {
                __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                     end - start, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
                return;
        }
 
                __create_pgd_mapping(pgd, start,
                                     __phys_to_virt(start),
                                     kernel_start - start, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
        if (kernel_end < end)
                __create_pgd_mapping(pgd, kernel_end,
                                     __phys_to_virt(kernel_end),
                                     end - kernel_end, PAGE_KERNEL,
-                                    early_pgtable_alloc);
+                                    early_pgtable_alloc,
+                                    !debug_pagealloc_enabled());
 
        /*
         * Map the linear alias of the [_text, __init_begin) interval as
         */
        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
                             kernel_end - kernel_start, PAGE_KERNEL_RO,
-                            early_pgtable_alloc);
+                            early_pgtable_alloc, !debug_pagealloc_enabled());
 }
 
 static void __init map_mem(pgd_t *pgd)
        BUG_ON(!PAGE_ALIGNED(size));
 
        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-                            early_pgtable_alloc);
+                            early_pgtable_alloc, !debug_pagealloc_enabled());
 
        vma->addr       = va_start;
        vma->phys_addr  = pa_start;