} while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+       /*
+        * If debug_pagealloc is enabled we must map the linear map
+        * using pages. However, other mappings created by
+        * create_mapping_noalloc must use sections in some cases. Allow
+        * sections to be used in those cases, where no pgtable_alloc
+        * function is provided.
+        */
+       return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+       return true;
+}
+#endif
+
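For reference, block_mappings_allowed() leans on the generic
debug_pagealloc_enabled() predicate. A sketch of that helper as it stood
in include/linux/mm.h around this series (context only, not part of this
patch; the exact form may vary between kernel versions):

    #ifdef CONFIG_DEBUG_PAGEALLOC
    extern bool _debug_pagealloc_enabled;

    static inline bool debug_pagealloc_enabled(void)
    {
            /* Set from the debug_pagealloc= boot parameter. */
            return _debug_pagealloc_enabled;
    }
    #endif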
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
                                  phys_addr_t (*pgtable_alloc)(void))
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                     block_mappings_allowed(pgtable_alloc)) {
                        pmd_t old_pmd = *pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys)) {
+               if (use_1G_block(addr, next, phys) &&
+                   block_mappings_allowed(pgtable_alloc)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));
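
Both hunks above pair an alignment test with block_mappings_allowed().
The section-mapping test works because (addr | next | phys) & ~SECTION_MASK
is zero only when the virtual start, virtual end and physical address are
all section-aligned (2MB with a 4K granule). A minimal standalone
illustration of the same trick (values assume the 4K granule;
section_aligned() is a hypothetical name, not a kernel symbol):

    #include <stdbool.h>            /* for a standalone build of this sketch */

    #define SECTION_SHIFT   21      /* 2MB sections with a 4K granule */
    #define SECTION_SIZE    (1UL << SECTION_SHIFT)
    #define SECTION_MASK    (~(SECTION_SIZE - 1))

    /*
     * OR-ing the three values merges their low bits; masking with
     * ~SECTION_MASK keeps only the bits below the section boundary,
     * so the result is zero iff all three are 2MB-aligned.
     */
    static bool section_aligned(unsigned long addr, unsigned long next,
                                unsigned long phys)
    {
            return ((addr | next | phys) & ~SECTION_MASK) == 0;
    }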
 
        return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                       &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
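apply_to_page_range() walks the kernel page tables over
[start, start + size) and invokes change_page_range() on each PTE; that
callback (whose tail appears just above) already exists in pageattr.c and
simply applies the two masks. Quoted here for context, as it stood around
this series:

    struct page_change_data {
            pgprot_t set_mask;
            pgprot_t clear_mask;
    };

    static int change_page_range(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
    {
            struct page_change_data *cdata = data;
            pte_t pte = *ptep;

            pte = clear_pte_bit(pte, cdata->clear_mask);
            pte = set_pte_bit(pte, cdata->set_mask);

            set_pte(ptep, pte);
            return 0;
    }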
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
-       int ret;
-       struct page_change_data data;
        struct vm_struct *area;
 
        if (!PAGE_ALIGNED(addr)) {
        if (!numpages)
                return 0;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
-
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
                                        __pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
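
The existing set_memory_* helpers are unchanged by the refactor: each
still funnels through change_memory_common() with its own mask pair,
e.g. (context, as it stood around this series):

    int set_memory_ro(unsigned long addr, int numpages)
    {
            return change_memory_common(addr, numpages,
                                            __pgprot(PTE_RDONLY),
                                            __pgprot(PTE_WRITE));
    }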
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long addr = (unsigned long) page_address(page);
+
+       if (enable)
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(PTE_VALID),
+                                       __pgprot(0));
+       else
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(0),
+                                       __pgprot(PTE_VALID));
+}
+#endif
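
The allocator never calls __kernel_map_pages() directly: it goes through
the generic kernel_map_pages() wrapper, which returns early when
DEBUG_PAGEALLOC is built in but not enabled on the command line. That
early return is also why block_mappings_allowed() may keep using sections
in the disabled case. A sketch of the wrapper as defined in
include/linux/mm.h around this series (context, not part of this patch):

    static inline void
    kernel_map_pages(struct page *page, int numpages, int enable)
    {
            if (!debug_pagealloc_enabled())
                    return;

            __kernel_map_pages(page, numpages, enable);
    }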