 struct list_head        *vmemmap_pages;
 };
 
-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 {
        pmd_t __pmd;
        int i;
        unsigned long addr = start;
-       struct page *page = pmd_page(*pmd);
-       pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
+       struct page *head;
+       pte_t *pgtable;
+
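+       /*
+        * Sample the PMD under init_mm.page_table_lock: only a leaf
+        * (huge page) mapping needs splitting, and a concurrent remap
+        * may already have replaced it with a PTE table.
+        */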
+       spin_lock(&init_mm.page_table_lock);
+       head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
+       spin_unlock(&init_mm.page_table_lock);
 
+       if (!head)
+               return 0;
+
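+       /* Allocate the replacement PTE table unlocked, as it may sleep. */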
+       pgtable = pte_alloc_one_kernel(&init_mm);
        if (!pgtable)
                return -ENOMEM;
 
        pmd_populate_kernel(&init_mm, &__pmd, pgtable);
 
        for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
                pte_t entry, *pte;
                pgprot_t pgprot = PAGE_KERNEL;
 
-               entry = mk_pte(page + i, pgprot);
+               entry = mk_pte(head + i, pgprot);
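+               /*
+                * Install the PTE in the detached table that will later
+                * replace the huge PMD mapping.
+                */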
                pte = pte_offset_kernel(&__pmd, addr);
                set_pte_at(&init_mm, addr, pte, entry);
        }
 
        spin_lock(&init_mm.page_table_lock);
        if (likely(pmd_leaf(*pmd))) {
                /*
                 * Higher order allocations from buddy allocator must be able to
                 * be treated as independent small pages (as they can be freed
                 * individually).
                 */
-               if (!PageReserved(page))
-                       split_page(page, get_order(PMD_SIZE));
+               if (!PageReserved(head))
+                       split_page(head, get_order(PMD_SIZE));
 
                /* Make pte visible before pmd. See comment in pmd_install(). */
                smp_wmb();
                pmd_populate_kernel(&init_mm, pmd, pgtable);
                flush_tlb_kernel_range(start, start + PMD_SIZE);
        } else {
                pte_free_kernel(&init_mm, pgtable);
        }
        spin_unlock(&init_mm.page_table_lock);
 
        return 0;
 }
 
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
-{
-       int leaf;
-
-       spin_lock(&init_mm.page_table_lock);
-       leaf = pmd_leaf(*pmd);
-       spin_unlock(&init_mm.page_table_lock);
-
-       if (!leaf)
-               return 0;
-
-       return __split_vmemmap_huge_pmd(pmd, start);
-}
-
 static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
                              unsigned long end,
                              struct vmemmap_remap_walk *walk)