        VM_BUG_ON(!PAGE_ALIGNED(start | end));
 
+       mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
                                    NULL, walk);
+       mmap_read_unlock(&init_mm);
        if (ret)
                return ret;
 
 static int vmemmap_remap_split(unsigned long start, unsigned long end,
                               unsigned long reuse)
 {
-       int ret;
        struct vmemmap_remap_walk walk = {
                .remap_pte      = NULL,
                .flags          = VMEMMAP_SPLIT_NO_TLB_FLUSH,
        };

        /* See the comment in the vmemmap_remap_free(). */
        BUG_ON(start - reuse != PAGE_SIZE);
 
-       mmap_read_lock(&init_mm);
-       ret = vmemmap_remap_range(reuse, end, &walk);
-       mmap_read_unlock(&init_mm);
-
-       return ret;
+       return vmemmap_remap_range(reuse, end, &walk);
 }
 
 /**
         */
        BUG_ON(start - reuse != PAGE_SIZE);
 
-       mmap_read_lock(&init_mm);
        ret = vmemmap_remap_range(reuse, end, &walk);
        if (ret && walk.nr_walked) {
                end = reuse + walk.nr_walked * PAGE_SIZE;
 
                vmemmap_remap_range(reuse, end, &walk);
        }
-       mmap_read_unlock(&init_mm);
 
        return ret;
 }
        if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
                return -ENOMEM;
 
-       mmap_read_lock(&init_mm);
-       vmemmap_remap_range(reuse, end, &walk);
-       mmap_read_unlock(&init_mm);
-
-       return 0;
+       return vmemmap_remap_range(reuse, end, &walk);
 }
 
 DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
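
For reference, a minimal sketch (not part of the diff above) of how vmemmap_remap_range() reads once the init_mm read lock is taken inside it; the TLB-flush tail is assumed from the surrounding file and is not touched by this patch:

static int vmemmap_remap_range(unsigned long start, unsigned long end,
                               struct vmemmap_remap_walk *walk)
{
        int ret;

        VM_BUG_ON(!PAGE_ALIGNED(start | end));

        /* Serialize against other walkers of the kernel page tables. */
        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
                                    NULL, walk);
        mmap_read_unlock(&init_mm);
        if (ret)
                return ret;

        /* Assumed tail: flush only when PTEs were actually remapped. */
        if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
                flush_tlb_kernel_range(start, end);

        return 0;
}

With the locking inside the helper, vmemmap_remap_split(), vmemmap_remap_free() and vmemmap_remap_alloc() no longer take the lock themselves and can return (or propagate) the helper's result directly, as the hunks above do.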