return va;
 }
 
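+/*
+ * Bind a vm_struct to a vmap_area: record the area's address and size,
+ * together with @flags and the allocation @caller, in @vm, and link it
+ * back through @va->vm.
+ */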
+static inline void setup_vmalloc_vm(struct vm_struct *vm,
+       struct vmap_area *va, unsigned long flags, const void *caller)
+{
+       vm->flags = flags;
+       vm->addr = (void *)va->va_start;
+       vm->size = va->va_end - va->va_start;
+       vm->caller = caller;
+       va->vm = vm;
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
- * vstart and vend.
+ * vstart and vend. If vm is passed in, it is also bound to the newly
+ * allocated area.
  */
 static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long align,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask,
-                               unsigned long va_flags)
+                               unsigned long va_flags, struct vm_struct *vm,
+                               unsigned long flags, const void *caller)
 {
        struct vmap_node *vn;
        struct vmap_area *va;
        va->vm = NULL;
        va->flags = (va_flags | vn_id);
 
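+       /*
+        * Bind the vm_struct, if any, before the area is linked into the
+        * busy tree below, so callers no longer have to take the busy lock
+        * a second time just to perform the binding.
+        */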
+       if (vm)
+               setup_vmalloc_vm(vm, va, flags, caller);
+
        vn = addr_to_node(va->va_start);
 
        spin_lock(&vn->busy.lock);
        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
                                        VMALLOC_START, VMALLOC_END,
                                        node, gfp_mask,
-                                       VMAP_RAM|VMAP_BLOCK);
+                                       VMAP_RAM|VMAP_BLOCK, NULL,
+                                       0, NULL);
        if (IS_ERR(va)) {
                kfree(vb);
                return ERR_CAST(va);
                struct vmap_area *va;
                va = alloc_vmap_area(size, PAGE_SIZE,
                                VMALLOC_START, VMALLOC_END,
-                               node, GFP_KERNEL, VMAP_RAM);
+                               node, GFP_KERNEL, VMAP_RAM,
+                               NULL, 0, NULL);
                if (IS_ERR(va))
                        return NULL;
 
        kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
-       struct vmap_area *va, unsigned long flags, const void *caller)
-{
-       vm->flags = flags;
-       vm->addr = (void *)va->va_start;
-       vm->size = va->va_end - va->va_start;
-       vm->caller = caller;
-       va->vm = vm;
-}
-
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, const void *caller)
-{
-       struct vmap_node *vn = addr_to_node(va->va_start);
-
-       spin_lock(&vn->busy.lock);
-       setup_vmalloc_vm_locked(vm, va, flags, caller);
-       spin_unlock(&vn->busy.lock);
-}
-
 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 {
        /*
        if (!(flags & VM_NO_GUARD))
                size += PAGE_SIZE;
 
-       va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
+       va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0,
+                               area, flags, caller);
        if (IS_ERR(va)) {
                kfree(area);
                return NULL;
        }
 
-       setup_vmalloc_vm(area, va, flags, caller);
-
        /*
         * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
         * best-effort approach, as they can be mapped outside of vmalloc code.
 
                spin_lock(&vn->busy.lock);
                insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
-               setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
+               setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
                                 pcpu_get_vm_areas);
                spin_unlock(&vn->busy.lock);
        }