 static DEFINE_SPINLOCK(vmap_area_lock);
+static DEFINE_SPINLOCK(free_vmap_area_lock);
 /* Export for kexec only */
 LIST_HEAD(vmap_area_list);
 static LLIST_HEAD(vmap_purge_list);
                 */
                pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
 
-       spin_lock(&vmap_area_lock);
+       spin_lock(&free_vmap_area_lock);
 
        if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
                kmem_cache_free(vmap_area_cachep, pva);
         * returned. Therefore trigger the overflow path.
         */
        addr = __alloc_vmap_area(size, align, vstart, vend);
+       spin_unlock(&free_vmap_area_lock);
+
        if (unlikely(addr == vend))
                goto overflow;
 
        va->va_start = addr;
        va->va_end = addr + size;
        va->vm = NULL;
-       insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
+       spin_lock(&vmap_area_lock);
+       insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        spin_unlock(&vmap_area_lock);
 
        BUG_ON(!IS_ALIGNED(va->va_start, align));
        return va;
 
 overflow:
-       spin_unlock(&vmap_area_lock);
        if (!purged) {
                purge_vmap_area_lazy();
                purged = 1;
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
-static void __free_vmap_area(struct vmap_area *va)
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
 {
        /*
         * Remove from the busy tree/list.
         */
+       spin_lock(&vmap_area_lock);
        unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
 
        /*
-        * Merge VA with its neighbors, otherwise just add it.
+        * Insert or merge it back into the free tree/list.
         */
+       spin_lock(&free_vmap_area_lock);
        merge_or_add_vmap_area(va,
                &free_vmap_area_root, &free_vmap_area_list);
-}
-
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-       spin_lock(&vmap_area_lock);
-       __free_vmap_area(va);
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&free_vmap_area_lock);
 }
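
The hunk above is the core of the lock split: a vmap area is unlinked from the busy tree under vmap_area_lock, then handed back to the free tree under free_vmap_area_lock, and the two locks are never held at the same time. Below is a minimal user-space sketch of that pattern, not kernel code: the names (struct region, busy_lock, free_lock, region_alloc, region_free) are invented for illustration, mutexes stand in for spinlocks, and plain singly linked lists stand in for the rb-tree plus list handled by insert_vmap_area() and merge_or_add_vmap_area().

#include <pthread.h>
#include <stddef.h>

struct region {
        struct region *next;
        unsigned long start;
        unsigned long size;
};

/* One lock per structure, mirroring vmap_area_lock/free_vmap_area_lock. */
static pthread_mutex_t busy_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region *busy_list;
static struct region *free_list;

/* Take a region from the free pool, then publish it in the busy set. */
static struct region *region_alloc(void)
{
        struct region *r;

        pthread_mutex_lock(&free_lock);
        r = free_list;
        if (r)
                free_list = r->next;
        pthread_mutex_unlock(&free_lock);       /* drop before touching busy set */

        if (!r)
                return NULL;

        pthread_mutex_lock(&busy_lock);
        r->next = busy_list;
        busy_list = r;
        pthread_mutex_unlock(&busy_lock);

        return r;
}

/* Unlink a region from the busy set, then return it to the free pool. */
static void region_free(struct region *r)
{
        struct region **pp;

        pthread_mutex_lock(&busy_lock);
        for (pp = &busy_list; *pp; pp = &(*pp)->next) {
                if (*pp == r) {
                        *pp = r->next;
                        break;
                }
        }
        pthread_mutex_unlock(&busy_lock);

        pthread_mutex_lock(&free_lock);
        r->next = free_list;
        free_list = r;
        pthread_mutex_unlock(&free_lock);
}

int main(void)
{
        static struct region pool[4];
        struct region *r;
        int i;

        /* Seed the free pool with a few regions. */
        for (i = 0; i < 4; i++)
                region_free(&pool[i]);

        r = region_alloc();
        if (r)
                region_free(r);

        return 0;
}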
 
 /*
        flush_tlb_kernel_range(start, end);
        resched_threshold = lazy_max_pages() << 1;
 
-       spin_lock(&vmap_area_lock);
+       spin_lock(&free_vmap_area_lock);
        llist_for_each_entry_safe(va, n_va, valist, purge_list) {
                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
                atomic_long_sub(nr, &vmap_lazy_nr);
 
                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
-                       cond_resched_lock(&vmap_area_lock);
+                       cond_resched_lock(&free_vmap_area_lock);
        }
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&free_vmap_area_lock);
        return true;
 }
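
As a hedged aside, the loop above uses the usual lock-break idiom: the whole drain runs under free_vmap_area_lock, and once vmap_lazy_nr has dropped below the resched threshold, cond_resched_lock() is allowed to briefly release the lock and reschedule so other waiters are not starved. A user-space analogue of the same idiom, with invented names (drain_lock, items_left, drain_items) and sched_yield() standing in for the conditional reschedule:

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
static long items_left = 1024;

static void drain_items(void)
{
        pthread_mutex_lock(&drain_lock);
        while (items_left > 0) {
                items_left--;           /* process one item under the lock */

                /*
                 * Rough stand-in for cond_resched_lock(): every so often
                 * release the lock, give waiters and the scheduler a
                 * chance to run, then re-take the lock and continue.
                 */
                if ((items_left & 63) == 0) {
                        pthread_mutex_unlock(&drain_lock);
                        sched_yield();
                        pthread_mutex_lock(&drain_lock);
                }
        }
        pthread_mutex_unlock(&drain_lock);
}

int main(void)
{
        drain_items();
        return 0;
}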
 
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-                             unsigned long flags, const void *caller)
+static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
+       struct vmap_area *va, unsigned long flags, const void *caller)
 {
-       spin_lock(&vmap_area_lock);
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
+}
+
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+                             unsigned long flags, const void *caller)
+{
+       spin_lock(&vmap_area_lock);
+       setup_vmalloc_vm_locked(vm, va, flags, caller);
        spin_unlock(&vmap_area_lock);
 }
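
The change above follows the common "_locked helper" convention: setup_vmalloc_vm_locked() expects the caller to already hold vmap_area_lock, while setup_vmalloc_vm() remains the wrapper that takes the lock for a single call; pcpu_get_vm_areas() below uses the locked variant so it can hold the lock once around its whole insertion loop. A minimal user-space sketch of the same convention, not kernel code, with invented names (stats_lock, struct counter, counter_add_locked, counter_add, counter_add_many):

#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

struct counter {
        long value;
};

/* Caller must hold stats_lock. */
static void counter_add_locked(struct counter *c, long delta)
{
        c->value += delta;
}

/* Convenience wrapper for callers that do not hold the lock. */
static void counter_add(struct counter *c, long delta)
{
        pthread_mutex_lock(&stats_lock);
        counter_add_locked(c, delta);
        pthread_mutex_unlock(&stats_lock);
}

/* Batch path: take the lock once around the whole loop. */
static void counter_add_many(struct counter *c, const long *deltas, int n)
{
        int i;

        pthread_mutex_lock(&stats_lock);
        for (i = 0; i < n; i++)
                counter_add_locked(c, deltas[i]);
        pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
        struct counter c = { 0 };
        static const long deltas[] = { 1, 2, 3 };

        counter_add(&c, 10);                    /* one-off caller: wrapper takes the lock */
        counter_add_many(&c, deltas, 3);        /* batch caller: one lock round trip */

        return 0;
}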
 
                        goto err_free;
        }
 retry:
-       spin_lock(&vmap_area_lock);
+       spin_lock(&free_vmap_area_lock);
 
        /* start scanning - we scan from the top, begin with the last area */
        area = term_area = last_area;
                va = vas[area];
                va->va_start = start;
                va->va_end = start + size;
-
-               insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        }
 
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&free_vmap_area_lock);
 
        /* insert all vm's */
-       for (area = 0; area < nr_vms; area++)
-               setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+       spin_lock(&vmap_area_lock);
+       for (area = 0; area < nr_vms; area++) {
+               insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
+
+               setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
                                 pcpu_get_vm_areas);
+       }
+       spin_unlock(&vmap_area_lock);
 
        kfree(vas);
        return vms;
 
 recovery:
-       /* Remove previously inserted areas. */
+       /*
+        * Remove previously allocated areas. There is no
+        * need to remove these areas from the busy tree,
+        * because they are inserted only on the final step
+        * and only if pcpu_get_vm_areas() succeeds.
+        */
        while (area--) {
-               __free_vmap_area(vas[area]);
+               merge_or_add_vmap_area(vas[area],
+                       &free_vmap_area_root, &free_vmap_area_list);
                vas[area] = NULL;
        }
 
 overflow:
-       spin_unlock(&vmap_area_lock);
+       spin_unlock(&free_vmap_area_lock);
        if (!purged) {
                purge_vmap_area_lazy();
                purged = true;
 
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
+       __acquires(&vmap_purge_lock)
        __acquires(&vmap_area_lock)
 {
+       mutex_lock(&vmap_purge_lock);
        spin_lock(&vmap_area_lock);
+
        return seq_list_start(&vmap_area_list, *pos);
 }
 
 }
 
 static void s_stop(struct seq_file *m, void *p)
+       __releases(&vmap_purge_lock)
        __releases(&vmap_area_lock)
 {
+       mutex_unlock(&vmap_purge_lock);
        spin_unlock(&vmap_area_lock);
 }
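
The __acquires()/__releases() lines added above are annotations for the sparse static checker, not runtime code: __acquires() says the function returns with the named lock held, and __releases() that it is entered with the lock held and drops it, so sparse's context tracking does not flag s_start()/s_stop() as unbalanced. A hedged, self-contained sketch of how such annotations are wired up, with stand-in macro definitions mirroring the kernel's compiler_types.h (no-ops unless __CHECKER__ is defined) and invented iter_lock/iter_begin/iter_end names:

#include <pthread.h>

/*
 * Stand-ins for the kernel annotations so this sketch builds on its own.
 * Under sparse (__CHECKER__) they expand to context-tracking attributes;
 * in a normal compile they are empty.
 */
#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t iter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Begin an iteration: returns with iter_lock held. */
static void iter_begin(void)
        __acquires(&iter_lock)
{
        pthread_mutex_lock(&iter_lock);
}

/* End an iteration: releases the lock taken by iter_begin(). */
static void iter_end(void)
        __releases(&iter_lock)
{
        pthread_mutex_unlock(&iter_lock);
}

int main(void)
{
        iter_begin();
        /* ... walk the shared list here ... */
        iter_end();
        return 0;
}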