#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_LAZY_FREE   0x02
 #define VM_VM_AREA     0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
        llist_for_each_entry_safe(va, n_va, valist, purge_list) {
                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
-               __free_vmap_area(va);
+               /*
+                * Finally insert or merge lazily-freed area. It is
+                * detached and there is no need to "unlink" it from
+                * anything.
+                */
+               merge_or_add_vmap_area(va,
+                       &free_vmap_area_root, &free_vmap_area_list);
+
                atomic_long_sub(nr, &vmap_lazy_nr);
 
                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
 {
        unsigned long nr_lazy;
 
+       spin_lock(&vmap_area_lock);
+       unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
+
        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
                                PAGE_SHIFT, &vmap_lazy_nr);
 
 
        might_sleep();
 
-       va = find_vmap_area((unsigned long)addr);
+       spin_lock(&vmap_area_lock);
+       va = __find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA) {
                struct vm_struct *vm = va->vm;
 
-               spin_lock(&vmap_area_lock);
                va->vm = NULL;
                va->flags &= ~VM_VM_AREA;
-               va->flags |= VM_LAZY_FREE;
                spin_unlock(&vmap_area_lock);
 
                kasan_free_shadow(vm);
 
                return vm;
        }
+
+       spin_unlock(&vmap_area_lock);
        return NULL;
 }
 
        }
 }
 
+/*
+ * Dump the vmap areas that are still queued on vmap_purge_list
+ * (lazily freed but not yet purged), printing each one in the
+ * same "start-end size tag" form used by s_show(), tagged as
+ * "unpurged vm_area". Lockless walk: the list head is sampled
+ * once with READ_ONCE(), so concurrently added entries may be
+ * missed — acceptable for informational /proc output.
+ */
+static void show_purge_info(struct seq_file *m)
+{
+       struct llist_node *head;
+       struct vmap_area *va;
+
+       head = READ_ONCE(vmap_purge_list.first);
+       if (head == NULL)
+               return;
+
+       llist_for_each_entry(va, head, purge_list) {
+               seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+                       (void *)va->va_start, (void *)va->va_end,
+                       va->va_end - va->va_start);
+       }
+}
+
 static int s_show(struct seq_file *m, void *p)
 {
        struct vmap_area *va;
         * behalf of vmap area is being tear down or vm_map_ram allocation.
         */
        if (!(va->flags & VM_VM_AREA)) {
-               seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+               seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                        (void *)va->va_start, (void *)va->va_end,
-                       va->va_end - va->va_start,
-                       va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+                       va->va_end - va->va_start);
 
                return 0;
        }
 
        show_numa_info(m, v);
        seq_putc(m, '\n');
+
+       /*
+        * As a final step, dump "unpurged" areas. Note,
+        * that entire "/proc/vmallocinfo" output will not
+        * be address sorted, because the purge list is not
+        * sorted.
+        */
+       if (list_is_last(&va->list, &vmap_area_list))
+               show_purge_info(m);
+
        return 0;
 }