 static void purge_vmap_area_lazy(void);
 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
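+/*
+ * Deferred purge work: scheduled from free_vmap_area_noflush() once
+ * the amount of lazily freed memory exceeds the lazy_max_pages()
+ * threshold, and executed in workqueue context by
+ * drain_vmap_area_work().
+ */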
+static void drain_vmap_area_work(struct work_struct *work);
+static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
 
 static atomic_long_t nr_vmalloc_pages;
 
        return true;
 }
 
-/*
- * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
- * is already purging.
- */
-static void try_purge_vmap_area_lazy(void)
-{
-       if (mutex_trylock(&vmap_purge_lock)) {
-               __purge_vmap_area_lazy(ULONG_MAX, 0);
-               mutex_unlock(&vmap_purge_lock);
-       }
-}
-
 /*
  * Kick off a purge of the outstanding lazy areas.
  */
        mutex_unlock(&vmap_purge_lock);
 }
 
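+/*
+ * Purge lazily freed vmap areas from workqueue context. Keep looping
+ * until vmap_lazy_nr drops back below the lazy_max_pages() threshold,
+ * since more areas may have been queued up while a purge pass ran.
+ */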
+static void drain_vmap_area_work(struct work_struct *work)
+{
+       unsigned long nr_lazy;
+
+       do {
+               mutex_lock(&vmap_purge_lock);
+               __purge_vmap_area_lazy(ULONG_MAX, 0);
+               mutex_unlock(&vmap_purge_lock);
+
+               /* Recheck if further work is required. */
+               nr_lazy = atomic_long_read(&vmap_lazy_nr);
+       } while (nr_lazy > lazy_max_pages());
+}
+
 /*
  * Free a vmap area, the caller ensuring that the area has been unmapped
  * and flush_cache_vunmap has been called for the correct range
 
        /* After this point, we may free va at any time */
        if (unlikely(nr_lazy > lazy_max_pages()))
-               try_purge_vmap_area_lazy();
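+               /* Defer the actual purge to workqueue context. */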
+               schedule_work(&drain_vmap_work);
 }
 
 /*