* by this lock, but we want to avoid concurrent calls for performance
  * reasons and to make the pcpu_get_vm_areas more deterministic.
  */
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
  */
 static void try_purge_vmap_area_lazy(void)
 {
-       if (spin_trylock(&vmap_purge_lock)) {
+       if (mutex_trylock(&vmap_purge_lock)) {
                __purge_vmap_area_lazy(ULONG_MAX, 0);
-               spin_unlock(&vmap_purge_lock);
+               mutex_unlock(&vmap_purge_lock);
        }
 }
 
  */
 static void purge_vmap_area_lazy(void)
 {
-       spin_lock(&vmap_purge_lock);
+       mutex_lock(&vmap_purge_lock);
        purge_fragmented_blocks_allcpus();
        __purge_vmap_area_lazy(ULONG_MAX, 0);
-       spin_unlock(&vmap_purge_lock);
+       mutex_unlock(&vmap_purge_lock);
 }
 
 /*
                rcu_read_unlock();
        }
 
-       spin_lock(&vmap_purge_lock);
+       mutex_lock(&vmap_purge_lock);
        purge_fragmented_blocks_allcpus();
        if (!__purge_vmap_area_lazy(start, end) && flush)
                flush_tlb_kernel_range(start, end);
-       spin_unlock(&vmap_purge_lock);
+       mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);