        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
+       struct vm_area_struct *vma;
 
        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       if (alloc->vma) {
+       vma = alloc->vma;
+       if (vma) {
                mm = get_task_mm(alloc->tsk);
                if (!mm)
                        goto err_get_task_mm_failed;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
+       }
+
+       list_lru_isolate(lru, item);
+       spin_unlock(lock);
 
+       if (vma) {
                trace_binder_unmap_user_start(alloc, index);
 
-               zap_page_range(alloc->vma,
+               zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);
 
-       list_lru_isolate(lru, item);
-
+       spin_lock(lock);
        mutex_unlock(&alloc->mutex);
-       return LRU_REMOVED;
+       return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-       mmput(mm);
+       mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
        mutex_unlock(&alloc->mutex);
 err_get_alloc_mutex_failed:
        return LRU_SKIP;
 }
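
A side note on the contract this conversion depends on: an isolate callback is entered with the lru spinlock held, and returning LRU_REMOVED_RETRY tells list_lru_walk() that the callback dropped and re-took that lock, so the walk restarts rather than trusting its position in the list. A minimal sketch of the shape (the callback name and the blocking work are hypothetical; list_lru_isolate() and the return codes are the real API):

#include <linux/list_lru.h>
#include <linux/spinlock.h>

static enum lru_status sketch_isolate(struct list_head *item,
                                      struct list_lru_one *lru,
                                      spinlock_t *lock, void *cb_arg)
{
        /* Entered with *lock held by list_lru_walk(). */
        list_lru_isolate(lru, item);    /* unlink while still locked */
        spin_unlock(lock);

        /*
         * Anything that may sleep (zap_page_range(), a mutex wait, ...)
         * is only legal here, with the lru lock dropped.
         */

        spin_lock(lock);
        return LRU_REMOVED_RETRY;       /* lock was dropped: restart walk */
}
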
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+#ifdef CONFIG_MMU
+/* Same as above, but performs the slow path from async context. Can
+ * be called from atomic context as well.
+ */
+void mmput_async(struct mm_struct *);
+#endif
 
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
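
A usage note on the pair declared above: get_task_mm() returns NULL once the mm is already on its way out, and every successful call must be balanced by mmput(), or by mmput_async() where sleeping is not allowed. A hypothetical caller, just to show the shape:

#include <linux/errno.h>
#include <linux/sched.h>

static int with_task_mm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);

        if (!mm)
                return -ESRCH;  /* mm already going away */

        /* ... use mm; this caller may sleep, so plain mmput() is fine ... */

        mmput(mm);
        return 0;
}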
 
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+#ifdef CONFIG_MMU
+static void mmput_async_fn(struct work_struct *work)
+{
+       struct mm_struct *mm = container_of(work, struct mm_struct,
+                                           async_put_work);
+
+       __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+       if (atomic_dec_and_test(&mm->mm_users)) {
+               INIT_WORK(&mm->async_put_work, mmput_async_fn);
+               schedule_work(&mm->async_put_work);
+       }
+}
+#endif
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
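
The implementation above mirrors mmput()'s fast path: atomic_dec_and_test() drops mm_users, and only the final reference triggers the slow path, but __mmput() (which tears down the address space and can sleep) is queued to a workqueue rather than called inline. That is what makes the binder error path above safe: it calls mmput_async() while the lru spinlock is still held. A hedged sketch of such a call site (the function and the lock are hypothetical):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void drop_mm_ref_atomic(struct mm_struct *mm, spinlock_t *lock)
{
        spin_lock(lock);
        /* ... a failure is detected while atomic ... */
        mmput_async(mm);        /* defers __mmput() to a workqueue; never sleeps */
        spin_unlock(lock);
}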