#include <engine/fifo.h>
 
+struct fence_work {
+       struct work_struct base;
+       struct list_head head;
+       void (*func)(void *);
+       void *data;
+};
+
+static void
+nouveau_fence_signal(struct nouveau_fence *fence)
+{
+       struct fence_work *work, *temp;
+
+       list_for_each_entry_safe(work, temp, &fence->work, head) {
+               schedule_work(&work->base);
+               list_del(&work->head);
+       }
+
+       fence->channel = NULL;
+       list_del(&fence->head);
+}
+
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
        struct nouveau_fence *fence, *fnext;
        spin_lock(&fctx->lock);
        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
-               fence->channel = NULL;
-               list_del(&fence->head);
-               nouveau_fence_unref(&fence);
+               nouveau_fence_signal(fence);
        }
        spin_unlock(&fctx->lock);
 }
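
Both removal loops above (nouveau_fence_signal() and nouveau_fence_context_del()) use list_for_each_entry_safe() because the current entry is unlinked inside the loop body, so the iterator has to cache the next entry before the current one is deleted. A generic, self-contained illustration of the idiom; struct item and drain() are hypothetical, not nouveau code:

/* Sketch of the _safe iteration idiom used above; 'struct item' and
 * drain() are hypothetical and only exist for this example. */
#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head head;
        int value;
};

static void drain(struct list_head *list)
{
        struct item *it, *tmp;

        /* 'tmp' already points at the next entry, so unlinking and
         * freeing 'it' inside the loop is safe. */
        list_for_each_entry_safe(it, tmp, list, head) {
                list_del(&it->head);
                kfree(it);
        }
}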
@@ ... @@ nouveau_fence_context_new
        spin_lock_init(&fctx->lock);
 }
 
+static void
+nouveau_fence_work_handler(struct work_struct *kwork)
+{
+       struct fence_work *work = container_of(kwork, typeof(*work), base);
+       work->func(work->data);
+       kfree(work);
+}
+
+void
+nouveau_fence_work(struct nouveau_fence *fence,
+                  void (*func)(void *), void *data)
+{
+       struct nouveau_channel *chan = fence->channel;
+       struct nouveau_fence_chan *fctx;
+       struct fence_work *work = NULL;
+
+       if (nouveau_fence_done(fence)) {
+               func(data);
+               return;
+       }
+
+       fctx = chan->fence;
+       work = kmalloc(sizeof(*work), GFP_KERNEL);
+       if (!work) {
+               WARN_ON(nouveau_fence_wait(fence, false, false));
+               func(data);
+               return;
+       }
+
+       spin_lock(&fctx->lock);
+       if (!fence->channel) {
+               spin_unlock(&fctx->lock);
+               kfree(work);
+               func(data);
+               return;
+       }
+
+       INIT_WORK(&work->base, nouveau_fence_work_handler);
+       work->func = func;
+       work->data = data;
+       list_add(&work->head, &fence->work);
+       spin_unlock(&fctx->lock);
+}
+
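
nouveau_fence_work() runs the callback at once if the fence has already signalled, if the kmalloc() fails (after a synchronous wait), or if the channel has already been torn down; otherwise it parks the callback on fence->work for nouveau_fence_signal() to fire later, from the system workqueue. A hypothetical caller, just to show the calling convention (the real user added by this patch is nouveau_gem_object_unmap() further down); my_obj_free() and my_obj_release() are illustrative names and assume the nouveau fence header is included:

/* Hypothetical example, not part of the patch: free a driver-private
 * object only once the GPU work guarded by 'fence' has completed. */
static void my_obj_free(void *data)
{
        kfree(data);    /* may run immediately, or later from the workqueue */
}

static void my_obj_release(struct nouveau_fence *fence, void *obj)
{
        /* Immediate if the fence is already done, deferred otherwise. */
        nouveau_fence_work(fence, my_obj_free, obj);
}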
 static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
@@ ... @@ nouveau_fence_update
                if (fctx->read(chan) < fence->sequence)
                        break;
 
-               fence->channel = NULL;
-               list_del(&fence->head);
+               nouveau_fence_signal(fence);
                nouveau_fence_unref(&fence);
        }
        spin_unlock(&fctx->lock);
@@ ... @@ nouveau_fence_new
        if (!fence)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&fence->work);
        fence->sysmem = sysmem;
        kref_init(&fence->kref);
 
 
        return ret;
 }
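
The nouveau_fence.h half of the change is not shown in this excerpt. From the uses above (INIT_LIST_HEAD(&fence->work), the walk over &fence->work in nouveau_fence_signal()) it presumably adds a work list to struct nouveau_fence and declares the new entry point, roughly:

/* Sketch of the header additions implied above; only members that appear
 * in this excerpt are listed, the real struct may carry more fields. */
struct nouveau_fence {
        struct list_head head;          /* entry on fctx->pending */
        struct list_head work;          /* new: queued fence_work items */
        struct kref kref;

        bool sysmem;

        struct nouveau_channel *channel;
        u32 sequence;
};

void nouveau_fence_work(struct nouveau_fence *,
                        void (*)(void *), void *);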
 
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ ... @@
+static void
+nouveau_gem_object_delete(void *data)
+{
+       struct nouveau_vma *vma = data;
+       nouveau_vm_unmap(vma);
+       nouveau_vm_put(vma);
+       kfree(vma);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+       const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
+       struct nouveau_fence *fence = NULL;
+
+       list_del(&vma->head);
+
+       if (mapped) {
+               spin_lock(&nvbo->bo.bdev->fence_lock);
+               if (nvbo->bo.sync_obj)
+                       fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+               spin_unlock(&nvbo->bo.bdev->fence_lock);
+       }
+
+       if (fence) {
+               nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
+       } else {
+               if (mapped)
+                       nouveau_vm_unmap(vma);
+               nouveau_vm_put(vma);
+               kfree(vma);
+       }
+       nouveau_fence_unref(&fence);
+}
+
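
The net effect in the GEM path: when the buffer is actually mapped (memory type other than TTM_PL_SYSTEM) and still carries a fence in bo.sync_obj, the vma unmap/put/kfree is handed to nouveau_fence_work() and runs from the workqueue once the GPU is finished with the buffer; otherwise the vma is torn down immediately, with the unmap skipped for buffers that were never placed. The nouveau_fence_unref() at the end simply drops the reference taken on bo.sync_obj above.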
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
 
@@ ... @@ nouveau_gem_object_close
        vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
        if (vma) {
-               if (--vma->refcount == 0) {
-                       nouveau_bo_vma_del(nvbo, vma);
-                       kfree(vma);
-               }
+               if (--vma->refcount == 0)
+                       nouveau_gem_object_unmap(nvbo, vma);
        }
        ttm_bo_unreserve(&nvbo->bo);
 }