}
 
 void i915_ppgtt_close(struct i915_address_space *vm)
+{
+       GEM_BUG_ON(vm->closed);
+       vm->closed = true;
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
        struct list_head *phases[] = {
 		&vm->active_list,
 		&vm->inactive_list,
 		&vm->unbound_list,
                NULL,
        }, **phase;
 
-       GEM_BUG_ON(vm->closed);
        vm->closed = true;
-
        for (phase = phases; *phase; phase++) {
                struct i915_vma *vma, *vn;
 
                list_for_each_entry_safe(vma, vn, *phase, vm_link)
-                       if (!i915_vma_is_closed(vma))
-                               i915_vma_close(vma);
+                       i915_vma_destroy(vma);
        }
 }
 
 
        trace_i915_ppgtt_release(&ppgtt->base);
 
-       /* vmas should already be unbound and destroyed */
+       ppgtt_destroy_vma(&ppgtt->base);
+
        GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
        GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
        GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
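
With closing now lazy, a ppgtt may still hold live vmas when its last
reference is dropped, so the release path gains an explicit sweep before
asserting the lists are empty. A minimal sketch of the resulting teardown
order; example_teardown() is illustrative, not code from this patch:

	static void example_teardown(struct i915_hw_ppgtt *ppgtt)
	{
		i915_ppgtt_close(&ppgtt->base);	/* mark closed; vmas remain */
		i915_ppgtt_put(ppgtt);		/* on the last reference, the
						 * release path runs
						 * ppgtt_destroy_vma() first */
	}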
 
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-       if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
-               WARN_ON(i915_vma_unbind(vma));
 
        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)

 	if (!vma)
                vma = vma_create(obj, vm, view);
 
-       GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;

 	return ret;
 }
 
-static void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_close(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       GEM_BUG_ON(i915_vma_is_closed(vma));
+       vma->flags |= I915_VMA_CLOSED;
+
+       /*
+        * We defer actually closing, unbinding and destroying the VMA until
+        * the next idle point, or if the object is freed in the meantime. By
+        * postponing the unbind, we allow for it to be resurrected by the
+        * client, avoiding the work required to rebind the VMA. This is
+        * advantageous for DRI, where the client/server pass objects
+        * between themselves, temporarily opening a local VMA to the
+        * object, and then closing it again. The same object is then reused
+        * on the next frame (or two, depending on the depth of the swap queue)
+        * causing us to rebind the VMA once more. This ends up being a lot
+        * of wasted work for the steady state.
+        */
+       list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+       if (vma->flags & I915_VMA_CLOSED) {
+               vma->flags &= ~I915_VMA_CLOSED;
+               list_del(&vma->closed_link);
+       }
+}
+
+static void __i915_vma_destroy(struct i915_vma *vma)
 {
        int i;
 
        GEM_BUG_ON(vma->node.allocated);
-       GEM_BUG_ON(i915_vma_is_active(vma));
-       GEM_BUG_ON(!i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->fence);
 
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
 	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
        list_del(&vma->obj_link);
        list_del(&vma->vm_link);
+       rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
 }
 
-void i915_vma_close(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
 {
-       GEM_BUG_ON(i915_vma_is_closed(vma));
-       vma->flags |= I915_VMA_CLOSED;
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-       rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+       GEM_BUG_ON(i915_vma_is_active(vma));
+       GEM_BUG_ON(i915_vma_is_pinned(vma));
+
+       if (i915_vma_is_closed(vma))
+               list_del(&vma->closed_link);
+
+       WARN_ON(i915_vma_unbind(vma));
+       __i915_vma_destroy(vma);
+}
+
+void i915_vma_parked(struct drm_i915_private *i915)
+{
+       struct i915_vma *vma, *next;
 
-       if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
-               WARN_ON(i915_vma_unbind(vma));
+       list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
+               GEM_BUG_ON(!i915_vma_is_closed(vma));
+               i915_vma_destroy(vma);
+       }
+
+       GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
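
The comment in i915_vma_close() above carries the key idea: closing becomes
O(1) bookkeeping on i915->gt.closed_vma, and the costly unbind is deferred
to the next idle point. A minimal sketch of the resulting lifecycle,
assuming the wiring done elsewhere in the patch (gt.closed_vma
initialisation and the call to i915_vma_parked() when the GPU parks), which
is outside this excerpt:

	/* userspace drops its handle: defer, don't unbind */
	i915_vma_close(vma);	/* vma goes onto i915->gt.closed_vma */

	/* same object reused before the GPU idles: resurrect cheaply */
	i915_vma_reopen(vma);	/* delisted; no rebind required */

	/* otherwise, once the GPU parks: */
	i915_vma_parked(i915);	/* i915_vma_destroy() on each pending vma */
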
                return -EBUSY;
 
        if (!drm_mm_node_allocated(&vma->node))
-               goto destroy;
+               return 0;
 
        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
        i915_vma_remove(vma);
 
-destroy:
-       if (unlikely(i915_vma_is_closed(vma)))
-               i915_vma_destroy(vma);
-
        return 0;
 }
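
Note the contract change here: i915_vma_unbind() previously freed a closed
vma as a side effect via the destroy: label; it now only unbinds, and
freeing is always an explicit i915_vma_destroy(), which performs its own
unbind first. For illustration:

	err = i915_vma_unbind(vma);	/* vma remains valid on return */
	if (err)
		return err;
	/* ... */
	i915_vma_destroy(vma);		/* unbinds (if needed) and frees */
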
 
 
        /** This vma's place in the eviction list */
        struct list_head evict_link;
 
+       struct list_head closed_link;
+
        /**
         * Used for performing relocations during execbuffer insertion.
         */

 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
+void i915_vma_reopen(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
 
 int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags);

 		__i915_vma_unpin_fence(vma);
 }
 
+void i915_vma_parked(struct drm_i915_private *i915);
+
 #define for_each_until(cond) if (cond) break; else
 
 /**