typedef uint32_t gen6_gtt_pte_t;
 
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before the object is bound into, or
+ * after it is unbound from, the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+       struct drm_mm_node node;
+       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+
+       /** This object's place on the active/inactive lists */
+       struct list_head mm_list;
+
+       struct list_head vma_link; /* Link in the object's VMA list */
+
+       /** This vma's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
+
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
+
+       /**
+        * How many users have pinned this object in GTT space. The following
+        * users can each hold at most one reference: pwrite/pread, pin_ioctl
+        * (via user_pin_count), execbuffer (objects are not allowed multiple
+        * times for the same batchbuffer), and the framebuffer code. When
+        * switching/pageflipping, the framebuffer code has at most two buffers
+        * pinned per crtc.
+        *
+        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+        * bits with absolutely no headroom. So use 4 bits.
+        */
+       unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+       /** Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page. */
+       void (*unbind_vma)(struct i915_vma *vma);
+       /* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
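+       /* GLOBAL_BIND forces PTEs to be written into the global GTT even when
+        * an aliasing PPGTT is in use. */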
+       void (*bind_vma)(struct i915_vma *vma,
+                        enum i915_cache_level cache_level,
+                        u32 flags);
+};
+
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
        int (*enable)(struct drm_device *dev);
 };
 
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-       struct drm_mm_node node;
-       struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm;
-
-       /** This object's place on the active/inactive lists */
-       struct list_head mm_list;
-
-       struct list_head vma_link; /* Link in the object's VMA list */
-
-       /** This vma's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
-
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
-
-       /**
-        * How many users have pinned this object in GTT space. The following
-        * users can each hold at most one reference: pwrite/pread, pin_ioctl
-        * (via user_pin_count), execbuffer (objects are not allowed multiple
-        * times for the same batchbuffer), and the framebuffer code. When
-        * switching/pageflipping, the framebuffer code has at most two buffers
-        * pinned per crtc.
-        *
-        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-        * bits with absolutely no headroom. So use 4 bits.
-        */
-       unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-};
-
 struct i915_ctx_hang_stats {
        /* This context had batch pending when hang was declared */
        unsigned batch_pending;
 
 /* i915_gem_gtt.c */
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj);
-
 void i915_check_and_clear_faults(struct drm_device *dev);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                               enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
 
 
        trace_i915_vma_unbind(vma);
 
-       if (obj->has_global_gtt_mapping)
-               i915_gem_gtt_unbind_object(obj);
-       if (obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-               obj->has_aliasing_ppgtt_mapping = 0;
-       }
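+       /* The VMA's unbind hook tears down whatever mappings (global GTT
+        * and/or aliasing PPGTT) this VMA currently holds. */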
+       vma->unbind_vma(vma);
+
        i915_gem_gtt_finish_object(obj);
 
        list_del(&vma->mm_list);
                                    enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma;
        int ret;
 
                                return ret;
                }
 
-               if (obj->has_global_gtt_mapping)
-                       i915_gem_gtt_bind_object(obj, cache_level);
-               if (obj->has_aliasing_ppgtt_mapping)
-                       i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                              obj, cache_level);
+               list_for_each_entry(vma, &obj->vma_list, vma_link)
+                       vma->bind_vma(vma, cache_level, 0);
        }
 
        list_for_each_entry(vma, &obj->vma_list, vma_link)
                    bool map_and_fenceable,
                    bool nonblocking)
 {
+       const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
        struct i915_vma *vma;
        int ret;
 
        }
 
        if (!i915_gem_obj_bound(obj, vm)) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
                ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
                                                 map_and_fenceable,
                                                 nonblocking);
                if (ret)
                        return ret;
 
-               if (!dev_priv->mm.aliasing_ppgtt)
-                       i915_gem_gtt_bind_object(obj, obj->cache_level);
        }
 
-       if (!obj->has_global_gtt_mapping && map_and_fenceable)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       vma = i915_gem_obj_to_vma(obj, vm);
+
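+       /* Bind this VMA; GLOBAL_BIND is requested when the caller needs a
+        * mappable and fenceable (i.e. global GTT) binding. */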
+       vma->bind_vma(vma, obj->cache_level, flags);
 
        i915_gem_obj_to_vma(obj, vm)->pin_count++;
        obj->pin_mappable |= map_and_fenceable;
        return NULL;
 }
 
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                                             struct i915_address_space *vm)
-{
-       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-       if (vma == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       INIT_LIST_HEAD(&vma->vma_link);
-       INIT_LIST_HEAD(&vma->mm_list);
-       INIT_LIST_HEAD(&vma->exec_list);
-       vma->vm = vm;
-       vma->obj = obj;
-
-       /* Keep GGTT vmas first to make debug easier */
-       if (i915_is_ggtt(vm))
-               list_add(&vma->vma_link, &obj->vma_list);
-       else
-               list_add_tail(&vma->vma_link, &obj->vma_list);
-
-       return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
-
-       vma = i915_gem_obj_to_vma(obj, vm);
-       if (!vma)
-               vma = __i915_gem_vma_create(obj, vm);
-
-       return vma;
-}
-
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
        WARN_ON(vma->node.allocated);
 
 static int do_switch(struct i915_hw_context *to)
 {
        struct intel_ring_buffer *ring = to->ring;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_hw_context *from = ring->last_context;
        u32 hw_flags = 0;
        int ret, i;
                return ret;
        }
 
-       if (!to->obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
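+       /* The hardware references contexts by their global GTT address, so
+        * make sure the context object has a GGTT binding before switching. */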
+       if (!to->obj->has_global_gtt_mapping) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+                                                          &dev_priv->gtt.base);
+               vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+       }
 
        if (!to->is_initialized || is_default_context(to))
                hw_flags |= MI_RESTORE_INHIBIT;
 
               struct i915_address_space *vm,
               struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret = 0;
        i = 0;
        list_for_each_entry(obj, &objects, obj_exec_link) {
                struct i915_vma *vma;
+               struct i915_address_space *bind_vm = vm;
+
+               /* If we have secure dispatch (the last object is the batch,
+                * which must be bound into the GGTT), or userspace assures us
+                * it knows what it is doing, use the GGTT VM.
+                */
+               if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT ||
+                   ((args->flags & I915_EXEC_SECURE) &&
+                   (i == (args->buffer_count - 1))))
+                       bind_vm = &dev_priv->gtt.base;
 
                /*
                 * NOTE: We can leak any vmas created here when something fails
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
-               i915_gem_gtt_bind_object(target_i915_obj,
-                                        target_i915_obj->cache_level);
+               struct i915_vma *vma = i915_gem_obj_to_vma(target_i915_obj, vm);
+               vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
                                struct intel_ring_buffer *ring,
                                bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
-       struct drm_i915_gem_object *obj = vma->obj;
+       u32 flags = ((entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+                    !vma->obj->has_global_gtt_mapping) ? GLOBAL_BIND : 0;
        int ret;
 
        need_fence =
                }
        }
 
-       /* Ensure ppgtt mapping exists if needed */
-       if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                      obj, obj->cache_level);
-
-               obj->has_aliasing_ppgtt_mapping = 1;
-       }
-
        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-           !obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
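+       /* The VMA's bind hook chooses between global GTT and (aliasing) PPGTT
+        * PTE updates based on the flags and the VMA's address space. */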
+       vma->bind_vma(vma, obj->cache_level, flags);
 
        return 0;
 }
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
-       if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+       if (flags & I915_DISPATCH_SECURE &&
+           !batch_obj->has_global_gtt_mapping) {
+               /* When we have multiple VMs, we'll need to make sure that we
+                * allocate space first */
+               struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+               BUG_ON(!vma);
+               vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+       }
 
        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
 
 #define PPAT_CACHED_INDEX              _PAGE_PAT /* WB LLCeLLC */
 #define PPAT_DISPLAY_ELLC_INDEX                _PAGE_PCD /* WT eLLC */
 
+static void ppgtt_bind_vma(struct i915_vma *vma,
+                          enum i915_cache_level cache_level,
+                          u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
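+/* Not yet referenced by any VMA; the definitions below are marked
+ * __always_unused to silence defined-but-not-used warnings. */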
+
 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
                                             bool valid)
        dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level)
+static void __always_unused
+ppgtt_bind_vma(struct i915_vma *vma,
+              enum i915_cache_level cache_level,
+              u32 flags)
 {
-       ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
-                                  i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                                  cache_level);
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+
+       WARN_ON(flags);
+
+       vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
 }
 
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj)
+static void __always_unused ppgtt_unbind_vma(struct i915_vma *vma)
 {
-       ppgtt->base.clear_range(&ppgtt->base,
-                               i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT,
-                               true);
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+
+       vma->vm->clear_range(vma->vm,
+                            entry,
+                            vma->obj->base.size >> PAGE_SHIFT,
+                            true);
 }
 
 extern int intel_iommu_gfx_mapped;
                                       true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
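+               /* Look up the object's GGTT VMA; skip objects that have none. */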
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (!vma)
+                       continue;
+
                i915_gem_clflush_object(obj, obj->pin_display);
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+               /* The bind_vma code tracks mappings via
+                * obj->has_global_gtt_mapping, but we have just wiped out the
+                * PTEs above without telling the object. Clear the flag so
+                * that bind_vma rewrites the global GTT entries.
+                */
+               obj->has_global_gtt_mapping = 0;
+               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
        }
 
        i915_gem_chipset_flush(dev);
        readl(gtt_base);
 }
 
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
-                                    struct sg_table *st,
-                                    unsigned int pg_start,
-                                    enum i915_cache_level cache_level)
+static void i915_ggtt_bind_vma(struct i915_vma *vma,
+                              enum i915_cache_level cache_level,
+                              u32 unused)
 {
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
 
-       intel_gtt_insert_sg_entries(st, pg_start, flags);
-
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+       vma->obj->has_global_gtt_mapping = 1;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
        intel_gtt_clear_range(first_entry, num_entries);
 }
 
+static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+{
+       const unsigned int first = vma->node.start >> PAGE_SHIFT;
+       const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                             enum i915_cache_level cache_level)
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       vma->obj->has_global_gtt_mapping = 0;
+       intel_gtt_clear_range(first, size);
+}
+
+static void ggtt_bind_vma(struct i915_vma *vma,
+                         enum i915_cache_level cache_level,
+                         u32 flags)
 {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
 
-       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-                                         entry,
-                                         cache_level);
+       /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+        * or we have a global mapping already but the cacheability flags have
+        * changed, set the global PTEs.
+        *
+        * If there is an aliasing PPGTT it is anecdotally faster, so use that
+        * instead if none of the above hold true.
+        *
+        * NB: A global mapping should only be needed for special regions like
+        * "gtt mappable", SNB errata, or if specified via special execbuf
+        * flags. At all other times, the GPU will use the aliasing PPGTT.
+        */
+       if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+               if (!obj->has_global_gtt_mapping ||
+                   (cache_level != obj->cache_level)) {
+                       vma->vm->insert_entries(vma->vm, obj->pages, entry,
+                                               cache_level);
+                       obj->has_global_gtt_mapping = 1;
+               }
+       }
 
-       obj->has_global_gtt_mapping = 1;
+       if (dev_priv->mm.aliasing_ppgtt &&
+           (!obj->has_aliasing_ppgtt_mapping ||
+            (cache_level != obj->cache_level))) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.insert_entries(&appgtt->base,
+                                           vma->obj->pages, entry, cache_level);
+               vma->obj->has_aliasing_ppgtt_mapping = 1;
+       }
 }
 
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      entry,
-                                      obj->base.size >> PAGE_SHIFT,
-                                      true);
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+
+       if (obj->has_global_gtt_mapping) {
+               vma->vm->clear_range(vma->vm, entry,
+                                    vma->obj->base.size >> PAGE_SHIFT,
+                                    true);
+               obj->has_global_gtt_mapping = 0;
+       }
 
-       obj->has_global_gtt_mapping = 0;
+       if (obj->has_aliasing_ppgtt_mapping) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.clear_range(&appgtt->base,
+                                        entry,
+                                        obj->base.size >> PAGE_SHIFT,
+                                        true);
+               obj->has_aliasing_ppgtt_mapping = 0;
+       }
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
-       dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
        return 0;
 }
 
        return 0;
 }
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                             struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->exec_list);
+       vma->vm = vm;
+       vma->obj = obj;
+
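+       /* Gen6+ writes PTEs through the driver's own GTT/PPGTT hooks; gen2-5
+        * only have a global GTT and go through the legacy intel-gtt
+        * interface. */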
+       switch (INTEL_INFO(vm->dev)->gen) {
+       case 8:
+       case 7:
+       case 6:
+               vma->unbind_vma = ggtt_unbind_vma;
+               vma->bind_vma = ggtt_bind_vma;
+               break;
+       case 5:
+       case 4:
+       case 3:
+       case 2:
+               BUG_ON(!i915_is_ggtt(vm));
+               vma->unbind_vma = i915_ggtt_unbind_vma;
+               vma->bind_vma = i915_ggtt_bind_vma;
+               break;
+       default:
+               BUG();
+       }
+
+       /* Keep GGTT vmas first to make debug easier */
+       if (i915_is_ggtt(vm))
+               list_add(&vma->vma_link, &obj->vma_list);
+       else
+               list_add_tail(&vma->vma_link, &obj->vma_list);
+
+       return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = __i915_gem_vma_create(obj, vm);
+
+       return vma;
+}