gen8_ppgtt_notify_vgt(ppgtt, true);
 
        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->vm.bind_vma = gen8_ppgtt_bind_vma;
-       ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->vm.set_pages = ppgtt_set_pages;
-       ppgtt->vm.clear_pages = clear_pages;
        ppgtt->debug_dump = gen8_dump_ppgtt;
 
+       ppgtt->vm.vma_ops.bind_vma    = gen8_ppgtt_bind_vma;
+       ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
        return 0;
 
 free_scratch:
 
        ppgtt->vm.clear_range = gen6_ppgtt_clear_range;
        ppgtt->vm.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->vm.bind_vma = gen6_ppgtt_bind_vma;
-       ppgtt->vm.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->vm.set_pages = ppgtt_set_pages;
-       ppgtt->vm.clear_pages = clear_pages;
        ppgtt->vm.cleanup = gen6_ppgtt_cleanup;
        ppgtt->debug_dump = gen6_dump_ppgtt;
 
+       ppgtt->vm.vma_ops.bind_vma    = gen6_ppgtt_bind_vma;
+       ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
        DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
                         ppgtt->node.size >> 20,
                         ppgtt->node.start / PAGE_SIZE);
 
        i915->mm.aliasing_ppgtt = ppgtt;
 
-       GEM_BUG_ON(ggtt->vm.bind_vma != ggtt_bind_vma);
-       ggtt->vm.bind_vma = aliasing_gtt_bind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+       ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
 
-       GEM_BUG_ON(ggtt->vm.unbind_vma != ggtt_unbind_vma);
-       ggtt->vm.unbind_vma = aliasing_gtt_unbind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+       ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 
        return 0;
 
 
        i915_ppgtt_put(ppgtt);
 
-       ggtt->vm.bind_vma = ggtt_bind_vma;
-       ggtt->vm.unbind_vma = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 
        ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
        ggtt->vm.cleanup = gen6_gmch_remove;
-       ggtt->vm.bind_vma = ggtt_bind_vma;
-       ggtt->vm.unbind_vma = ggtt_unbind_vma;
-       ggtt->vm.set_pages = ggtt_set_pages;
-       ggtt->vm.clear_pages = clear_pages;
        ggtt->vm.insert_page = gen8_ggtt_insert_page;
        ggtt->vm.clear_range = nop_clear_range;
        if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
 
        ggtt->invalidate = gen6_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        setup_private_pat(dev_priv);
 
        return ggtt_probe_common(ggtt, size);
        ggtt->vm.clear_range = gen6_ggtt_clear_range;
        ggtt->vm.insert_page = gen6_ggtt_insert_page;
        ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
-       ggtt->vm.bind_vma = ggtt_bind_vma;
-       ggtt->vm.unbind_vma = ggtt_unbind_vma;
-       ggtt->vm.set_pages = ggtt_set_pages;
-       ggtt->vm.clear_pages = clear_pages;
        ggtt->vm.cleanup = gen6_gmch_remove;
 
        ggtt->invalidate = gen6_ggtt_invalidate;
        else
                ggtt->vm.pte_encode = snb_pte_encode;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        return ggtt_probe_common(ggtt, size);
 }
 
        ggtt->vm.insert_page = i915_ggtt_insert_page;
        ggtt->vm.insert_entries = i915_ggtt_insert_entries;
        ggtt->vm.clear_range = i915_ggtt_clear_range;
-       ggtt->vm.bind_vma = ggtt_bind_vma;
-       ggtt->vm.unbind_vma = ggtt_unbind_vma;
-       ggtt->vm.set_pages = ggtt_set_pages;
-       ggtt->vm.clear_pages = clear_pages;
        ggtt->vm.cleanup = i915_gmch_remove;
 
        ggtt->invalidate = gmch_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        if (unlikely(ggtt->do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
 
 
 struct drm_i915_file_private;
 struct drm_i915_fence_reg;
+struct i915_vma;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
        struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
+struct i915_vma_ops {
+       /* Map an object into an address space with the given cache flags. */
+       int (*bind_vma)(struct i915_vma *vma,
+                       enum i915_cache_level cache_level,
+                       u32 flags);
+       /*
+        * Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page.
+        */
+       void (*unbind_vma)(struct i915_vma *vma);
+
+       /*
+        * NOTE(review): set_pages/clear_pages appear to acquire and release
+        * the backing-page state for the vma (called around node allocation
+        * and removal in i915_vma bind/unbind paths) — confirm against the
+        * ppgtt_set_pages()/clear_pages() implementations.
+        */
+       int (*set_pages)(struct i915_vma *vma);
+       void (*clear_pages)(struct i915_vma *vma);
+};
+
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_i915_private *i915;
                               enum i915_cache_level cache_level,
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
-       /** Unmap an object from an address space. This usually consists of
-        * setting the valid PTE entries to a reserved scratch page. */
-       void (*unbind_vma)(struct i915_vma *vma);
-       /* Map an object into an address space with the given cache flags. */
-       int (*bind_vma)(struct i915_vma *vma,
-                       enum i915_cache_level cache_level,
-                       u32 flags);
-       int (*set_pages)(struct i915_vma *vma);
-       void (*clear_pages)(struct i915_vma *vma);
+
+       struct i915_vma_ops vma_ops;
 
        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
        I915_SELFTEST_DECLARE(bool scrub_64K);
 
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
+       vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
        GEM_BUG_ON(!vma->pages);
 
        trace_i915_vma_bind(vma, bind_flags);
-       ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+       ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
 
 
        GEM_BUG_ON(vma->pages);
 
-       ret = vma->vm->set_pages(vma);
+       ret = vma->ops->set_pages(vma);
        if (ret)
                goto err_unpin;
 
        return 0;
 
 err_clear:
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 err_unpin:
        if (vma->obj)
                i915_gem_object_unpin_pages(vma->obj);
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
-               vma->vm->unbind_vma(vma);
+               vma->ops->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
 
        ppgtt->vm.clear_range = nop_clear_range;
        ppgtt->vm.insert_page = mock_insert_page;
        ppgtt->vm.insert_entries = mock_insert_entries;
-       ppgtt->vm.bind_vma = mock_bind_ppgtt;
-       ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
-       ppgtt->vm.set_pages = ppgtt_set_pages;
-       ppgtt->vm.clear_pages = clear_pages;
        ppgtt->vm.cleanup = mock_cleanup;
 
+       ppgtt->vm.vma_ops.bind_vma    = mock_bind_ppgtt;
+       ppgtt->vm.vma_ops.unbind_vma  = mock_unbind_ppgtt;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
+
        return ppgtt;
 }
 
        ggtt->vm.clear_range = nop_clear_range;
        ggtt->vm.insert_page = mock_insert_page;
        ggtt->vm.insert_entries = mock_insert_entries;
-       ggtt->vm.bind_vma = mock_bind_ggtt;
-       ggtt->vm.unbind_vma = mock_unbind_ggtt;
-       ggtt->vm.set_pages = ggtt_set_pages;
-       ggtt->vm.clear_pages = clear_pages;
        ggtt->vm.cleanup = mock_cleanup;
 
+       ggtt->vm.vma_ops.bind_vma    = mock_bind_ggtt;
+       ggtt->vm.vma_ops.unbind_vma  = mock_unbind_ggtt;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        i915_address_space_init(&ggtt->vm, i915, "global");
 }