*
  * Check if a mem object already has address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 {
        return mem->mm_node != NULL;
 }
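
(Aside, not part of the patch: a minimal C sketch of how a caller might use the renamed helper to decide whether a GTT placement still needs a GART address. The wrapper name is illustrative; the helper, the mem_type field and TTM_PL_TT all appear in the hunks of this series.)

static bool bo_needs_gart_addr(struct ttm_buffer_object *tbo)
{
        /* only GTT placements carry a GART address; a NULL mm_node means
         * no address space has been assigned yet */
        return tbo->mem.mem_type == TTM_PL_TT &&
               !amdgpu_gtt_mgr_has_gart_addr(&tbo->mem);
}
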
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
                              struct ttm_buffer_object *tbo,
                              const struct ttm_place *place,
-                             struct ttm_mem_reg *mem)
+                             struct ttm_resource *mem)
 {
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
        struct amdgpu_gtt_node *node;
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-                              struct ttm_mem_reg *mem)
+                              struct ttm_resource *mem)
 {
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
        struct amdgpu_gtt_node *node = mem->mm_node;
 
  */
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
-                          struct ttm_mem_reg *new_mem)
+                          struct ttm_resource *new_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;
 
                           uint64_t *flags);
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
-                          struct ttm_mem_reg *new_mem);
+                          struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 
  * Assign the memory from new_mem to the memory of the buffer object bo.
  */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *new_mem)
+                            struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
  */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
                                    struct drm_mm_node *mm_node,
-                                   struct ttm_mem_reg *mem)
+                                   struct ttm_resource *mem)
 {
        uint64_t addr = 0;
 
  * @offset: The offset that drm_mm_node is used for finding.
  *
  */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
                                               uint64_t *offset)
 {
        struct drm_mm_node *mm_node = mem->mm_node;
  * the physical address for local memory.
  */
 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
-                                struct ttm_mem_reg *mem,
+                                struct ttm_resource *mem,
                                 struct drm_mm_node *mm_node,
                                 unsigned num_pages, uint64_t offset,
                                 unsigned window, struct amdgpu_ring *ring,
  */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            bool evict, bool no_wait_gpu,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+                           struct ttm_resource *new_mem,
+                           struct ttm_resource *old_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
  */
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;
  */
 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;
  * Called by amdgpu_bo_move()
  */
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-                              struct ttm_mem_reg *mem)
+                              struct ttm_resource *mem)
 {
        struct drm_mm_node *nodes = mem->mm_node;
 
        if (mem->mem_type != TTM_PL_VRAM)
                return false;
 
-       /* ttm_mem_reg_ioremap only supports contiguous memory */
+       /* ttm_resource_ioremap only supports contiguous memory */
        if (nodes->size != mem->num_pages)
                return false;
 
  */
 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
-                         struct ttm_mem_reg *new_mem)
+                         struct ttm_resource *new_mem)
 {
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int r;
 
        /* Can't move a pinned BO */
  *
  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
  */
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct drm_mm_node *mm_node = mem->mm_node;
                        return -EINVAL;
                /* Only physically contiguous buffers apply. In a contiguous
                 * buffer, size of the first mm_node would match the number of
-                * pages in ttm_mem_reg.
+                * pages in ttm_resource.
                 */
                if (adev->mman.aper_base_kaddr &&
                    (mm_node->size == mem->num_pages))
  * This handles binding GTT memory to the device address space.
  */
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
-                                  struct ttm_mem_reg *bo_mem)
+                                  struct ttm_resource *bo_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
-       struct ttm_mem_reg tmp;
+       struct ttm_resource tmp;
        struct ttm_placement placement;
        struct ttm_place placements;
        uint64_t addr, flags;
  *
  * Figure out the flags to use for a VM PDE (Page Directory Entry).
  */
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
 {
        uint64_t flags = 0;
 
  * Figure out the flags to use for a VM PTE (Page Table Entry).
  */
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
-                                struct ttm_mem_reg *mem)
+                                struct ttm_resource *mem)
 {
        uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
 
 
 
 struct amdgpu_copy_mem {
        struct ttm_buffer_object        *bo;
-       struct ttm_mem_reg              *mem;
+       struct ttm_resource             *mem;
        unsigned long                   offset;
 };
 
 int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-                             struct ttm_mem_reg *mem,
+                             struct ttm_resource *mem,
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt);
                                       int *last_invalidated);
 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
-                                struct ttm_mem_reg *mem);
+                                struct ttm_resource *mem);
 
 int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 
 
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
-       struct ttm_mem_reg *mem;
+       struct ttm_resource *mem;
        struct drm_mm_node *nodes;
        struct dma_fence **last_update;
        struct dma_resv *resv;
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct ttm_mem_reg *mem = &bo->tbo.mem;
+       struct ttm_resource *mem = &bo->tbo.mem;
        struct drm_mm_node *nodes = mem->mm_node;
        unsigned pages = mem->num_pages;
        u64 usage;
 /**
  * amdgpu_vram_mgr_virt_start - update virtual start address
  *
- * @mem: ttm_mem_reg to update
+ * @mem: ttm_resource to update
  * @node: just allocated node
  *
  * Calculate a virtual BO start address to easily check if everything is CPU
  * accessible.
  */
-static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
                                       struct drm_mm_node *node)
 {
        unsigned long start;
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
-                              struct ttm_mem_reg *mem)
+                              struct ttm_resource *mem)
 {
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = mgr->adev;
  * Free the allocated VRAM again.
  */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-                               struct ttm_mem_reg *mem)
+                               struct ttm_resource *mem)
 {
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = mgr->adev;
  * Allocate and fill a sg table from a VRAM allocation.
  */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-                             struct ttm_mem_reg *mem,
+                             struct ttm_resource *mem,
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt)
 
 
 static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                                               bool evict,
-                                              struct ttm_mem_reg *new_mem)
+                                              struct ttm_resource *new_mem)
 {
        struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
 
 
 static void bo_driver_move_notify(struct ttm_buffer_object *bo,
                                  bool evict,
-                                 struct ttm_mem_reg *new_mem)
+                                 struct ttm_resource *new_mem)
 {
        struct drm_gem_vram_object *gbo;
 
 }
 
 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
-                                   struct ttm_mem_reg *mem)
+                                   struct ttm_resource *mem)
 {
        struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
 
 
 
 static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 10);
 
 static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
 
 static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
 
 static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
 
 static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 7);
 
 static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        int ret = RING_SPACE(chan, 7);
 
 static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 length = (new_reg->num_pages << PAGE_SHIFT);
 
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-                     struct nouveau_channel *chan, struct ttm_mem_reg *reg)
+                     struct nouveau_channel *chan, struct ttm_resource *reg)
 {
        if (reg->mem_type == TTM_PL_TT)
                return NvDmaTT;
 
 static int
 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-                 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+                 struct ttm_resource *old_reg, struct ttm_resource *new_reg)
 {
        u32 src_offset = old_reg->start << PAGE_SHIFT;
        u32 dst_offset = new_reg->start << PAGE_SHIFT;
 
 static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
-                    struct ttm_mem_reg *reg)
+                    struct ttm_resource *reg)
 {
        struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
        struct nouveau_mem *new_mem = nouveau_mem(reg);
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+                    bool no_wait_gpu, struct ttm_resource *new_reg)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
 
        /* create temporary vmas for the transfer and attach them to the
         * old nvkm_mem node, these will get cleaned up after ttm has
-        * destroyed the ttm_mem_reg
+        * destroyed the ttm_resource
         */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_move_prep(drm, bo, new_reg);
                s32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
-                           struct ttm_mem_reg *, struct ttm_mem_reg *);
+                           struct ttm_resource *, struct ttm_resource *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+                     bool no_wait_gpu, struct ttm_resource *new_reg)
 {
        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
-       struct ttm_mem_reg tmp_reg;
+       struct ttm_resource tmp_reg;
        int ret;
 
        placement.num_placement = placement.num_busy_placement = 1;
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+                     bool no_wait_gpu, struct ttm_resource *new_reg)
 {
        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
-       struct ttm_mem_reg tmp_reg;
+       struct ttm_resource tmp_reg;
        int ret;
 
        placement.num_placement = placement.num_busy_placement = 1;
 
 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
-                    struct ttm_mem_reg *new_reg)
+                    struct ttm_resource *new_reg)
 {
        struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 }
 
 static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
                   struct nouveau_drm_tile **new_tile)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
                struct ttm_operation_ctx *ctx,
-               struct ttm_mem_reg *new_reg)
+               struct ttm_resource *new_reg)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
-       struct ttm_mem_reg *old_reg = &bo->mem;
+       struct ttm_resource *old_reg = &bo->mem;
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;
 
 }
 
 static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
 }
 
 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_mem *mem = nouveau_mem(reg);
 
                atomic_t validate_sequence;
                int (*move)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
-                           struct ttm_mem_reg *, struct ttm_mem_reg *);
+                           struct ttm_resource *, struct ttm_resource *);
                struct nouveau_channel *chan;
                struct nvif_object copy;
                int mtrr;
 
 }
 
 int
-nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
 {
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
 }
 
 int
-nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 {
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
 }
 
 void
-nouveau_mem_del(struct ttm_mem_reg *reg)
+nouveau_mem_del(struct ttm_resource *reg)
 {
        struct nouveau_mem *mem = nouveau_mem(reg);
        nouveau_mem_fini(mem);
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-               struct ttm_mem_reg *reg)
+               struct ttm_resource *reg)
 {
        struct nouveau_mem *mem;
 
 
 #include <nvif/vmm.h>
 
 static inline struct nouveau_mem *
-nouveau_mem(struct ttm_mem_reg *reg)
+nouveau_mem(struct ttm_resource *reg)
 {
        return reg->mm_node;
 }
 };
 
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-                   struct ttm_mem_reg *);
-void nouveau_mem_del(struct ttm_mem_reg *);
-int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+                   struct ttm_resource *);
+void nouveau_mem_del(struct ttm_resource *);
+int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
 #endif
 
 }
 
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *mem = nouveau_mem(reg);
 };
 
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_resource *reg)
 {
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_mem *mem = nouveau_mem(reg);
 
 #include <core/tegra.h>
 
 static void
-nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_mem_reg *reg)
+nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
 {
        nouveau_mem_del(reg);
 }
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
-                        struct ttm_mem_reg *reg)
+                        struct ttm_resource *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
-                        struct ttm_mem_reg *reg)
+                        struct ttm_resource *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 nv04_gart_manager_new(struct ttm_resource_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
-                     struct ttm_mem_reg *reg)
+                     struct ttm_resource *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 
 {
        struct nv10_fence_priv *priv = chan->drm->fence;
        struct nv10_fence_chan *fctx;
-       struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+       struct ttm_resource *reg = &priv->bo->bo.mem;
        u32 start = reg->start * PAGE_SIZE;
        u32 limit = start + reg->size - 1;
        int ret = 0;
 
 {
        struct nv10_fence_priv *priv = chan->drm->fence;
        struct nv10_fence_chan *fctx;
-       struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+       struct ttm_resource *reg = &priv->bo->bo.mem;
        u32 start = reg->start * PAGE_SIZE;
        u32 limit = start + reg->size - 1;
        int ret;
 
 int qxl_ttm_init(struct qxl_device *qdev);
 void qxl_ttm_fini(struct qxl_device *qdev);
 int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
-                          struct ttm_mem_reg *mem);
+                          struct ttm_resource *mem);
 
 /* qxl image */
 
 
 }
 
 int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
-                          struct ttm_mem_reg *mem)
+                          struct ttm_resource *mem)
 {
        struct qxl_device *qdev = qxl_get_qdev(bdev);
 
 };
 
 static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
-                               struct ttm_mem_reg *bo_mem)
+                               struct ttm_resource *bo_mem)
 {
        struct qxl_ttm_tt *gtt = (void *)ttm;
 
 }
 
 static void qxl_move_null(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *new_mem)
+                            struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
 
 static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
-                      struct ttm_mem_reg *new_mem)
+                      struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int ret;
 
        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
                               bool evict,
-                              struct ttm_mem_reg *new_mem)
+                              struct ttm_resource *new_mem)
 {
        struct qxl_bo *qbo;
        struct qxl_device *qdev;
 
                             struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
                        struct radeon_bo_va *bo_va,
-                       struct ttm_mem_reg *mem);
+                       struct ttm_resource *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
 
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
-                          struct ttm_mem_reg *new_mem)
+                          struct ttm_resource *new_mem)
 {
        struct radeon_bo *rbo;
 
 
                                bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                                  bool evict,
-                                 struct ttm_mem_reg *new_mem);
+                                 struct ttm_resource *new_mem);
 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 
 }
 
 static void radeon_move_null(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *new_mem)
+                            struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
                        bool evict, bool no_wait_gpu,
-                       struct ttm_mem_reg *new_mem,
-                       struct ttm_mem_reg *old_mem)
+                       struct ttm_resource *new_mem,
+                       struct ttm_resource *old_mem)
 {
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;
 
 static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
-                         struct ttm_mem_reg *new_mem)
+                         struct ttm_resource *new_mem)
 {
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int r;
 
        r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        return 0;
 }
 
-static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
 
 }
 
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
-                                  struct ttm_mem_reg *bo_mem)
+                                  struct ttm_resource *bo_mem)
 {
        struct radeon_ttm_tt *gtt = (void*)ttm;
        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
 
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
                        struct radeon_bo_va *bo_va,
-                       struct ttm_mem_reg *mem)
+                       struct ttm_resource *mem)
 {
        struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
 
        struct agp_bridge_data *bridge;
 };
 
-static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
        struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
 
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
-                                 struct ttm_mem_reg *mem)
+                                 struct ttm_resource *mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man;
 EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
-                                 struct ttm_mem_reg *mem, bool evict,
+                                 struct ttm_resource *mem, bool evict,
                                  struct ttm_operation_ctx *ctx)
 {
        struct ttm_bo_device *bdev = bo->bdev;
                        struct ttm_operation_ctx *ctx)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_mem_reg evict_mem;
+       struct ttm_resource evict_mem;
        struct ttm_placement placement;
        int ret = 0;
 
 
 static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
                          const struct ttm_place *place,
-                         struct ttm_mem_reg *mem)
+                         struct ttm_resource *mem)
 {
        struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
 
        return man->func->get_node(man, bo, place, mem);
 }
 
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem)
 {
        struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, mem->mem_type);
 
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                                 struct ttm_resource_manager *man,
-                                struct ttm_mem_reg *mem,
+                                struct ttm_resource *mem,
                                 bool no_wait_gpu)
 {
        struct dma_fence *fence;
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
-                                 struct ttm_mem_reg *mem,
+                                 struct ttm_resource *mem,
                                  struct ttm_operation_ctx *ctx)
 {
        struct ttm_bo_device *bdev = bo->bdev;
  */
 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
                                const struct ttm_place *place,
-                               struct ttm_mem_reg *mem,
+                               struct ttm_resource *mem,
                                struct ttm_operation_ctx *ctx)
 {
        struct ttm_bo_device *bdev = bo->bdev;
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       struct ttm_mem_reg *mem,
+                       struct ttm_resource *mem,
                        struct ttm_operation_ctx *ctx)
 {
        struct ttm_bo_device *bdev = bo->bdev;
                              struct ttm_operation_ctx *ctx)
 {
        int ret = 0;
-       struct ttm_mem_reg mem;
+       struct ttm_resource mem;
 
        dma_resv_assert_held(bo->base.resv);
 
 
 static bool ttm_bo_places_compat(const struct ttm_place *places,
                                 unsigned num_placement,
-                                struct ttm_mem_reg *mem,
+                                struct ttm_resource *mem,
                                 uint32_t *new_flags)
 {
        unsigned i;
 }
 
 bool ttm_bo_mem_compat(struct ttm_placement *placement,
-                      struct ttm_mem_reg *mem,
+                      struct ttm_resource *mem,
                       uint32_t *new_flags)
 {
        if (ttm_bo_places_compat(placement->placement, placement->num_placement,
        if (bo->mem.mem_type != TTM_PL_SYSTEM ||
            bo->ttm->caching_state != tt_cached) {
                struct ttm_operation_ctx ctx = { false, false };
-               struct ttm_mem_reg evict_mem;
+               struct ttm_resource evict_mem;
 
                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
 
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                   struct ttm_operation_ctx *ctx,
-                   struct ttm_mem_reg *new_mem)
+                   struct ttm_resource *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
 }
 
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-                      struct ttm_mem_reg *mem)
+                      struct ttm_resource *mem)
 {
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        int ret;
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
-                    struct ttm_mem_reg *mem)
+                    struct ttm_resource *mem)
 {
        if (--mem->bus.io_reserved_count)
                return;
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
        struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_resource *mem = &bo->mem;
        int ret;
 
        if (mem->bus.io_reserved_vm)
 
 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_resource *mem = &bo->mem;
 
        if (!mem->bus.io_reserved_vm)
                return;
        ttm_mem_io_free(bo->bdev, mem);
 }
 
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
-                              struct ttm_mem_reg *mem,
+static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+                              struct ttm_resource *mem,
                               void **virtual)
 {
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        return 0;
 }
 
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
-                               struct ttm_mem_reg *mem,
+static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+                               struct ttm_resource *mem,
                                void *virtual)
 {
        struct ttm_resource_manager *man;
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
-                      struct ttm_mem_reg *new_mem)
+                      struct ttm_resource *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg old_copy = *old_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        if (ret)
                return ret;
 
-       ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+       ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
-       ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+       ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;
 
        }
 
 out1:
-       ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+       ttm_resource_iounmap(bdev, old_mem, new_iomap);
 out:
-       ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+       ttm_resource_iounmap(bdev, &old_copy, old_iomap);
 
        /*
         * On error, keep the mm node!
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
 {
-       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_resource *mem = &bo->mem;
 
        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
 {
-       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_resource *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
-                             struct ttm_mem_reg *new_mem)
+                             struct ttm_resource *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
 
 
 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
-                        struct ttm_mem_reg *new_mem)
+                        struct ttm_resource *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
 
 static int ttm_range_man_get_node(struct ttm_resource_manager *man,
                                  struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
-                                 struct ttm_mem_reg *mem)
+                                 struct ttm_resource *mem)
 {
        struct ttm_range_manager *rman = to_range_manager(man);
        struct drm_mm *mm = &rman->mm;
 }
 
 static void ttm_range_man_put_node(struct ttm_resource_manager *man,
-                                  struct ttm_mem_reg *mem)
+                                  struct ttm_resource *mem)
 {
        struct ttm_range_manager *rman = to_range_manager(man);
 
 
        }
 }
 
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
                struct ttm_operation_ctx *ctx)
 {
        int ret = 0;
 
  * vmw_bo_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_resource indicating to what memory
  *       region the move is taking place.
  *
  * Detaches cached maps and device bindings that require that the
  * buffer doesn't move.
  */
 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
-                       struct ttm_mem_reg *mem)
+                       struct ttm_resource *mem)
 {
        struct vmw_buffer_object *vbo;
 
 
                                   struct vmw_buffer_object *new_backup,
                                   unsigned long new_backup_offset);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
-                                 struct ttm_mem_reg *mem);
+                                 struct ttm_resource *mem);
 extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
 extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
-                              struct ttm_mem_reg *mem);
+                              struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 extern struct vmw_buffer_object *
 vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
 
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
                                  struct ttm_buffer_object *bo,
                                  const struct ttm_place *place,
-                                 struct ttm_mem_reg *mem)
+                                 struct ttm_resource *mem)
 {
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
        int id;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-                                  struct ttm_mem_reg *mem)
+                                  struct ttm_resource *mem)
 {
        struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
 
  * states from the device.
  */
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
-                          struct ttm_mem_reg *mem)
+                          struct ttm_resource *mem)
 {
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
 
 static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
-                                 struct ttm_mem_reg *mem,
+                                 struct ttm_resource *mem,
                                  unsigned long lpfn,
                                  enum drm_mm_insert_mode mode)
 {
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
                            struct ttm_buffer_object *bo,
                            const struct ttm_place *place,
-                           struct ttm_mem_reg *mem)
+                           struct ttm_resource *mem)
 {
        struct vmw_thp_manager *rman = to_thp_manager(man);
        struct drm_mm *mm = &rman->mm;
 
 
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-                            struct ttm_mem_reg *mem)
+                            struct ttm_resource *mem)
 {
        struct vmw_thp_manager *rman = to_thp_manager(man);
 
 
 }
 
 
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        return vmw_user_bo_verify_access(bo, tfile);
 }
 
-static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 {
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
 
  * vmw_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_resource indicating to what memory
  *       region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  */
 static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
-                           struct ttm_mem_reg *mem)
+                           struct ttm_resource *mem)
 {
        vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
 
 
 
 /**
- * struct ttm_mem_reg
+ * struct ttm_resource
  *
  * @mm_node: Memory manager node.
  * @size: Requested size of memory region.
  * buffer object.
  */
 
-struct ttm_mem_reg {
+struct ttm_resource {
        void *mm_node;
        unsigned long start;
        unsigned long size;
         * Members protected by the bo::resv::reserved lock.
         */
 
-       struct ttm_mem_reg mem;
+       struct ttm_resource mem;
        struct file *persistent_swap_storage;
        struct ttm_tt *ttm;
        bool evicted;
  * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
  *
  * @placement:  Proposed placement to check @mem against.
- * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
+ * @mem:  The struct ttm_resource indicating the region where the bo resides
  * @new_flags: Describes compatible placement found
  *
  * Returns true if the placement is compatible
  */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
+bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem,
                       uint32_t *new_flags);
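
(Hedged usage sketch, not from the patch: a driver can test a BO's current resource against a requested placement before deciding whether a move is needed. Only the ttm_bo_mem_compat() call and bo->mem are taken from the declarations above; the wrapper is illustrative.)

static bool bo_placement_satisfied(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        uint32_t new_flags;

        /* true if the current resource already matches the placement */
        return ttm_bo_mem_compat(placement, &bo->mem, &new_flags);
}
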
 
 /**
 
         * @bo: Pointer to the buffer object we're allocating space for.
         * @placement: Placement details.
         * @flags: Additional placement flags.
-        * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+        * @mem: Pointer to a struct ttm_resource to be filled in.
         *
         * This function should allocate space in the memory type managed
         * by @man. Placement details if
        int  (*get_node)(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
-                        struct ttm_mem_reg *mem);
+                        struct ttm_resource *mem);
 
        /**
         * struct ttm_resource_manager member put_node
         *
         * @man: Pointer to a memory type manager.
-        * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+        * @mem: Pointer to a struct ttm_resource to be filled in.
         *
         * This function frees memory type resources previously allocated
         * and that are identified by @mem::mm_node and @mem::start. May not
         * be called from within atomic context.
         */
        void (*put_node)(struct ttm_resource_manager *man,
-                        struct ttm_mem_reg *mem);
+                        struct ttm_resource *mem);
 
        /**
         * struct ttm_resource_manager member debug
         */
        int (*move)(struct ttm_buffer_object *bo, bool evict,
                    struct ttm_operation_ctx *ctx,
-                   struct ttm_mem_reg *new_mem);
+                   struct ttm_resource *new_mem);
 
        /**
         * struct ttm_bo_driver_member verify_access
         */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            bool evict,
-                           struct ttm_mem_reg *new_mem);
+                           struct ttm_resource *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev,
-                             struct ttm_mem_reg *mem);
+                             struct ttm_resource *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev,
-                           struct ttm_mem_reg *mem);
+                           struct ttm_resource *mem);
 
        /**
         * Return the pfn for a given page_offset inside the BO.
  */
 
 /**
- * ttm_mem_reg_is_pci
+ * ttm_resource_is_pci
  *
  * @bdev: Pointer to a struct ttm_bo_device.
- * @mem: A valid struct ttm_mem_reg.
+ * @mem: A valid struct ttm_resource.
  *
  * Returns true if the memory described by @mem is PCI memory,
  * false otherwise.
  */
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+bool ttm_resource_is_pci(struct ttm_bo_device *bdev, struct ttm_resource *mem);
 
 /**
  * ttm_bo_mem_space
  * @bo: Pointer to a struct ttm_buffer_object, the data of which
  * we want to allocate space for.
  * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_mem_reg.
+ * @mem: A struct ttm_resource.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
-                    struct ttm_mem_reg *mem,
+                    struct ttm_resource *mem,
                     struct ttm_operation_ctx *ctx);
 
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_resource *mem);
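
(Hedged sketch of how ttm_bo_mem_space() and ttm_bo_mem_put() pair up, loosely mirroring the eviction path shown earlier, where evict_mem starts as a copy of bo->mem with mm_node cleared. Field setup is abridged and the function name is illustrative.)

static int probe_placement(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement,
                           struct ttm_operation_ctx *ctx)
{
        struct ttm_resource mem = bo->mem;      /* start from the current state */
        int ret;

        mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
        if (ret)
                return ret;

        /* a real caller would now move the BO into 'mem' */
        ttm_bo_mem_put(bo, &mem);
        return 0;
}
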
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
  */
 
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-                      struct ttm_mem_reg *mem);
+                      struct ttm_resource *mem);
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
-                    struct ttm_mem_reg *mem);
+                    struct ttm_resource *mem);
 /**
  * ttm_bo_move_ttm
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
  *
  * Optimized move function for a buffer object with both old and
  * new placement backed by a TTM. The function will, if successful,
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
-                   struct ttm_mem_reg *new_mem);
+                   struct ttm_resource *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
  *
  * Fallback move function for a mappable buffer object in mappable memory.
  * The function will, if successful,
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
-                      struct ttm_mem_reg *new_mem);
+                      struct ttm_resource *new_mem);
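
(For reference, a minimal driver ->move() callback with the new signature, assembled only from calls appearing in this patch: ttm_bo_wait() and ttm_bo_move_memcpy(). The function name is illustrative.)

static int drv_bo_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        int ret;

        /* wait for pending GPU work before touching the backing store */
        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        /* fall back to a CPU copy into the new resource */
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
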
 
 /**
  * ttm_bo_free_old_node
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
  * has been scheduled. The function will create a new temporary buffer object
  */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence, bool evict,
-                             struct ttm_mem_reg *new_mem);
+                             struct ttm_resource *new_mem);
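
(Hedged sketch of the call site after an accelerated copy has been queued; 'fence' is assumed to signal the copy's completion, and the wrapper name is illustrative.)

static int drv_finish_accel_move(struct ttm_buffer_object *bo,
                                 struct dma_fence *fence, bool evict,
                                 struct ttm_resource *new_mem)
{
        /* retire the old node once the copy fence signals */
        return ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
}
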
 
 /**
  * ttm_bo_pipeline_move.
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_mem_reg indicating where to move.
+ * @new_mem: struct ttm_resource indicating where to move.
  *
  * Function for pipelining accelerated moves. Either free the memory
  * immediately or hang it on a temporary buffer object.
  */
 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
-                        struct ttm_mem_reg *new_mem);
+                        struct ttm_resource *new_mem);
 
 /**
  * ttm_bo_pipeline_gutting.
 
 #include <linux/types.h>
 
 struct ttm_tt;
-struct ttm_mem_reg;
+struct ttm_resource;
 struct ttm_buffer_object;
 struct ttm_operation_ctx;
 
         * struct ttm_backend_func member bind
         *
         * @ttm: Pointer to a struct ttm_tt.
-        * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+        * @bo_mem: Pointer to a struct ttm_resource describing the
         * memory type and location for binding.
         *
         * Bind the backend pages into the aperture in the location
         * indicated by @bo_mem. This function should be able to handle
         * differences between aperture and system page sizes.
         */
-       int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+       int (*bind) (struct ttm_tt *ttm, struct ttm_resource *bo_mem);
 
        /**
         * struct ttm_backend_func member unbind
  * ttm_tt_bind:
  *
  * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ * @bo_mem: The struct ttm_resource identifying the binding location.
  *
  * Bind the pages of @ttm to an aperture location identified by @bo_mem
  */
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem,
                struct ttm_operation_ctx *ctx);
 
 /**