  * XXX: This is in the VM bind data path; we likely should calculate this
  * once and store it, with a recalculation if the BO is moved.
  */
-uint64_t vram_region_io_offset(struct ttm_resource *res)
+uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 {
        struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
        struct xe_gt *gt = mem_type_to_gt(xe, res->mem_type);
 
        if (res->mem_type == XE_PL_STOLEN)
                return xe_ttm_stolen_gpu_offset(xe);
 
-       return gt->mem.vram.io_start - xe->mem.vram.io_start;
+       return xe->mem.vram.base + gt->mem.vram.base;
 }
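
The rename is semantic, not cosmetic: the old helper returned the region's
offset inside the CPU-visible PCI BAR window (io_start based), while the new
one returns the region's offset in the device's own address space (base
based). The two only coincide when the BAR maps all of VRAM one-to-one. A
minimal standalone sketch of the distinction; the struct and the numbers
below are invented for illustration and only mirror the io_start/base fields
used above:

#include <stdint.h>
#include <stdio.h>

/* Invented, simplified stand-ins for the driver's VRAM bookkeeping. */
struct vram_info {
        uint64_t io_start; /* CPU address where the PCI BAR exposes the region */
        uint64_t base;     /* region offset in the GPU's own address space */
};

/* Old scheme: BAR-relative offset, only meaningful for CPU-side access. */
static uint64_t io_offset(const struct vram_info *gt, const struct vram_info *dev)
{
        return gt->io_start - dev->io_start;
}

/* New scheme: device-side offset, usable in GPU page-table entries. */
static uint64_t gpu_offset(const struct vram_info *gt, const struct vram_info *dev)
{
        return dev->base + gt->base;
}

int main(void)
{
        /* Made-up numbers for a two-tile device. */
        const struct vram_info dev = { .io_start = 0x4000000000ull, .base = 0 };
        const struct vram_info gt1 = { .io_start = 0x4100000000ull,
                                       .base = 0x200000000ull };

        printf("io offset:  %#llx\n", (unsigned long long)io_offset(&gt1, &dev));
        printf("gpu offset: %#llx\n", (unsigned long long)gpu_offset(&gt1, &dev));
        return 0;
}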
 
 /**
                        XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
                        place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
-                                      vram_region_io_offset(bo->ttm.resource)) >> PAGE_SHIFT;
+                                      vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
                        place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
                        spin_lock(&xe->pinned.lock);
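
For the pinned-restore hunk above, the BO must come back at exactly the pages
it occupied, so its device address is translated into region-relative page
frame numbers by subtracting the region's GPU offset. A small arithmetic
sketch with made-up values, assuming a PAGE_SHIFT of 12:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ull << PAGE_SHIFT)

int main(void)
{
        /* Invented example values. */
        uint64_t bo_addr    = 0x200400000ull; /* device address of the BO */
        uint64_t region_off = 0x200000000ull; /* vram_region_gpu_offset() */
        uint64_t bo_size    = 16 * PAGE_SIZE;

        uint64_t fpfn = (bo_addr - region_off) >> PAGE_SHIFT; /* first page */
        uint64_t lpfn = fpfn + (bo_size >> PAGE_SHIFT);       /* limit page */

        printf("fpfn=%llu lpfn=%llu\n",
               (unsigned long long)fpfn, (unsigned long long)lpfn);
        return 0;
}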
 
                xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
                             page_size, &cur);
-               return cur.start + offset + vram_region_io_offset(bo->ttm.resource);
+               return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
        }
 }
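
The address returned above is composed of three pieces: the resource's start
within its region (from the cursor), the caller's byte offset within the
page, and the region's device-side offset. A hedged sketch of that
composition, with invented parameter names:

#include <stdint.h>

/* Compose a full device address from its parts, mirroring the hunk above. */
static inline uint64_t bo_device_addr(uint64_t cur_start,   /* cur.start */
                                      uint64_t page_offset, /* offset */
                                      uint64_t region_off)  /* vram_region_gpu_offset() */
{
        return cur_start + page_offset + region_off;
}

Because the offset is now base-derived, the result is meaningful to the GPU
itself, which is what the identity-map and PTE hunks below rely on.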
 
 
 bool mem_type_is_vram(u32 mem_type);
 bool xe_bo_is_vram(struct xe_bo *bo);
 bool xe_bo_is_stolen(struct xe_bo *bo);
-uint64_t vram_region_io_offset(struct ttm_resource *res);
+uint64_t vram_region_gpu_offset(struct ttm_resource *res);
 
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
 
 
        } else {
                /* Offset into identity map. */
                *L0_ofs = xe_migrate_vram_ofs(cur->start +
-                                             vram_region_io_offset(res));
+                                             vram_region_gpu_offset(res));
                cmds += cmd_size;
        }
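
The migrate VM identity-maps VRAM at a fixed GPU virtual address, so a
device-side VRAM address (cur->start plus the region's GPU offset) can be
turned into a GPU VA by adding the base of that mapping. A sketch of the
idea; the base value and helper below are invented stand-ins for whatever
xe_migrate_vram_ofs() actually does:

#include <stdint.h>

/* Hypothetical GPU VA at which the migrate VM identity-maps VRAM. */
#define IDENTITY_MAP_BASE_GPU_VA (256ull << 30)

/* Device-side VRAM address -> GPU VA in the migrate VM, in the spirit of
 * xe_migrate_vram_ofs(). */
static inline uint64_t migrate_vram_ofs_sketch(uint64_t vram_addr)
{
        return IDENTITY_MAP_BASE_GPU_VA + vram_addr;
}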
 
                                        addr |= XE_PTE_PS64;
                                }
 
-                               addr += vram_region_io_offset(bo->ttm.resource);
+                               addr += vram_region_gpu_offset(bo->ttm.resource);
                                addr |= XE_PPGTT_PTE_LM;
                        }
                        addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
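
The PTE built here packs a device-side address together with flag bits: the
64K page-size hint when the chunk allows it, the local-memory bit for VRAM,
and cacheability/present/writable attributes. A compact sketch of that
composition; the bit positions are invented, the real XE_*/PPAT_* values
live in the driver headers:

#include <stdint.h>
#include <stdbool.h>

/* Invented flag bits, for illustration only. */
#define PTE_PS64    (1ull << 8)  /* 64K page-size hint */
#define PTE_LM      (1ull << 11) /* address lives in local memory (VRAM) */
#define PTE_CACHED  (3ull << 3)  /* stand-in for PPAT_CACHED */
#define PTE_PRESENT (1ull << 0)
#define PTE_RW      (1ull << 1)

static inline uint64_t encode_vram_pte(uint64_t addr, uint64_t region_off,
                                       bool use_64k)
{
        uint64_t pte = addr + region_off; /* device-side VRAM address */

        if (use_64k)
                pte |= PTE_PS64;
        pte |= PTE_LM; /* VRAM rather than system memory */
        pte |= PTE_CACHED | PTE_PRESENT | PTE_RW;
        return pte;
}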
 
        int ret;
 
        if (is_vram) {
-               struct xe_gt *bo_gt = xe_bo_to_gt(bo);
-
                xe_walk.default_pte = XE_PPGTT_PTE_LM;
                if (vma && vma->use_atomic_access_pte_bit)
                        xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
-               xe_walk.dma_offset = bo_gt->mem.vram.io_start -
-                       gt_to_xe(gt)->mem.vram.io_start;
+               xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
                xe_walk.cache = XE_CACHE_WB;
        } else {
                if (!xe_vma_is_userptr(vma) && bo->flags & XE_BO_SCANOUT_BIT)