/* Optionally clear bo *and* CCS data in VRAM. */
        if (clear) {
-               fence = xe_migrate_clear(gt->migrate, bo, bo->ttm.resource);
+               fence = xe_migrate_clear(gt_to_tile(gt)->migrate, bo, bo->ttm.resource);
                if (IS_ERR(fence)) {
                        KUNIT_FAIL(test, "Failed to submit bo clear.\n");
                        return PTR_ERR(fence);
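Net effect of the hunk above, as a minimal sketch (KUnit scaffolding elided; the synchronous wait is an assumption for illustration, not part of this patch): the clear is issued through the tile's migrate context and the caller consumes the returned fence.

	fence = xe_migrate_clear(gt_to_tile(gt)->migrate, bo, bo->ttm.resource);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	dma_fence_wait(fence, false);	/* wait non-interruptibly, then drop our reference */
	dma_fence_put(fence);
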
        struct xe_bo *bo, *external;
        unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
                XE_BO_CREATE_VRAM_IF_DGFX(gt_to_tile(gt));
-       struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->primary_gt.migrate);
+       struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
        struct ww_acquire_ctx ww;
        int err, i;
 
 
 static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
                      struct kunit *test)
 {
-       struct xe_device *xe = gt_to_xe(m->gt);
+       struct xe_device *xe = tile_to_xe(m->tile);
        u64 retval, expected = 0;
        bool big = bo->size >= SZ_2M;
        struct dma_fence *fence;
        const char *str = big ? "Copying big bo" : "Copying small bo";
        int err;
 
-       struct xe_bo *sysmem = xe_bo_create_locked(xe, gt_to_tile(m->gt), NULL,
+       struct xe_bo *sysmem = xe_bo_create_locked(xe, m->tile, NULL,
                                                   bo->size,
                                                   ttm_bo_type_kernel,
                                                   XE_BO_CREATE_SYSTEM_BIT);
 static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
                           struct kunit *test, bool force_gpu)
 {
-       struct xe_device *xe = gt_to_xe(m->gt);
+       struct xe_device *xe = tile_to_xe(m->tile);
        struct dma_fence *fence;
        u64 retval, expected;
        ktime_t then, now;
 
 static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 {
-       struct xe_gt *gt = m->gt;
-       struct xe_tile *tile = gt_to_tile(m->gt);
-       struct xe_device *xe = gt_to_xe(gt);
+       struct xe_tile *tile = m->tile;
+       struct xe_device *xe = tile_to_xe(tile);
        struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
        struct xe_res_cursor src_it;
        struct dma_fence *fence;
        u64 retval, expected;
        struct xe_bb *bb;
        int err;
-       u8 id = gt->info.id;
+       u8 id = tile->id;
 
        err = xe_bo_vmap(bo);
        if (err) {
                goto free_pt;
        }
 
-       bb = xe_bb_new(gt, 32, xe->info.supports_usm);
+       bb = xe_bb_new(&tile->primary_gt, 32, xe->info.supports_usm);
        if (IS_ERR(bb)) {
                KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
                           PTR_ERR(bb));
        xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
        expected = 0;
 
-       emit_clear(m->gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+       emit_clear(&tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
                   IS_DGFX(xe));
        run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
                       test);
 static int migrate_test_run_device(struct xe_device *xe)
 {
        struct kunit *test = xe_cur_kunit();
-       struct xe_gt *gt;
+       struct xe_tile *tile;
        int id;
 
-       for_each_gt(gt, xe, id) {
-               struct xe_migrate *m = gt->migrate;
+       for_each_tile(tile, xe, id) {
+               struct xe_migrate *m = tile->migrate;
                struct ww_acquire_ctx ww;
 
-               kunit_info(test, "Testing gt id %d.\n", id);
+               kunit_info(test, "Testing tile id %d.\n", id);
                xe_vm_lock(m->eng->vm, &ww, 0, true);
                xe_device_mem_access_get(xe);
                xe_migrate_sanity_test(m, test);
 
                tile = mem_type_to_tile(xe, old_mem->mem_type);
 
        XE_BUG_ON(!tile);
-       XE_BUG_ON(!tile->primary_gt.migrate);
+       XE_BUG_ON(!tile->migrate);
 
        trace_xe_bo_move(bo);
        xe_device_mem_access_get(xe);
                }
        } else {
                if (move_lacks_source)
-                       fence = xe_migrate_clear(tile->primary_gt.migrate, bo, new_mem);
+                       fence = xe_migrate_clear(tile->migrate, bo, new_mem);
                else
-                       fence = xe_migrate_copy(tile->primary_gt.migrate,
+                       fence = xe_migrate_copy(tile->migrate,
                                                bo, bo, old_mem, new_mem);
                if (IS_ERR(fence)) {
                        ret = PTR_ERR(fence);
 
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_ggtt.h"
-#include "xe_gt.h"
+#include "xe_tile.h"
 
 /**
  * xe_bo_evict_all - evict all BOs from VRAM
        struct ttm_device *bdev = &xe->ttm;
        struct ww_acquire_ctx ww;
        struct xe_bo *bo;
-       struct xe_gt *gt;
+       struct xe_tile *tile;
        struct list_head still_in_list;
        u32 mem_type;
        u8 id;
         * Wait for all user BO to be evicted as those evictions depend on the
         * memory moved below.
         */
-       for_each_gt(gt, xe, id)
-               xe_gt_migrate_wait(gt);
+       for_each_tile(tile, xe, id)
+               xe_tile_migrate_wait(tile);
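xe_tile_migrate_wait() replaces the old per-GT wait; as the wrapper added later in this patch shows, each loop iteration above is shorthand for waiting on that tile's migrate context:

	/* equivalent to the loop body above */
	xe_migrate_wait(tile->migrate);
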
 
        spin_lock(&xe->pinned.lock);
        for (;;) {
 {
        struct ww_acquire_ctx ww;
        struct xe_bo *bo;
-       struct xe_gt *gt;
+       struct xe_tile *tile;
        struct list_head still_in_list;
        u8 id;
        int ret;
        spin_unlock(&xe->pinned.lock);
 
        /* Wait for validate to complete */
-       for_each_gt(gt, xe, id)
-               xe_gt_migrate_wait(gt);
+       for_each_tile(tile, xe, id)
+               xe_tile_migrate_wait(tile);
 
        return 0;
 }
 
                 */
                struct xe_sa_manager *kernel_bb_pool;
        } mem;
+
+       /** @migrate: Migration helper for vram blits and clearing */
+       struct xe_migrate *migrate;
 };
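With @migrate moved here, every GT on a tile resolves to the same context. A hypothetical accessor (illustration only, not added by this patch) makes the new ownership explicit:

	/* Hypothetical helper: the primary GT and any media GT of a
	 * tile now share one migrate context.
	 */
	static inline struct xe_migrate *xe_gt_migrate(struct xe_gt *gt)
	{
		return gt_to_tile(gt)->migrate;
	}
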
 
 /**
 
                        if (XE_IOCTL_ERR(xe, !hwe))
                                return -EINVAL;
 
-                       migrate_vm = xe_migrate_get_vm(gt->migrate);
+                       migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
                        new = xe_engine_create(xe, migrate_vm, logical_mask,
                                               args->width, hwe,
                                               ENGINE_FLAG_PERSISTENT |
 
 #include "xe_wa.h"
 #include "xe_wopcm.h"
 
-struct xe_gt *xe_find_full_gt(struct xe_gt *gt)
-{
-       /*
-        * FIXME: Once the code is prepared for re-enabling, this function will
-        * be gone. Just return the only possible gt for now.
-        */
-       return gt;
-}
-
 int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
 {
        XE_BUG_ON(gt->info.type == XE_GT_TYPE_UNINITIALIZED);
 int xe_gt_record_default_lrcs(struct xe_gt *gt)
 {
        struct xe_device *xe = gt_to_xe(gt);
+       struct xe_tile *tile = gt_to_tile(gt);
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        int err = 0;
                if (!default_lrc)
                        return -ENOMEM;
 
-               vm = xe_migrate_get_vm(gt->migrate);
+               vm = xe_migrate_get_vm(tile->migrate);
                e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
                                     hwe, ENGINE_FLAG_WA);
                if (IS_ERR(e)) {
        }
 
        if (!xe_gt_is_media_type(gt)) {
-               gt->migrate = xe_migrate_init(gt);
-               if (IS_ERR(gt->migrate)) {
-                       err = PTR_ERR(gt->migrate);
+               struct xe_tile *tile = gt_to_tile(gt);
+
+               tile->migrate = xe_migrate_init(tile);
+               if (IS_ERR(tile->migrate)) {
+                       err = PTR_ERR(tile->migrate);
                        goto err_force_wake;
                }
-       } else {
-               gt->migrate = xe_find_full_gt(gt)->migrate;
        }
 
        err = xe_uc_init_hw(&gt->uc);
        return err;
 }
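The deleted else-branch is what xe_find_full_gt() existed for: a media GT no longer borrows the full GT's pointer, since the context now lives on the tile. Distilled (error unwinding elided), the init-order assumption is:

	/* Sketch: only the primary (non-media) GT creates the tile's
	 * migrate context; a media GT on the same tile just reads
	 * tile->migrate.
	 */
	if (!xe_gt_is_media_type(gt))
		tile->migrate = xe_migrate_init(tile);
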
 
-void xe_gt_migrate_wait(struct xe_gt *gt)
-{
-       xe_migrate_wait(gt->migrate);
-}
-
 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
                                     enum xe_engine_class class,
                                     u16 instance, bool logical)
 
 int xe_gt_suspend(struct xe_gt *gt);
 int xe_gt_resume(struct xe_gt *gt);
 void xe_gt_reset_async(struct xe_gt *gt);
-void xe_gt_migrate_wait(struct xe_gt *gt);
 void xe_gt_sanitize(struct xe_gt *gt);
 
-struct xe_gt *xe_find_full_gt(struct xe_gt *gt);
-
 /**
  * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
  * first that matches the same reset domain as @class
 
 
        /* Bind VMA only to the GT that has faulted */
        trace_xe_vma_pf_bind(vma);
-       fence = __xe_pt_bind_vma(tile, vma, xe_gt_migrate_engine(gt), NULL, 0,
+       fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
                                 vma->tile_present & BIT(tile->id));
        if (IS_ERR(fence)) {
                ret = PTR_ERR(fence);
 
        /** @hw_engines: hardware engines on the GT */
        struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
 
-       /** @migrate: Migration helper for vram blits and clearing */
-       struct xe_migrate *migrate;
-
        /** @pcode: GT's PCODE */
        struct {
                /** @lock: protecting GT's PCODE mailbox data */
 
 struct xe_migrate {
        /** @eng: Default engine used for migration */
        struct xe_engine *eng;
-       /** @gt: Backpointer to the gt this struct xe_migrate belongs to. */
-       struct xe_gt *gt;
+       /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
+       struct xe_tile *tile;
        /** @job_mutex: Timeline mutex for @eng. */
        struct mutex job_mutex;
        /** @pt_bo: Page-table buffer object. */
 #define NUM_PT_PER_BLIT (MAX_PREEMPTDISABLE_TRANSFER / SZ_2M)
 
 /**
- * xe_gt_migrate_engine() - Get this gt's migrate engine.
- * @gt: The gt.
+ * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * @tile: The tile.
  *
- * Returns the default migrate engine of this gt.
+ * Returns the default migrate engine of this tile.
  * TODO: Perhaps this function is slightly misplaced, and even unneeded?
  *
  * Return: The default migrate engine
  */
-struct xe_engine *xe_gt_migrate_engine(struct xe_gt *gt)
+struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
 {
-       return gt->migrate->eng;
+       return tile->migrate->eng;
 }
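Usage sketch, mirroring the page-fault hunk earlier in this patch: callers that schedule work on a tile's default migration engine now fetch it from the tile rather than a GT.

	fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile),
				 NULL, 0, vma->tile_present & BIT(tile->id));
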
 
 static void xe_migrate_fini(struct drm_device *dev, void *arg)
  */
 static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
 {
-       struct xe_gt *gt = m->gt;
-       struct xe_tile *tile = gt_to_tile(gt);
+       struct xe_tile *tile = m->tile;
        struct xe_device *xe = vm->xe;
        size_t cleared_size;
        u64 vram_addr;
        return 0;
 }
 
-static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
+static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                                 struct xe_vm *vm)
 {
-       u8 id = gt->info.id;
+       struct xe_device *xe = tile_to_xe(tile);
+       u8 id = tile->id;
        u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
        u32 map_ofs, level, i;
-       struct xe_device *xe = gt_to_xe(m->gt);
-       struct xe_tile *tile = gt_to_tile(m->gt);
        struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
        u64 entry;
        int ret;
                m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
                if (xe->info.supports_usm) {
-                       batch = gt->usm.bb_pool->bo;
+                       batch = tile->primary_gt.usm.bb_pool->bo;
                        batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE,
                                                &is_vram);
                        m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
 /**
  * xe_migrate_init() - Initialize a migrate context
- * @gt: Back-pointer to the gt we're initializing for.
+ * @tile: Back-pointer to the tile we're initializing for.
  *
  * Return: Pointer to a migrate context on success. Error pointer on error.
  */
-struct xe_migrate *xe_migrate_init(struct xe_gt *gt)
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 {
-       struct xe_device *xe = gt_to_xe(gt);
+       struct xe_device *xe = tile_to_xe(tile);
+       struct xe_gt *primary_gt = &tile->primary_gt;
        struct xe_migrate *m;
        struct xe_vm *vm;
        struct ww_acquire_ctx ww;
        int err;
 
-       XE_BUG_ON(xe_gt_is_media_type(gt));
-
        m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);
 
-       m->gt = gt;
+       m->tile = tile;
 
        /* Special layout, prepared below.. */
        vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
-                         XE_VM_FLAG_SET_GT_ID(gt));
+                         XE_VM_FLAG_SET_TILE_ID(tile));
        if (IS_ERR(vm))
                return ERR_CAST(vm);
 
        xe_vm_lock(vm, &ww, 0, false);
-       err = xe_migrate_prepare_vm(gt, m, vm);
+       err = xe_migrate_prepare_vm(tile, m, vm);
        xe_vm_unlock(vm, &ww);
        if (err) {
                xe_vm_close_and_put(vm);
        }
 
        if (xe->info.supports_usm) {
-               struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
+               struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
                                                           XE_ENGINE_CLASS_COPY,
-                                                          gt->usm.reserved_bcs_instance,
+                                                          primary_gt->usm.reserved_bcs_instance,
                                                           false);
                if (!hwe)
                        return ERR_PTR(-EINVAL);
                                          BIT(hwe->logical_instance), 1,
                                          hwe, ENGINE_FLAG_KERNEL);
        } else {
-               m->eng = xe_engine_create_class(xe, gt, vm,
+               m->eng = xe_engine_create_class(xe, primary_gt, vm,
                                                XE_ENGINE_CLASS_COPY,
                                                ENGINE_FLAG_KERNEL);
        }
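A hypothetical caller, to show the shape of the new API (the real call site is the xe_gt init hunk above; error unwinding is elided):

	for_each_tile(tile, xe, id) {
		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate))
			return PTR_ERR(tile->migrate);
	}
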
                               u64 dst_ofs, bool dst_is_vram, u32 dst_size,
                               u64 ccs_ofs, bool copy_ccs)
 {
-       struct xe_gt *gt = m->gt;
+       struct xe_gt *gt = &m->tile->primary_gt;
        u32 flush_flags = 0;
 
        if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_vram) {
                                  struct ttm_resource *src,
                                  struct ttm_resource *dst)
 {
-       struct xe_gt *gt = m->gt;
+       struct xe_gt *gt = &m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        struct dma_fence *fence = NULL;
        u64 size = src_bo->size;
                                   struct ttm_resource *dst)
 {
        bool clear_vram = mem_type_is_vram(dst->mem_type);
-       struct xe_gt *gt = m->gt;
+       struct xe_gt *gt = &m->tile->primary_gt;
        struct xe_device *xe = gt_to_xe(gt);
        struct dma_fence *fence = NULL;
        u64 size = bo->size;
        for (i = 0; i < num_updates; i++) {
                const struct xe_vm_pgtable_update *update = &updates[i];
 
-               ops->populate(pt_update, gt_to_tile(m->gt), &update->pt_bo->vmap, NULL,
+               ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
                              update->ofs, update->qwords, update);
        }
 
                           struct xe_migrate_pt_update *pt_update)
 {
        const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
-       struct xe_gt *gt = m->gt;
-       struct xe_tile *tile = gt_to_tile(m->gt);
-       struct xe_device *xe = gt_to_xe(gt);
+       struct xe_tile *tile = m->tile;
+       struct xe_gt *gt = &tile->primary_gt;
+       struct xe_device *xe = tile_to_xe(tile);
        struct xe_sched_job *job;
        struct dma_fence *fence;
        struct drm_suballoc *sa_bo = NULL;
 
        struct xe_vma *vma;
 };
 
-struct xe_migrate *xe_migrate_init(struct xe_gt *gt);
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
 
 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                                  struct xe_bo *src_bo,
 
 void xe_migrate_wait(struct xe_migrate *m);
 
-struct xe_engine *xe_gt_migrate_engine(struct xe_gt *gt);
+struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);
 #endif
 
                        return ERR_PTR(-ENOMEM);
        }
 
-       fence = xe_migrate_update_pgtables(tile->primary_gt.migrate,
+       fence = xe_migrate_update_pgtables(tile->migrate,
                                           vm, vma->bo,
                                           e ? e : vm->eng[tile->id],
                                           entries, num_entries,
         * clear again here. The eviction may have updated pagetables at a
         * lower level, because it needs to be more conservative.
         */
-       fence = xe_migrate_update_pgtables(tile->primary_gt.migrate,
+       fence = xe_migrate_update_pgtables(tile->migrate,
                                           vm, NULL, e ? e :
                                           vm->eng[tile->id],
                                           entries, num_entries,
 
 
 #include "xe_device.h"
 #include "xe_ggtt.h"
+#include "xe_migrate.h"
 #include "xe_sa.h"
 #include "xe_tile.h"
 #include "xe_ttm_vram_mgr.h"
        xe_device_mem_access_put(tile_to_xe(tile));
        return err;
 }
+
+void xe_tile_migrate_wait(struct xe_tile *tile)
+{
+       xe_migrate_wait(tile->migrate);
+}
 
 int xe_tile_alloc(struct xe_tile *tile);
 int xe_tile_init_noalloc(struct xe_tile *tile);
 
+void xe_tile_migrate_wait(struct xe_tile *tile);
+
 #endif
 
                        if (!vm->pt_root[id])
                                continue;
 
-                       migrate_vm = xe_migrate_get_vm(gt->migrate);
+                       migrate_vm = xe_migrate_get_vm(tile->migrate);
                        eng = xe_engine_create_class(xe, gt, migrate_vm,
                                                     XE_ENGINE_CLASS_COPY,
                                                     ENGINE_FLAG_VM);
 
 #define XE_VM_FLAG_SCRATCH_PAGE                BIT(4)
 #define XE_VM_FLAG_FAULT_MODE          BIT(5)
-#define XE_VM_FLAG_GT_ID(flags)                (((flags) >> 6) & 0x3)
-#define XE_VM_FLAG_SET_GT_ID(gt)       ((gt)->info.id << 6)
+#define XE_VM_FLAG_TILE_ID(flags)              (((flags) >> 6) & 0x3)
+#define XE_VM_FLAG_SET_TILE_ID(tile)   ((tile)->id << 6)
        unsigned long flags;
 
        /** @composite_fence_ctx: context composite fence */
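Sketch of the flag round-trip under the renamed macros above: a migration VM records its owning tile's id in bits 7:6 of vm->flags.

	u32 flags = XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
	u8 tile_id = XE_VM_FLAG_TILE_ID(flags);	/* == tile->id */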