                 * collateral damage, and we should not pretend we can by
                 * exposing the interface.
                 */
-               if (!intel_has_reset_engine(&i915->gt))
+               if (!intel_has_reset_engine(to_gt(i915)))
                        return -ENODEV;
 
                pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
 
        if (!protected) {
                pc->uses_protected_content = false;
-       } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+       } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
                ret = -ENODEV;
        } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
                   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
                 */
                pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-               if (!intel_pxp_is_active(&i915->gt.pxp))
-                       ret = intel_pxp_start(&i915->gt.pxp);
+               if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+                       ret = intel_pxp_start(&to_gt(i915)->pxp);
        }
 
        return ret;
        intel_engine_mask_t prev_mask;
 
        /* FIXME: This is NIY for execlists */
-       if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+       if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
                return -ENODEV;
 
        if (get_user(slot, &ext->engine_index))
                sseu = &pc->legacy_rcs_sseu;
        }
 
-       ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+       ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
        if (ret)
                return ret;
 
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
                                                struct intel_sseu rcs_sseu)
 {
-       const struct intel_gt *gt = &ctx->i915->gt;
+       const struct intel_gt *gt = to_gt(ctx->i915);
        struct intel_engine_cs *engine;
        struct i915_gem_engines *e, *err;
        enum intel_engine_id id;
                 * collateral damage, and we should not pretend we can by
                 * exposing the interface.
                 */
-               if (!intel_has_reset_engine(&ctx->i915->gt))
+               if (!intel_has_reset_engine(to_gt(ctx->i915)))
                        return -ENODEV;
 
                i915_gem_context_clear_persistence(ctx);
        } else if (HAS_FULL_PPGTT(i915)) {
                struct i915_ppgtt *ppgtt;
 
-               ppgtt = i915_ppgtt_create(&i915->gt, 0);
+               ppgtt = i915_ppgtt_create(to_gt(i915), 0);
                if (IS_ERR(ppgtt)) {
                        drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
                                PTR_ERR(ppgtt));
        if (args->flags)
                return -EINVAL;
 
-       ppgtt = i915_ppgtt_create(&i915->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(i915), 0);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);
 
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
                return -EINVAL;
 
-       ret = intel_gt_terminally_wedged(&i915->gt);
+       ret = intel_gt_terminally_wedged(to_gt(i915));
        if (ret)
                return ret;
 
 
        if (ext.flags)
                return -EINVAL;
 
-       if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+       if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
                return -ENODEV;
 
        ext_data->flags |= I915_BO_PROTECTED;
 
        return err;
 }
 
-static int num_vcs_engines(const struct drm_i915_private *i915)
+static int num_vcs_engines(struct drm_i915_private *i915)
 {
-       return hweight_long(VDBOX_MASK(&i915->gt));
+       return hweight_long(VDBOX_MASK(to_gt(i915)));
 }
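
For reference, the conversion above assumes a to_gt() helper roughly along the lines of the sketch below; the real definition lives in i915_drv.h and may differ in detail (for example once multiple GTs are wired up). Because the helper takes a non-const device pointer, callers such as num_vcs_engines() in the hunk above have to drop the const qualifier from their parameter.

/* Minimal sketch of the assumed helper, not the authoritative definition. */
static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
        return &i915->gt;
}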
 
 /*
 
                goto insert;
 
        /* Attempt to reap some mmap space from dead objects */
-       err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+       err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
                                               NULL);
        if (err)
                goto err;
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct scatterlist *sg;
        struct sg_table *st;
        dma_addr_t dma;
                dst += PAGE_SIZE;
        }
 
-       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        /* We're no longer struct page backed */
        obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
 {
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;
 
        err = i915_gem_object_wait(obj,
                return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
-       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
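
The two phys-backing hunks above share one pattern: resolve the device pointer once into a local i915 so the flush call can go through to_gt(i915) rather than the longer &to_i915(obj->base.dev)->gt chain. A hypothetical, simplified illustration of that shape follows (example_flush_after_cpu_write is not a real i915 function, just a sketch of the pattern):

/*
 * Hypothetical example only: resolve the device once, then reach the GT
 * through to_gt() for the chipset flush.
 */
static void example_flush_after_cpu_write(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        /* ... CPU writes to the object's backing pages happen here ... */

        intel_gt_chipset_flush(to_gt(i915));
}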
 
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       intel_gt_suspend_prepare(&i915->gt);
+       intel_gt_suspend_prepare(to_gt(i915));
 
        i915_gem_drain_freed_objects(i915);
 }
         * machine in an unusable condition.
         */
 
-       intel_gt_suspend_late(&i915->gt);
+       intel_gt_suspend_late(to_gt(i915));
 
        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        for (phase = phases; *phase; phase++) {
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       intel_gt_resume(&i915->gt);
+       intel_gt_resume(to_gt(i915));
 
        ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
        GEM_WARN_ON(ret);
 
         */
        if (shrink & I915_SHRINK_ACTIVE)
                /* Retire requests to unpin all idle contexts */
-               intel_gt_retire_requests(&i915->gt);
+               intel_gt_retire_requests(to_gt(i915));
 
        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
 
 {
        const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_private *i915 = to_i915(dev);
        struct i915_gem_context *ctx;
        unsigned long idx;
        long ret;
 
        /* ABI: return -EIO if already wedged */
-       ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
+       ret = intel_gt_terminally_wedged(to_gt(i915));
        if (ret)
                return ret;
 
 
        enum i915_cache_level src_level, dst_level;
        int ret;
 
-       if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
+       if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
                return ERR_PTR(-EINVAL);
 
        /* With fail_gpu_migration, we always perform a GPU clear. */
                    !I915_SELFTEST_ONLY(fail_gpu_migration))
                        return ERR_PTR(-EINVAL);
 
-               intel_engine_pm_get(i915->gt.migrate.context->engine);
-               ret = intel_context_migrate_clear(i915->gt.migrate.context, dep,
+               intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+               ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, dep,
                                                  dst_st->sgl, dst_level,
                                                  i915_ttm_gtt_binds_lmem(dst_mem),
                                                  0, &rq);
                        return ERR_CAST(src_rsgt);
 
                src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
-               intel_engine_pm_get(i915->gt.migrate.context->engine);
-               ret = intel_context_migrate_copy(i915->gt.migrate.context,
+               intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+               ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
                                                 dep, src_rsgt->table.sgl,
                                                 src_level,
                                                 i915_ttm_gtt_binds_lmem(bo->resource),
                i915_refct_sgt_put(src_rsgt);
        }
 
-       intel_engine_pm_put(i915->gt.migrate.context->engine);
+       intel_engine_pm_put(to_gt(i915)->migrate.context->engine);
 
        if (ret && rq) {
                i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 
                 * On almost all of the older hw, we cannot tell the GPU that
                 * a page is readonly.
                 */
-               if (!dev_priv->gt.vm->has_read_only)
+               if (!to_gt(dev_priv)->vm->has_read_only)
                        return -ENODEV;
        }
 
 
        mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
        mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
-       ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
                return 0;
        }
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
 
                SUBTEST(igt_client_tiled_blits),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
 
                }
                if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
                        pr_err("Failed to populated %d contexts\n", nctx);
-                       intel_gt_set_wedged(&i915->gt);
+                       intel_gt_set_wedged(to_gt(i915));
                        i915_request_put(rq);
                        err = -EIO;
                        goto out_file;
                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                                pr_err("Switching between %ld contexts timed out\n",
                                       prime);
-                               intel_gt_set_wedged(&i915->gt);
+                               intel_gt_set_wedged(to_gt(i915));
                                i915_request_put(rq);
                                break;
                        }
                return 0;
 
        if (flags & TEST_RESET)
-               igt_global_reset_lock(&i915->gt);
+               igt_global_reset_lock(to_gt(i915));
 
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
 
 out_unlock:
        if (flags & TEST_RESET)
-               igt_global_reset_unlock(&i915->gt);
+               igt_global_reset_unlock(to_gt(i915));
 
        if (ret)
                pr_err("%s: Failed with %d!\n", name, ret);
                SUBTEST(igt_vm_isolation),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
 
        if (!HAS_LMEM(i915))
                return 0;
 
-       return intel_gt_live_subtests(tests, &i915->gt);
+       return intel_gt_live_subtests(tests, to_gt(i915));
 }
 
                                 struct rnd_state *prng)
 {
        const unsigned long npages = obj->base.size / PAGE_SIZE;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt_view view;
        struct i915_vma *vma;
        unsigned long page;
        if (offset >= obj->base.size)
                goto out;
 
-       intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        cpu = kmap(p) + offset_in_page(offset);
 {
        const unsigned int nreal = obj->scratch / PAGE_SIZE;
        const unsigned long npages = obj->base.size / PAGE_SIZE;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_vma *vma;
        unsigned long page;
        int err;
                if (offset >= obj->base.size)
                        continue;
 
-               intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
                p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
                cpu = kmap(p) + offset_in_page(offset);
 static void disable_retire_worker(struct drm_i915_private *i915)
 {
        i915_gem_driver_unregister__shrinker(i915);
-       intel_gt_pm_get(&i915->gt);
-       cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+       intel_gt_pm_get(to_gt(i915));
+       cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
 }
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
        igt_flush_test(i915);
-       intel_gt_pm_put(&i915->gt);
+       intel_gt_pm_put(to_gt(i915));
        i915_gem_driver_register__shrinker(i915);
 }
 
 
        /* Disable background reaper */
        disable_retire_worker(i915);
-       GEM_BUG_ON(!i915->gt.awake);
-       intel_gt_retire_requests(&i915->gt);
+       GEM_BUG_ON(!to_gt(i915)->awake);
+       intel_gt_retire_requests(to_gt(i915));
        i915_gem_drain_freed_objects(i915);
 
        /* Trim the device mmap space to only a page */
 
        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
-               if (intel_gt_is_wedged(&i915->gt))
+               if (intel_gt_is_wedged(to_gt(i915)))
                        break;
 
                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        }
 
        if (type == I915_MMAP_TYPE_GTT)
-               intel_gt_flush_ggtt_writes(&i915->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = wc_check(obj);
        if (err == -ENXIO)
                goto out_unmap;
        }
 
-       intel_gt_flush_ggtt_writes(&i915->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = access_process_vm(current, addr, &x, sizeof(x), 0);
        if (err != sizeof(x)) {
                goto out_unmap;
        }
 
-       intel_gt_flush_ggtt_writes(&i915->gt);
+       intel_gt_flush_ggtt_writes(to_gt(i915));
 
        err = __get_user(y, ptr);
        if (err) {
        }
 
        if (type == I915_MMAP_TYPE_GTT)
-               intel_gt_flush_ggtt_writes(&i915->gt);
+               intel_gt_flush_ggtt_writes(to_gt(i915));
 
        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;