SUBTEST(live_active_barrier),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_subtests(tests, i915);
 
                SUBTEST(igt_gem_ww_ctx),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
 
                return -ENOMEM;
 
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-               err = i915_subtests(tests, &i915->gt);
+               err = i915_subtests(tests, to_gt(i915));
 
        mock_destroy_device(i915);
        return err;
                SUBTEST(igt_evict_contexts),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
-       return intel_gt_live_subtests(tests, &i915->gt);
+       return intel_gt_live_subtests(tests, to_gt(i915));
 }
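
For reference, the to_gt() helper these hunks convert to is, at this point in the series, a thin accessor for the device's root GT, roughly equivalent to the open-coded &i915->gt it replaces. A minimal sketch (not necessarily the exact upstream definition):

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	/* Return the root GT; hiding the embedded-struct layout means the
	 * member can later move or be renamed without touching callers. */
	return &i915->gt;
}

Centralising the lookup is what makes the rest of this churn worthwhile: later multi-GT work can change where the root GT lives without another tree-wide sweep.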
 
        if (!HAS_PPGTT(dev_priv))
                return 0;
 
-       ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);
 
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+       ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_free;
 
        if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
                return 0;
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        err = alloc_empty_config(&i915->perf);
 
        __i915_gem_object_flush_map(obj, 0, 64);
        i915_gem_object_unpin_map(obj);
 
-       intel_gt_chipset_flush(&i915->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vma = i915_vma_instance(obj, i915->gt.vm, NULL);
+       vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        __i915_gem_object_flush_map(obj, 0, 64);
        i915_gem_object_unpin_map(obj);
 
-       intel_gt_chipset_flush(&i915->gt);
+       intel_gt_chipset_flush(to_gt(i915));
 
        return vma;
 
                SUBTEST(live_breadcrumbs_smoketest),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_subtests(tests, i915);
                SUBTEST(perf_parallel_engines),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_subtests(tests, i915);
 
        struct drm_i915_private *i915 = data;
 
        /* The selftests expect an idle system */
-       if (intel_gt_pm_wait_for_idle(&i915->gt))
+       if (intel_gt_pm_wait_for_idle(to_gt(i915)))
                return -EIO;
 
-       return intel_gt_terminally_wedged(&i915->gt);
+       return intel_gt_terminally_wedged(to_gt(i915));
 }
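
Assembled for context, the setup hook touched above reads roughly as follows after the conversion; the __i915_live_setup name is inferred from the __i915_live_teardown definition that follows, while the two GT accesses come straight from the hunk:

int __i915_live_setup(void *data)
{
	struct drm_i915_private *i915 = data;

	/* The selftests expect an idle system */
	if (intel_gt_pm_wait_for_idle(to_gt(i915)))
		return -EIO;

	return intel_gt_terminally_wedged(to_gt(i915));
}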
 
 int __i915_live_teardown(int err, void *data)
 
 
 int igt_flush_test(struct drm_i915_private *i915)
 {
-       struct intel_gt *gt = &i915->gt;
+       struct intel_gt *gt = to_gt(i915);
        int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
 
        cond_resched();
 
                        const char *func,
                        const char *name)
 {
-       struct intel_gt *gt = &i915->gt;
+       struct intel_gt *gt = to_gt(i915);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err;
                return -EIO;
        }
 
-       for_each_engine(engine, &i915->gt, id) {
+       for_each_engine(engine, to_gt(i915), id) {
                if (t->reset_engine[id] ==
                    i915_reset_engine_count(&i915->gpu_error, engine))
                        continue;
 
                return 0;
        }
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
                SUBTEST(perf_memcpy),
        };
 
-       if (intel_gt_is_wedged(&i915->gt))
+       if (intel_gt_is_wedged(to_gt(i915)))
                return 0;
 
        return i915_live_subtests(tests, i915);
 
                SUBTEST(live_forcewake_domains),
        };
 
-       return intel_gt_live_subtests(tests, &i915->gt);
+       return intel_gt_live_subtests(tests, to_gt(i915));
 }
 
 
 void mock_device_flush(struct drm_i915_private *i915)
 {
-       struct intel_gt *gt = &i915->gt;
+       struct intel_gt *gt = to_gt(i915);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
                goto out;
 
        mock_device_flush(i915);
-       intel_gt_driver_remove(&i915->gt);
+       intel_gt_driver_remove(to_gt(i915));
 
        i915_gem_drain_workqueue(i915);
        i915_gem_drain_freed_objects(i915);
        destroy_workqueue(i915->wq);
 
        intel_region_ttm_device_fini(i915);
-       intel_gt_driver_late_release(&i915->gt);
+       intel_gt_driver_late_release(to_gt(i915));
        intel_memory_regions_driver_release(i915);
 
        drm_mode_config_cleanup(&i915->drm);
        spin_lock_init(&i915->gpu_error.lock);
 
        i915_gem_init__mm(i915);
-       intel_gt_init_early(&i915->gt, i915);
-       __intel_gt_init_early(&i915->gt, i915);
+       intel_gt_init_early(to_gt(i915), i915);
+       __intel_gt_init_early(to_gt(i915), i915);
        mock_uncore_init(&i915->uncore, i915);
-       atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
-       i915->gt.awake = -ENODEV;
+       atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
+       to_gt(i915)->awake = -ENODEV;
 
        ret = intel_region_ttm_device_init(i915);
        if (ret)
        mock_init_contexts(i915);
 
        mock_init_ggtt(i915, &i915->ggtt);
-       i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
+       to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm);
 
        mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
-       i915->gt.info.engine_mask = BIT(0);
+       to_gt(i915)->info.engine_mask = BIT(0);
 
-       i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0);
-       if (!i915->gt.engine[RCS0])
+       to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+       if (!to_gt(i915)->engine[RCS0])
                goto err_unlock;
 
-       if (mock_engine_init(i915->gt.engine[RCS0]))
+       if (mock_engine_init(to_gt(i915)->engine[RCS0]))
                goto err_context;
 
-       __clear_bit(I915_WEDGED, &i915->gt.reset.flags);
+       __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
        intel_engines_driver_register(i915);
 
        i915->do_release = true;
        return i915;
 
 err_context:
-       intel_gt_driver_remove(&i915->gt);
+       intel_gt_driver_remove(to_gt(i915));
 err_unlock:
        destroy_workqueue(i915->wq);
 err_drv:
        intel_region_ttm_device_fini(i915);
 err_ttm:
-       intel_gt_driver_late_release(&i915->gt);
+       intel_gt_driver_late_release(to_gt(i915));
        intel_memory_regions_driver_release(i915);
        drm_mode_config_cleanup(&i915->drm);
        mock_destroy_device(i915);
 
        if (!ppgtt)
                return NULL;
 
-       ppgtt->vm.gt = &i915->gt;
+       ppgtt->vm.gt = to_gt(i915);
        ppgtt->vm.i915 = i915;
        ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
        ppgtt->vm.dma = i915->drm.dev;
 {
        memset(ggtt, 0, sizeof(*ggtt));
 
-       ggtt->vm.gt = &i915->gt;
+       ggtt->vm.gt = to_gt(i915);
        ggtt->vm.i915 = i915;
        ggtt->vm.is_ggtt = true;
 
        ggtt->vm.vma_ops.clear_pages = clear_pages;
 
        i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-       i915->gt.ggtt = ggtt;
+       to_gt(i915)->ggtt = ggtt;
 }
 
 void mock_fini_ggtt(struct i915_ggtt *ggtt)
 
 void mock_uncore_init(struct intel_uncore *uncore,
                      struct drm_i915_private *i915)
 {
-       intel_uncore_init_early(uncore, &i915->gt);
+       intel_uncore_init_early(uncore, to_gt(i915));
 
        ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
        ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);