vm = ctx->vm;
        if (!vm)
-               vm = &ctx->i915->ggtt.vm;
+               vm = &to_gt(ctx->i915)->ggtt->vm;
        vm = i915_vm_get(vm);
 
        return vm;
 
 {
        struct drm_i915_private *i915 =
                container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
-       return &i915->ggtt;
+       return to_gt(i915)->ggtt;
 }
 
 static void reloc_cache_unmap(struct reloc_cache *cache)
 
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *i915 = to_i915(dev);
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
-       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        bool write = area->vm_flags & VM_WRITE;
        struct i915_gem_ww_ctx ww;
        intel_wakeref_t wakeref;
        assert_rpm_wakelock_held(rpm);
 
        /* Mark as being mmapped into userspace for later revocation */
-       mutex_lock(&i915->ggtt.vm.mutex);
+       mutex_lock(&ggtt->vm.mutex);
        if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
-               list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
-       mutex_unlock(&i915->ggtt.vm.mutex);
+               list_add(&obj->userfault_link, &ggtt->userfault_list);
+       mutex_unlock(&ggtt->vm.mutex);
 
        /* Track the mmo associated with the fenced vma */
        vma->mmo = mmo;
 
        if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-               intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+               intel_wakeref_auto(&ggtt->userfault_wakeref,
                                   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
        if (write) {
         * wakeref.
         */
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-       mutex_lock(&i915->ggtt.vm.mutex);
+       mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
 
        if (!obj->userfault_count)
                goto out;
        wmb();
 
 out:
-       mutex_unlock(&i915->ggtt.vm.mutex);
+       mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
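
Both mmap paths above keep the same ordering: take a runtime-PM wakeref
before the GGTT VM mutex and release in reverse. A condensed sketch of
that bracket under the new accessor (ggtt_userfault_bracket is a
hypothetical name; the real paths do their bookkeeping between lock and
unlock):

/* Illustrative only: the lock/wakeref ordering used by the fault and
 * release-mmap paths above; bookkeeping elided. */
static void ggtt_userfault_bracket(struct drm_i915_private *i915)
{
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        mutex_lock(&ggtt->vm.mutex);

        /* ... update obj->userfault_count and ggtt->userfault_list ... */

        mutex_unlock(&ggtt->vm.mutex);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}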
 
                          u32 handle,
                          u64 *offset)
 {
+       struct drm_i915_private *i915 = to_i915(dev);
        enum i915_mmap_type mmap_type;
 
-       if (HAS_LMEM(to_i915(dev)))
+       if (HAS_LMEM(i915))
                mmap_type = I915_MMAP_TYPE_FIXED;
        else if (pat_enabled())
                mmap_type = I915_MMAP_TYPE_WC;
-       else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+       else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                return -ENODEV;
        else
                mmap_type = I915_MMAP_TYPE_GTT;
 
        switch (args->flags) {
        case I915_MMAP_OFFSET_GTT:
-               if (!i915_ggtt_has_aperture(&i915->ggtt))
+               if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                        return -ENODEV;
                type = I915_MMAP_TYPE_GTT;
                break;
 
 {
        GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
-       intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
+       intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
        flush_workqueue(i915->wq);
 
        /*
 
                                               I915_SHRINK_VMAPS);
 
        /* We also want to clear any cached iomaps as they wrap vmap */
-       mutex_lock(&i915->ggtt.vm.mutex);
+       mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
        list_for_each_entry_safe(vma, next,
-                                &i915->ggtt.vm.bound_list, vm_link) {
+                                &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;
                struct drm_i915_gem_object *obj = vma->obj;
 
 
                i915_gem_object_unlock(obj);
        }
-       mutex_unlock(&i915->ggtt.vm.mutex);
+       mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
 
 static int i915_adjust_stolen(struct drm_i915_private *i915,
                              struct resource *dsm)
 {
-       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        struct resource *r;
 
 
 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages =
                i915_pages_create_for_stolen(obj->base.dev,
                                             obj->stolen->start,
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       dbg_poison(&to_i915(obj->base.dev)->ggtt,
+       dbg_poison(to_gt(i915)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_INUSE);
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
        /* Should only be called from i915_gem_object_release_stolen() */
 
-       dbg_poison(&to_i915(obj->base.dev)->ggtt,
+       dbg_poison(to_gt(i915)->ggtt,
                   sg_dma_address(pages->sgl),
                   sg_dma_len(pages->sgl),
                   POISON_FREE);
 
 i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
                              int tiling_mode, unsigned int stride)
 {
-       struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma, *vn;
        LIST_HEAD(unbind);
        int ret = 0;
        struct drm_i915_gem_object *obj;
        int err;
 
-       if (!dev_priv->ggtt.num_fences)
+       if (!to_gt(dev_priv)->ggtt->num_fences)
                return -EOPNOTSUPP;
 
        obj = i915_gem_object_lookup(file, args->handle);
                args->stride = 0;
        } else {
                if (args->tiling_mode == I915_TILING_X)
-                       args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
+                       args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
                else
-                       args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
+                       args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
 
                /* Hide bit 17 swizzling from the user.  This prevents old Mesa
                 * from aborting the application on sw fallbacks to bit 17,
        struct drm_i915_gem_object *obj;
        int err = -ENOENT;
 
-       if (!dev_priv->ggtt.num_fences)
+       if (!to_gt(dev_priv)->ggtt->num_fences)
                return -EOPNOTSUPP;
 
        rcu_read_lock();
 
        switch (args->tiling_mode) {
        case I915_TILING_X:
-               args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
+               args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
                break;
        case I915_TILING_Y:
-               args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
+               args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
                break;
        default:
        case I915_TILING_NONE:
 
 
 static bool bad_swizzling(struct drm_i915_private *i915)
 {
-       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 
        if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                return true;
 
                goto out_file;
        }
 
-       vm = ctx->vm ?: &i915->ggtt.alias->vm;
+       vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm;
        if (!vm || !vm->has_read_only) {
                err = 0;
                goto out_file;
 
        int tiling;
        int err;
 
-       if (!i915_ggtt_has_aperture(&i915->ggtt))
+       if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                return 0;
 
        /* We want to check the page mapping and fencing of a large object
 
        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
-                             (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                             (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
                tile.tiling = tiling;
                switch (tiling) {
                case I915_TILING_X:
-                       tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+                       tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
-                       tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+                       tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
                        break;
                }
 
        IGT_TIMEOUT(end);
        int err;
 
-       if (!i915_ggtt_has_aperture(&i915->ggtt))
+       if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                return 0;
 
        /*
 
        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
-                             (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                             (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
                        break;
 
                case I915_TILING_X:
-                       tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+                       tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
-                       tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+                       tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
                        break;
                }
 
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
        if (obj->ops->mmap_offset)
                return false;
 
        if (type == I915_MMAP_TYPE_GTT &&
-           !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+           !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                return false;
 
        i915_gem_object_lock(obj, NULL);
 
 
        obj = huge_gem_object(i915,
                              nreal * PAGE_SIZE,
-                             i915->ggtt.vm.total + PAGE_SIZE);
+                             to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
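
Sites that dereference the GGTT more than once read better with the
pointer cached in a local, as vm_fault_gtt does above. For example
(hypothetical helper, not part of this patch):

/* Hypothetical helper showing the cached-pointer style after the
 * conversion. */
static bool ggtt_has_fencing(struct drm_i915_private *i915)
{
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        return i915_ggtt_has_aperture(ggtt) && ggtt->num_fences;
}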