reg.addr += hwe->mmio_base;
 
-       xe_mmio_write32(hwe->gt, reg, val);
+       xe_mmio_write32(&hwe->gt->mmio, reg, val);
 }
 
 /**
 
        reg.addr += hwe->mmio_base;
 
-       return xe_mmio_read32(hwe->gt, reg);
+       return xe_mmio_read32(&hwe->gt->mmio, reg);
 }
 
 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
                xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
 
        if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
-               xe_mmio_write32(hwe->gt, RCU_MODE,
+               xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
                                _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
 
        xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
            hwe->class != XE_ENGINE_CLASS_RENDER)
                return false;
 
-       return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+       return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
 }
 
 void
 
        xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-       media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
+       media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
 
        /*
         * Pre-Xe_HP platforms had register bits representing absent engines,
 
        xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-       bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
+       bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
        bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
 
        /* BCS0 is always present; only BCS1-BCS8 may be fused off */
        struct xe_device *xe = gt_to_xe(gt);
        u32 ccs_mask;
 
-       ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
+       ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
        ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
 
        for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
                gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
 
                /* interrupts were previously enabled, so turn them off */
-               xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0);
-               xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0);
+               xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
+               xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
 
                drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
        }
                                       struct xe_hw_engine_snapshot *snapshot)
 {
        struct xe_gt *gt = hwe->gt;
+       struct xe_mmio *mmio = &gt->mmio;
        struct xe_device *xe = gt_to_xe(gt);
        unsigned int dss;
        u16 group, instance;
 
        if (is_slice_common_per_gslice(xe) == false) {
                snapshot->reg.instdone.slice_common[0] =
-                       xe_mmio_read32(gt, SC_INSTDONE);
+                       xe_mmio_read32(mmio, SC_INSTDONE);
                snapshot->reg.instdone.slice_common_extra[0] =
-                       xe_mmio_read32(gt, SC_INSTDONE_EXTRA);
+                       xe_mmio_read32(mmio, SC_INSTDONE_EXTRA);
                snapshot->reg.instdone.slice_common_extra2[0] =
-                       xe_mmio_read32(gt, SC_INSTDONE_EXTRA2);
+                       xe_mmio_read32(mmio, SC_INSTDONE_EXTRA2);
        } else {
                for_each_geometry_dss(dss, gt, group, instance) {
                        snapshot->reg.instdone.slice_common[dss] =
        xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
 
        if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
-               snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
+               snapshot->reg.rcu_mode = xe_mmio_read32(&hwe->gt->mmio, RCU_MODE);
 
        return snapshot;
 }
 
 u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
 {
-       return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base));
+       return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
 }
 
 enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)