offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
-               gen9_render_mocs[ring_id][i] = I915_READ(offset);
+               gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
                I915_WRITE(offset, vgpu_vreg(vgpu, offset));
                offset.reg += 4;
        }
        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
-                       gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-                       I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+                       gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+                       I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
                        l3_offset.reg += 4;
                }
        }
 
        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
-               vgpu_vreg(vgpu, offset) = I915_READ(offset);
-               I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+               vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+               I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
                offset.reg += 4;
        }
 
        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
-                       vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
-                       I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+                       vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+                       I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
                        l3_offset.reg += 4;
                }
        }
                if (mmio->ring_id != ring_id)
                        continue;
 
-               mmio->value = I915_READ(mmio->reg);
+               mmio->value = I915_READ_FW(mmio->reg);
 
                /*
                 * if it is an inhibit context, load in_context mmio
                else
                        v = vgpu_vreg(vgpu, mmio->reg);
 
-               I915_WRITE(mmio->reg, v);
+               I915_WRITE_FW(mmio->reg, v);
                last_reg = mmio->reg;
 
                trace_render_mmio(vgpu->id, "load",
 
        /* Make sure the swiched MMIOs has taken effect. */
        if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
-               POSTING_READ(last_reg);
+               I915_READ_FW(last_reg);
 
        handle_tlb_pending_event(vgpu, ring_id);
 }
                if (mmio->ring_id != ring_id)
                        continue;
 
-               vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+               vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
 
                if (mmio->mask) {
                        vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
                if (mmio->in_context)
                        continue;
 
-               I915_WRITE(mmio->reg, v);
+               I915_WRITE_FW(mmio->reg, v);
                last_reg = mmio->reg;
 
                trace_render_mmio(vgpu->id, "restore",
 
        /* Make sure the swiched MMIOs has taken effect. */
        if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
-               POSTING_READ(last_reg);
+               I915_READ_FW(last_reg);
 }
 
 /**
 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
                           struct intel_vgpu *next, int ring_id)
 {
+       struct drm_i915_private *dev_priv;
+
        if (WARN_ON(!pre && !next))
                return;
 
        gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
                       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
 
+       dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+       /**
+        * We are using the raw MMIO access wrapper to improve
+        * performance for batch MMIO read/write, so we need to
+        * handle forcewake manually.
+        */
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
        /**
         * TODO: Optimize for vGPU to vGPU switch by merging
         * switch_mmio_to_host() and switch_mmio_to_vgpu().
 
        if (next)
                switch_mmio_to_vgpu(next, ring_id);
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }