struct execlist_context_status_pointer_format ctx_status_ptr;
        u32 write_pointer;
        u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+       unsigned long hwsp_gpa;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
                        _EL_OFFSET_STATUS_PTR);
        ctx_status_ptr.write_ptr = write_pointer;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 
+       /* Update the CSB and CSB write pointer in HWSP */
+       hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+                                        vgpu->hws_pga[ring_id]);
+       if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
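+               /*
+                * Each CSB entry is two dwords, so entry 'write_pointer'
+                * sits at dword I915_HWS_CSB_BUF0_INDEX plus two dwords
+                * per preceding entry.
+                */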
+               intel_gvt_hypervisor_write_gpa(vgpu,
+                       hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
+                       write_pointer * 8,
+                       status, 8);
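+               /*
+                * Mirror the write pointer into the HWSP as well, so a
+                * guest reading the CSB through the HWSP sees the new
+                * entry.
+                */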
+               intel_gvt_hypervisor_write_gpa(vgpu,
+                       hwsp_gpa +
+                       intel_hws_csb_write_index(dev_priv) * 4,
+                       &write_pointer, 4);
+       }
+
        gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
                vgpu->id, write_pointer, offset, status->ldw, status->udw);
 
 
        return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
 }
 
+static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       u32 value = *(u32 *)p_data;
+       int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+
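+       /* the guest hands us a GGTT offset; reject anything out of range */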
+       if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+               gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
+                             vgpu->id, offset, value);
+               return -EINVAL;
+       }
+       /*
+        * All HWSP register writes need to be emulated so that the host
+        * can keep the VM's CSB status up to date. The registers handled
+        * here cover BDW, SKL and other platforms with the same HWSP
+        * register layout.
+        */
+       if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+               gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
+                            vgpu->id, offset);
+               return -EINVAL;
+       }
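+       /* remember the guest's HWSP address for later CSB emulation */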
+       vgpu->hws_pga[ring_id] = value;
+       gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
+                    vgpu->id, value, offset);
+
+       return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
+}
+
 static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
        MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
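+       /* intercept HWS_PGA writes so the guest HWSP location is tracked */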
-       MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+       MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
 
        MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 
        vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
        vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
        vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+
        vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
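+       /* advertise to the guest that the host mirrors the CSB into the HWSP */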
+       vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
                vgpu_aperture_gmadr_base(vgpu);
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =