        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        int ret;
        workload->complete = complete_execlist_workload;
        workload->status = -EINPROGRESS;
        workload->emulate_schedule_in = emulate_schedule_in;
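+       /* not scanned/shadowed yet; see intel_gvt_scan_and_shadow_workload() */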
+       workload->shadowed = false;
 
        if (ring_id == RCS) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                return ret;
        }
 
+       /* Only scan and shadow the first workload in the queue
+        * as there is only one pre-allocated buf-obj for shadow.
+        */
+       if (list_empty(workload_q_head(vgpu, ring_id))) {
+               mutex_lock(&dev_priv->drm.struct_mutex);
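+               /* failure is tolerated here: workload->shadowed stays false
+                * and shadowing is retried when the workload is dispatched
+                */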
+               intel_gvt_scan_and_shadow_workload(workload);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+       }
+
        queue_workload(workload);
        return 0;
 }
 
        struct intel_vgpu *vgpu = workload->vgpu;
        int ret;
 
+       /* scan and shadow a workload only once; skip if already done */
+       if (workload->shadowed)
+               return 0;
+
        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
        }
 
        ret = populate_shadow_context(workload);
+       if (ret)
+               goto out;
+
+       workload->shadowed = true;
 
 out:
        return ret;