}
 }
 
-/*
- * NOTE(review): deletion side of a move, not a removal — the identical body
- * is re-added below this hunk with the `static` dropped, so the helper can
- * be called from the new error-unwind labels (and, presumably, from other
- * GVT files — confirm a prototype was added to the corresponding header).
- */
-static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-       if (!wa_ctx->indirect_ctx.obj)
-               return;
-
-       i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
-       i915_gem_object_put(wa_ctx->indirect_ctx.obj);
-}
-
 /*
  * complete_execlist_workload - finish handling of an execlist workload.
  * NOTE(review): this is unchanged diff context; the hunk may elide part of
  * the body. In the visible lines `vgpu` is assigned but never used —
  * presumably the elided lines use it; confirm against the full file.
  */
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
 
        return 0;
 }
 
+/*
+ * release_shadow_wa_ctx - drop the pinned mapping and the object reference
+ * taken when the indirect-context workaround buffer was shadowed.
+ *
+ * Safe to call unconditionally: it is a no-op when no indirect-context
+ * object was set up (non-RCS rings, or zero indirect_ctx.size).  The
+ * `static` qualifier is dropped here so the error-unwind path (err_shadow
+ * label below) — and presumably callers outside this file — can reuse it;
+ * verify a matching extern prototype exists in the header.
+ *
+ * NOTE(review): indirect_ctx.obj is left non-NULL after the final put, so
+ * calling this twice on the same wa_ctx would double-release — confirm no
+ * path does that.
+ */
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+       if (!wa_ctx->indirect_ctx.obj)
+               return;
+
+       i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+       i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
 
        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
-               goto out;
+               goto err_scan;
 
        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
-                       goto out;
+                       goto err_scan;
        }
 
        /* pin shadow context by gvt even the shadow context will be pinned
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                gvt_vgpu_err("fail to pin shadow context\n");
-               goto out;
+               goto err_shadow;
        }
 
        ret = populate_shadow_context(workload);
        if (ret)
-               goto out;
+               goto err_unpin;
 
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                ret = PTR_ERR(rq);
-               goto out;
+               goto err_unpin;
        }
 
        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
        workload->req = i915_gem_request_get(rq);
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
-               goto out;
+       /* Success: the context pin and the request reference stay held for
+        * the workload's execution; nothing to unwind.
+        */
+               goto err_unpin;
        workload->shadowed = true;
+       return 0;
 
-out:
+       /*
+        * Error unwind, in reverse order of acquisition.  Each label undoes
+        * only what was acquired before the failing step, replacing the old
+        * catch-all "out:" label, which performed no cleanup at all (the
+        * context pin and the shadowed wa_ctx were previously left held on
+        * failure).  release_shadow_wa_ctx() is a no-op when no indirect
+        * context was shadowed, so err_shadow is safe for non-RCS rings too.
+        */
+err_unpin:
+       engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+       release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
        return ret;
 }