if (err)
                goto err_post_unpin;
 
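+       /*
+        * Lockdep annotation: pinning may take the engine and GT wakeref
+        * mutexes below, even on paths where no wakeref changes hands.
+        */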
+       intel_engine_pm_might_get(ce->engine);
+
        if (unlikely(intel_context_is_closed(ce))) {
                err = -ENOENT;
                goto err_unlock;
 
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "i915_drv.h"
 #include "i915_request.h"
 #include "intel_engine_types.h"
 #include "intel_wakeref.h"
+#include "intel_gt_pm.h"
 
 static inline bool
 intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
 {
        return intel_wakeref_is_active(&engine->wakeref);
 }
 
 static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
 {
        return intel_wakeref_get_if_active(&engine->wakeref);
 }
 
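+/*
+ * Annotate a path that may later take engine PM. A virtual engine is
+ * backed by a mask of physical siblings, so each sibling's wakeref
+ * mutex may be taken, along with the GT-level one.
+ */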
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+       if (!intel_engine_is_virtual(engine)) {
+               intel_wakeref_might_get(&engine->wakeref);
+       } else {
+               struct intel_gt *gt = engine->gt;
+               struct intel_engine_cs *tengine;
+               intel_engine_mask_t tmp, mask = engine->mask;
+
+               for_each_engine_masked(tengine, gt, mask, tmp)
+                       intel_wakeref_might_get(&tengine->wakeref);
+       }
+       intel_gt_pm_might_get(engine->gt);
+}
+
 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
 {
        intel_wakeref_put(&engine->wakeref);
 }
 
 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 {
        intel_wakeref_unlock_wait(&engine->wakeref);
 }
 
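+/*
+ * Mirror of intel_engine_pm_might_get() for paths that may release
+ * engine PM.
+ */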
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+       if (!intel_engine_is_virtual(engine)) {
+               intel_wakeref_might_put(&engine->wakeref);
+       } else {
+               struct intel_gt *gt = engine->gt;
+               struct intel_engine_cs *tengine;
+               intel_engine_mask_t tmp, mask = engine->mask;
+
+               for_each_engine_masked(tengine, gt, mask, tmp)
+                       intel_wakeref_might_put(&tengine->wakeref);
+       }
+       intel_gt_pm_might_put(engine->gt);
+}
+
 static inline struct i915_request *
 intel_engine_create_kernel_request(struct intel_engine_cs *engine)
 {
 
 static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
 {
        return intel_wakeref_get_if_active(&gt->wakeref);
 }
 
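+/* Lockdep-only assertion that the GT wakeref mutex may be taken here. */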
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+       intel_wakeref_might_get(&gt->wakeref);
+}
+
 static inline void intel_gt_pm_put(struct intel_gt *gt)
 {
        intel_wakeref_put(&gt->wakeref);
 }
 
 static inline void intel_gt_pm_put_async(struct intel_gt *gt)
 {
        intel_wakeref_put_async(&gt->wakeref);
 }
 
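+/* Lockdep-only assertion for paths that may release the GT wakeref. */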
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+       intel_wakeref_might_put(&gt->wakeref);
+}
+
 #define with_intel_gt_pm(gt, tmp) \
        for (tmp = 1, intel_gt_pm_get(gt); tmp; \
             intel_gt_pm_put(gt), tmp = 0)
 
 
 static int guc_context_pin(struct intel_context *ce, void *vaddr)
 {
-       return __guc_context_pin(ce, ce->engine, vaddr);
+       int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
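+       /*
+        * Keep the engine awake while the context is pinned. Barrier
+        * contexts are skipped: they are used on the engine parking
+        * path, where taking engine PM would be circular.
+        */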
+       if (likely(!ret && !intel_context_is_barrier(ce)))
+               intel_engine_pm_get(ce->engine);
+
+       return ret;
 }
 
 static void guc_context_unpin(struct intel_context *ce)
 {
        struct intel_guc *guc = ce_to_guc(ce);
 
        unpin_guc_id(guc, ce);
        lrc_unpin(ce);
+
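+       /* Release the engine PM reference taken in guc_context_pin(). */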
+       if (likely(!intel_context_is_barrier(ce)))
+               intel_engine_pm_put_async(ce->engine);
 }
 
 static void guc_context_post_unpin(struct intel_context *ce)
 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
 {
        struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+       int ret = __guc_context_pin(ce, engine, vaddr);
+       intel_engine_mask_t tmp, mask = ce->engine->mask;
+
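+       /* A virtual context may run on any sibling: hold PM on each one. */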
+       if (likely(!ret))
+               for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+                       intel_engine_pm_get(engine);
 
-       return __guc_context_pin(ce, engine, vaddr);
+       return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+       intel_engine_mask_t tmp, mask = ce->engine->mask;
+       struct intel_engine_cs *engine;
+       struct intel_guc *guc = ce_to_guc(ce);
+
+       GEM_BUG_ON(context_enabled(ce));
+       GEM_BUG_ON(intel_context_is_barrier(ce));
+
+       unpin_guc_id(guc, ce);
+       lrc_unpin(ce);
+
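+       /* Drop the per-sibling references taken in guc_virtual_context_pin(). */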
+       for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+               intel_engine_pm_put_async(engine);
 }
 
 static void guc_virtual_context_enter(struct intel_context *ce)
 
        .pre_pin = guc_virtual_context_pre_pin,
        .pin = guc_virtual_context_pin,
-       .unpin = guc_context_unpin,
+       .unpin = guc_virtual_context_unpin,
        .post_unpin = guc_context_post_unpin,
 
        .ban = guc_context_ban,
 
        __INTEL_WAKEREF_PUT_LAST_BIT__
 };
 
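+/**
+ * intel_wakeref_might_get: Annotate a path that may take the wakeref
+ * @wf: the wakeref
+ *
+ * Tells lockdep that @wf->mutex may be acquired along this path, so
+ * misuse is reported even on runs where the wakeref is already held.
+ */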
+static inline void
+intel_wakeref_might_get(struct intel_wakeref *wf)
+{
+       might_lock(&wf->mutex);
+}
+
 /**
  * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
                            FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
 }
 
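+/**
+ * intel_wakeref_might_put: Annotate a path that may release the wakeref
+ * @wf: the wakeref
+ *
+ * The final put acquires @wf->mutex; tell lockdep it may be taken here.
+ */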
+static inline void
+intel_wakeref_might_put(struct intel_wakeref *wf)
+{
+       might_lock(&wf->mutex);
+}
+
 /**
  * intel_wakeref_lock: Lock the wakeref (mutex)
  * @wf: the wakeref