/*
 * sched_state bits [4..15] (0xfff << 4) form a 12-bit field for context
 * blocking; SCHED_STATE_BLOCKED is the least-significant bit of that
 * field.  The field appears to be used as a block counter (see the
 * context_blocked(ce) > 1 test below) -- TODO confirm against the full
 * file.
 */
#define SCHED_STATE_BLOCKED_SHIFT                      4
 #define SCHED_STATE_BLOCKED            BIT(SCHED_STATE_BLOCKED_SHIFT)
 #define SCHED_STATE_BLOCKED_MASK       (0xfff << SCHED_STATE_BLOCKED_SHIFT)
+
 /*
  * NOTE(review): this body cannot be correct as shown -- the function is
  * declared void yet returns &ce->guc_blocked, which will not compile.
  * It looks like lines from a fence-returning helper (presumably
  * guc_context_block(), which returns the guc_blocked fence) were
  * spliced into this function; the real initialisation body must be
  * restored from upstream before this patch can be applied.
  */
 static inline void init_sched_state(struct intel_context *ce)
 {
        /* Only should be called from guc_lrc_desc_pin() */
        return &ce->guc_blocked; /* BUG: value returned from a void function */
 }
 
+/*
+ * SCHED_STATE_MULTI_BLOCKED_MASK is the blocked field with its LSB
+ * cleared, i.e. it is non-zero exactly when the blocked value is > 1
+ * (more than one outstanding block).
+ *
+ * SCHED_STATE_NO_UNBLOCK collects every sched_state condition under
+ * which a context must not be unblocked: still multi-blocked, a
+ * disable is pending, or the context is banned.
+ */
+#define SCHED_STATE_MULTI_BLOCKED_MASK \
+       (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
+#define SCHED_STATE_NO_UNBLOCK \
+       (SCHED_STATE_MULTI_BLOCKED_MASK | \
+        SCHED_STATE_PENDING_DISABLE | \
+        SCHED_STATE_BANNED)
+
+/*
+ * context_cant_unblock - check whether @ce may not be unblocked yet.
+ *
+ * Returns true when any of the following holds: sched_state has a
+ * SCHED_STATE_NO_UNBLOCK bit set (multi-blocked, pending disable, or
+ * banned), the guc_id is invalid, the LRC descriptor is not registered
+ * with the GuC, or the context is not pinned.
+ *
+ * Caller must hold ce->guc_state.lock (asserted below).
+ */
+static bool context_cant_unblock(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
+               context_guc_id_invalid(ce) ||
+               !lrc_desc_registered(ce_to_guc(ce), ce->guc_id) ||
+               !intel_context_is_pinned(ce);
+}
+
 static void guc_context_unblock(struct intel_context *ce)
 {
        struct intel_guc *guc = ce_to_guc(ce);
        spin_lock_irqsave(&ce->guc_state.lock, flags);
 
        if (unlikely(submission_disabled(guc) ||
-                    !intel_context_is_pinned(ce) ||
-                    context_pending_disable(ce) ||
-                    context_blocked(ce) > 1)) {
+                    context_cant_unblock(ce))) {
                enable = false;
        } else {
                enable = true;