return intel_guc_slpc_is_used(guc);
 }
 
+static int guc_sched_disable_delay_ms_get(void *data, u64 *val)
+{
+       struct intel_guc *guc = data;
+
+       if (!intel_guc_submission_is_used(guc))
+               return -ENODEV;
+
+       *val = guc->submission_state.sched_disable_delay_ms;
+
+       return 0;
+}
+
+static int guc_sched_disable_delay_ms_set(void *data, u64 val)
+{
+       struct intel_guc *guc = data;
+
+       if (!intel_guc_submission_is_used(guc))
+               return -ENODEV;
+
+       guc->submission_state.sched_disable_delay_ms = val;
+
+       return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_delay_ms_fops,
+                       guc_sched_disable_delay_ms_get,
+                       guc_sched_disable_delay_ms_set, "%lld\n");
+
+static int guc_sched_disable_gucid_threshold_get(void *data, u64 *val)
+{
+       struct intel_guc *guc = data;
+
+       if (!intel_guc_submission_is_used(guc))
+               return -ENODEV;
+
+       *val = guc->submission_state.sched_disable_gucid_threshold;
+       return 0;
+}
+
+static int guc_sched_disable_gucid_threshold_set(void *data, u64 val)
+{
+       struct intel_guc *guc = data;
+
+       if (!intel_guc_submission_is_used(guc))
+               return -ENODEV;
+
+       if (val > intel_guc_sched_disable_gucid_threshold_max(guc))
+               guc->submission_state.sched_disable_gucid_threshold =
+                       intel_guc_sched_disable_gucid_threshold_max(guc);
+       else
+               guc->submission_state.sched_disable_gucid_threshold = val;
+
+       return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_gucid_threshold_fops,
+                       guc_sched_disable_gucid_threshold_get,
+                       guc_sched_disable_gucid_threshold_set, "%lld\n");
+
 void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
 {
        static const struct intel_gt_debugfs_file files[] = {
                { "guc_info", &guc_info_fops, NULL },
                { "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
                { "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
+               { "guc_sched_disable_delay_ms", &guc_sched_disable_delay_ms_fops, NULL },
+               { "guc_sched_disable_gucid_threshold", &guc_sched_disable_gucid_threshold_fops,
+                  NULL },
        };
 
        if (!intel_guc_is_supported(guc))
 
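For quick experiments the two knobs above can be poked from userspace once
debugfs is mounted. A minimal sketch follows; the exact directory is an
assumption (it depends on the DRM card index and on how the GT directory is
named on a given kernel), so adjust DEBUGFS_DIR for your system.

/* read/adjust the GuC schedule-disable knobs via debugfs (run as root) */
#include <stdio.h>

#define DEBUGFS_DIR "/sys/kernel/debug/dri/0/gt/uc"     /* assumed location */

static long long read_knob(const char *name)
{
        char path[256];
        long long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", DEBUGFS_DIR, name);
        f = fopen(path, "r");
        if (!f || fscanf(f, "%lld", &val) != 1)
                val = -1;
        if (f)
                fclose(f);
        return val;
}

static int write_knob(const char *name, long long val)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", DEBUGFS_DIR, name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%lld\n", val);
        fclose(f);
        return 0;
}

int main(void)
{
        printf("sched_disable_delay_ms = %lld\n",
               read_knob("guc_sched_disable_delay_ms"));
        printf("sched_disable_gucid_threshold = %lld\n",
               read_knob("guc_sched_disable_gucid_threshold"));

        /* e.g. switch the delay off entirely while debugging */
        return write_knob("guc_sched_disable_delay_ms", 0) ? 1 : 0;
}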
  * corresponding G2H returns indicating the scheduling disable operation has
  * completed it is safe to unpin the context. While a disable is in flight it
  * isn't safe to resubmit the context so a fence is used to stall all future
- * requests of that context until the G2H is returned.
+ * requests of that context until the G2H is returned. Because this interaction
+ * with the GuC takes a non-zero amount of time, we delay the disabling of
+ * scheduling after the pin count goes to zero by a configurable period of time
+ * (see SCHED_DISABLE_DELAY_MS). The intent is to give the user a window of
+ * time to resubmit something on the context before paying for this costly
+ * operation. The delay is only applied if the context isn't closed and the
+ * guc_id usage is below a threshold (see
+ * NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD).
  *
  * Context deregistration:
  * Before a context can be destroyed or if we steal its guc_id we must
        if (unlikely(ret < 0))
                return ret;
 
+       if (!intel_context_is_parent(ce))
+               ++guc->submission_state.guc_ids_in_use;
+
        ce->guc_id.id = ret;
        return 0;
 }
        GEM_BUG_ON(intel_context_is_child(ce));
 
        if (!context_guc_id_invalid(ce)) {
-               if (intel_context_is_parent(ce))
+               if (intel_context_is_parent(ce)) {
                        bitmap_release_region(guc->submission_state.guc_ids_bitmap,
                                              ce->guc_id.id,
                                              order_base_2(ce->parallel.number_children
                                                           + 1));
-               else
+               } else {
+                       --guc->submission_state.guc_ids_in_use;
                        ida_simple_remove(&guc->submission_state.guc_ids,
                                          ce->guc_id.id);
+               }
                clr_ctx_id_mapping(guc, ce->guc_id.id);
                set_context_guc_id_invalid(ce);
        }
        }
 }
 
-static void guc_context_sched_disable(struct intel_context *ce)
+static void guc_context_sched_disable(struct intel_context *ce);
+
+static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
+                            unsigned long flags)
+       __releases(ce->guc_state.lock)
 {
-       struct intel_guc *guc = ce_to_guc(ce);
-       unsigned long flags;
        struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
        intel_wakeref_t wakeref;
-       u16 guc_id;
 
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+       with_intel_runtime_pm(runtime_pm, wakeref)
+               guc_context_sched_disable(ce);
+}
+
+static bool bypass_sched_disable(struct intel_guc *guc,
+                                struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
        GEM_BUG_ON(intel_context_is_child(ce));
 
+       if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
+           !ctx_id_mapped(guc, ce->guc_id.id)) {
+               clr_context_enabled(ce);
+               return true;
+       }
+
+       return !context_enabled(ce);
+}
+
+static void __delay_sched_disable(struct work_struct *wrk)
+{
+       struct intel_context *ce =
+               container_of(wrk, typeof(*ce), guc_state.sched_disable_delay.work);
+       struct intel_guc *guc = ce_to_guc(ce);
+       unsigned long flags;
+
        spin_lock_irqsave(&ce->guc_state.lock, flags);
 
-       /*
-        * We have to check if the context has been disabled by another thread,
-        * check if submssion has been disabled to seal a race with reset and
-        * finally check if any more requests have been committed to the
-        * context ensursing that a request doesn't slip through the
-        * 'context_pending_disable' fence.
-        */
-       if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
-                    context_has_committed_requests(ce))) {
-               clr_context_enabled(ce);
+       if (bypass_sched_disable(guc, ce)) {
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
-               goto unpin;
+               intel_context_sched_disable_unpin(ce);
+       } else {
+               do_sched_disable(guc, ce, flags);
        }
-       guc_id = prep_context_pending_disable(ce);
+}
 
-       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
+{
+       /*
+        * Parent contexts are perma-pinned; if we are unpinning, do the
+        * schedule disable immediately.
+        */
+       if (intel_context_is_parent(ce))
+               return true;
 
-       with_intel_runtime_pm(runtime_pm, wakeref)
-               __guc_context_sched_disable(guc, ce, guc_id);
+       /*
+        * If we are beyond the threshold for available guc_ids, do the
+        * schedule disable immediately.
+        */
+       return guc->submission_state.guc_ids_in_use >
+               guc->submission_state.sched_disable_gucid_threshold;
+}
+
+static void guc_context_sched_disable(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       u64 delay = guc->submission_state.sched_disable_delay_ms;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+       if (bypass_sched_disable(guc, ce)) {
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               intel_context_sched_disable_unpin(ce);
+       } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
+                  delay) {
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               mod_delayed_work(system_unbound_wq,
+                                &ce->guc_state.sched_disable_delay,
+                                msecs_to_jiffies(delay));
+       } else {
+               do_sched_disable(guc, ce, flags);
+       }
+}
 
-       return;
-unpin:
-       intel_context_sched_disable_unpin(ce);
+static void guc_context_close(struct intel_context *ce)
+{
+       if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
+           cancel_delayed_work(&ce->guc_state.sched_disable_delay))
+               __delay_sched_disable(&ce->guc_state.sched_disable_delay.work);
 }
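Taken together, __delay_sched_disable(), guc_context_sched_disable() and
guc_context_close() implement a generic defer-and-cancel idiom: push the costly
teardown onto a delayed work item when the last user goes away, cancel it if a
new user shows up inside the window, and run it synchronously on close. A
stripped-down sketch of just that idiom, with all lazy_* names being
illustrative rather than part of the driver:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct lazy_ctx {
        struct delayed_work teardown;
        unsigned int delay_ms;
};

static void lazy_do_teardown(struct lazy_ctx *ctx)
{
        /* stand-in for the real schedule-disable H2G round trip */
}

static void lazy_teardown_work(struct work_struct *wrk)
{
        struct lazy_ctx *ctx = container_of(wrk, typeof(*ctx), teardown.work);

        lazy_do_teardown(ctx);
}

static void lazy_init(struct lazy_ctx *ctx, unsigned int delay_ms)
{
        ctx->delay_ms = delay_ms;
        INIT_DELAYED_WORK(&ctx->teardown, lazy_teardown_work);
}

/* last user dropped off: defer the teardown by delay_ms */
static void lazy_last_put(struct lazy_ctx *ctx)
{
        mod_delayed_work(system_unbound_wq, &ctx->teardown,
                         msecs_to_jiffies(ctx->delay_ms));
}

/* new user arrived: drop the pending teardown if it hasn't started yet */
static void lazy_get(struct lazy_ctx *ctx)
{
        cancel_delayed_work(&ctx->teardown);
}

/* context is going away: if a teardown is still pending, run it now */
static void lazy_close(struct lazy_ctx *ctx)
{
        if (cancel_delayed_work(&ctx->teardown))
                lazy_teardown_work(&ctx->teardown.work);
}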
 
 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 static const struct intel_context_ops guc_context_ops = {
        .alloc = guc_context_alloc,
 
+       .close = guc_context_close,
+
        .pre_pin = guc_context_pre_pin,
        .pin = guc_context_pin,
        .unpin = guc_context_unpin,
        rcu_read_unlock();
 
        ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
+
+       INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay,
+                         __delay_sched_disable);
+
        set_bit(CONTEXT_GUC_INIT, &ce->flags);
 }
 
        if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
                guc_context_init(ce);
 
+       if (cancel_delayed_work(&ce->guc_state.sched_disable_delay))
+               intel_context_sched_disable_unpin(ce);
+
        /*
         * Call pin_guc_id here rather than in the pinning step as with
         * dma_resv, contexts can be repeatedly pinned / unpinned trashing the
 static const struct intel_context_ops virtual_guc_context_ops = {
        .alloc = guc_virtual_context_alloc,
 
+       .close = guc_context_close,
+
        .pre_pin = guc_virtual_context_pre_pin,
        .pin = guc_virtual_context_pin,
        .unpin = guc_virtual_context_unpin,
 static const struct intel_context_ops virtual_parent_context_ops = {
        .alloc = guc_virtual_context_alloc,
 
+       .close = guc_context_close,
+
        .pre_pin = guc_context_pre_pin,
        .pin = guc_parent_context_pin,
        .unpin = guc_parent_context_unpin,
        return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
 }
 
+int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
+{
+       return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
+}
+
+/*
+ * This default value of 33 milliseconds (+1 millisecond rounded up) ensures that
+ * 30fps or higher workloads are able to enjoy the latency reduction from delaying
+ * the schedule-disable operation. This matches the 30fps game-render + encode
+ * (real-world) workload this knob was tested against.
+ */
+#define SCHED_DISABLE_DELAY_MS 34
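In other words, one frame at 30fps takes 1000 / 30 = 33.33 ms, which rounds up
to the 34 ms used here:

/* 1000 ms / 30 fps = 33.33... ms  ->  33 ms + 1 ms round up = 34 ms */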
+
+/*
+ * A threshold of 75% is a reasonable starting point considering that real-world
+ * apps generally don't get anywhere near this level of guc_id usage.
+ */
+#define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
+       (((intel_guc_sched_disable_gucid_threshold_max(__guc)) * 3) / 4)
+
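To make the 75% default concrete, a back-of-the-envelope calculation with
illustrative numbers (the multi-LRC reservation below is a made-up figure; the
real value depends on the driver configuration):

/*
 *   num_guc_ids                    = 65535   (assuming GUC_MAX_CONTEXT_ID == 65535)
 *   NUMBER_MULTI_LRC_GUC_ID(guc)   = 4096    (made up, for illustration only)
 *
 *   threshold_max     = 65535 - 4096  = 61439
 *   default threshold = 61439 * 3 / 4 = 46079
 *
 * i.e. with these assumed numbers the schedule-disable delay is skipped once
 * roughly 46k single-LRC guc_ids are in use at the same time.
 */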
 void intel_guc_submission_init_early(struct intel_guc *guc)
 {
        xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
        spin_lock_init(&guc->timestamp.lock);
        INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
 
+       guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
        guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
+       guc->submission_state.sched_disable_gucid_threshold =
+               NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
        guc->submission_supported = __guc_submission_supported(guc);
        guc->submission_selected = __guc_submission_selected(guc);
 }