#include "i915_reg.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
-#include "gt/intel_timeline.h"
 #include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_timeline.h"
 #include "intel_workarounds.h"
 
 struct drm_printer;
 #define ENGINE_MOCK    1
 #define ENGINE_VIRTUAL 2
 
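+/*
+ * True when the engine's requests are submitted through the GuC firmware
+ * scheduler rather than directly by the driver (ring buffer or execlists).
+ */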
+static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
+{
+       return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
+}
+
 static inline bool
 intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
 {
 
        enum intel_engine_id id;
        int err;
 
-       if (intel_uc_uses_guc_submission(&gt->uc))
+       if (intel_uc_uses_guc_submission(&gt->uc)) {
+               gt->submission_method = INTEL_SUBMISSION_GUC;
                setup = intel_guc_submission_setup;
-       else if (HAS_EXECLISTS(gt->i915))
+       } else if (HAS_EXECLISTS(gt->i915)) {
+               gt->submission_method = INTEL_SUBMISSION_ELSP;
                setup = intel_execlists_submission_setup;
-       else
+       } else {
+               gt->submission_method = INTEL_SUBMISSION_RING;
                setup = intel_ring_submission_setup;
+       }
 
        for_each_engine(engine, gt, id) {
                err = engine_setup_common(engine);
                drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
        }
 
-       if (intel_engine_in_guc_submission_mode(engine)) {
+       if (intel_engine_uses_guc(engine)) {
                /* nothing to print yet */
        } else if (HAS_EXECLISTS(dev_priv)) {
                struct i915_request * const *port, *rq;
 
         */
        GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
                   !reset_in_progress(execlists));
-       GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
 
        /*
         * Note that csb_write, csb_status may be either in HWSP or mmio.
        spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
-       return engine->set_default_submission ==
-              execlists_set_default_submission;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_execlists.c"
 #endif
 
                                     const struct intel_engine_cs *master,
                                     const struct intel_engine_cs *sibling);
 
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
 #endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
 
 struct intel_engine_cs;
 struct intel_uncore;
 
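+/*
+ * How requests reach the hardware: legacy ring buffer, execlists (ELSP)
+ * or the GuC firmware scheduler. The ordering matters; helpers such as
+ * intel_engine_uses_guc() compare directly against these values.
+ */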
+enum intel_submission_method {
+       INTEL_SUBMISSION_RING,
+       INTEL_SUBMISSION_ELSP,
+       INTEL_SUBMISSION_GUC,
+};
+
 struct intel_gt {
        struct drm_i915_private *i915;
        struct intel_uncore *uncore;
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
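+       /* How requests on this GT are submitted to hardware */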
+       enum intel_submission_method submission_method;
 
        /*
         * Default address space (either GGTT or ppGTT depending on arch).
 
 int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
 {
        struct intel_gt *gt = engine->gt;
-       bool uses_guc = intel_engine_in_guc_submission_mode(engine);
        int ret;
 
        ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
                           "Resetting %s for %s\n", engine->name, msg);
        atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
 
-       if (!uses_guc)
-               ret = intel_gt_reset_engine(engine);
-       else
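+       /* Ask the GuC to reset the engine when it is in charge of submission */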
+       if (intel_engine_uses_guc(engine))
                ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
+       else
+               ret = intel_gt_reset_engine(engine);
        if (ret) {
                /* If we fail here, we expect to fallback to a global reset */
                ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
 
                SUBTEST(live_virtual_reset),
        };
 
-       if (!HAS_EXECLISTS(i915))
+       if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP)
                return 0;
 
        if (intel_gt_is_wedged(&i915->gt))
 
                SUBTEST(live_ctx_switch_wa),
        };
 
-       if (HAS_EXECLISTS(i915))
+       if (i915->gt.submission_method > INTEL_SUBMISSION_RING)
                return 0;
 
        return intel_gt_live_subtests(tests, &i915->gt);
 
 {
        guc->submission_selected = __guc_submission_selected(guc);
 }
-
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
-{
-       return engine->set_default_submission == guc_set_default_submission;
-}
 
 int intel_guc_preempt_work_create(struct intel_guc *guc);
 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
 int intel_guc_submission_setup(struct intel_engine_cs *engine);
-bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
 
 static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
 {
 
        case 8:
        case 9:
        case 10:
-               if (intel_engine_in_execlists_submission_mode(ce->engine)) {
-                       stream->specific_ctx_id_mask =
-                               (1U << GEN8_CTX_ID_WIDTH) - 1;
-                       stream->specific_ctx_id = stream->specific_ctx_id_mask;
-               } else {
+               if (intel_engine_uses_guc(ce->engine)) {
                        /*
                         * When using GuC, the context descriptor we write in
                         * i915 is read by GuC and rewritten before it's
                         */
                        stream->specific_ctx_id_mask =
                                (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+               } else {
+                       stream->specific_ctx_id_mask =
+                               (1U << GEN8_CTX_ID_WIDTH) - 1;
+                       stream->specific_ctx_id = stream->specific_ctx_id_mask;
                }
                break;