true, timeout);
 }
 
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
+static int try_context_registration(struct intel_context *ce, bool loop);
 
 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 
                if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
                             !intel_context_is_banned(ce))) {
-                       ret = guc_lrc_desc_pin(ce, false);
+                       ret = try_context_registration(ce, false);
                        if (unlikely(ret == -EPIPE)) {
                                goto deadlk;
                        } else if (ret == -EBUSY) {
        desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
 }
 
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
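+/*
+ * Fill in the LRC descriptor shared with the GuC for this context (engine
+ * class, submit mask, ...). Only the descriptor is prepared here; the
+ * registration itself is issued by try_context_registration() below.
+ */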
+static void prepare_context_registration_info(struct intel_context *ce)
 {
        struct intel_engine_cs *engine = ce->engine;
-       struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
        struct intel_guc *guc = &engine->gt->uc.guc;
        u32 desc_idx = ce->guc_id.id;
        struct guc_lrc_desc *desc;
-       bool context_registered;
-       intel_wakeref_t wakeref;
        struct intel_context *child;
-       int ret = 0;
 
        GEM_BUG_ON(!engine->mask);
-       GEM_BUG_ON(!sched_state_is_init(ce));
 
        /*
         * Ensure LRC + CT vmas are in same region as write barrier is done
        GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
                   i915_gem_object_is_lmem(ce->ring->vma->obj));
 
-       context_registered = ctx_id_mapped(guc, desc_idx);
-
-       clr_ctx_id_mapping(guc, desc_idx);
-       set_ctx_id_mapping(guc, desc_idx, ce);
-
        desc = __get_lrc_desc(guc, desc_idx);
        desc->engine_class = engine_class_to_guc_class(engine->class);
        desc->engine_submit_mask = engine->logical_mask;
 
                clear_children_join_go_memory(ce);
        }
+}
+
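+/*
+ * Map the guc_id to @ce, prepare the registration info and attempt to
+ * register the context with the GuC. @loop is passed through to the
+ * registration H2G and selects whether the send may busy-loop waiting
+ * for CT buffer space.
+ */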
+static int try_context_registration(struct intel_context *ce, bool loop)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       intel_wakeref_t wakeref;
+       u32 desc_idx = ce->guc_id.id;
+       bool context_registered;
+       int ret = 0;
+
+       GEM_BUG_ON(!sched_state_is_init(ce));
+
+       context_registered = ctx_id_mapped(guc, desc_idx);
+
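+       /*
+        * Drop any stale mapping for this ID and point it at @ce ahead of
+        * the (re-)registration below.
+        */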
+       clr_ctx_id_mapping(guc, desc_idx);
+       set_ctx_id_mapping(guc, desc_idx, ce);
+
+       prepare_context_registration_info(ce);
 
        /*
         * The context_lookup xarray is used to determine if the hardware
        if (unlikely(ret < 0))
                return ret;
        if (context_needs_register(ce, !!ret)) {
-               ret = guc_lrc_desc_pin(ce, true);
+               ret = try_context_registration(ce, true);
                if (unlikely(ret)) {    /* unwind */
                        if (ret == -EPIPE) {
                                disable_submission(guc);
 static inline void guc_kernel_context_pin(struct intel_guc *guc,
                                          struct intel_context *ce)
 {
+       /*
+        * Note: we purposefully do not check the return values below
+        * because registration can only fail if a reset is just starting.
+        * This is called at the end of a reset, so presumably another reset
+        * isn't happening and even if it did this code would be run again.
+        */
+
        if (context_guc_id_invalid(ce))
                pin_guc_id(guc, ce);
-       guc_lrc_desc_pin(ce, true);
+
+       try_context_registration(ce, true);
 }
 
 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
         * Also, after a reset of the GuC we want to make sure that the
         * information shared with GuC is properly reset. The kernel LRCs are
         * not attached to the gem_context, so they need to be added separately.
-        *
-        * Note: we purposefully do not check the return of guc_lrc_desc_pin,
-        * because that function can only fail if a reset is just starting. This
-        * is at the end of reset so presumably another reset isn't happening
-        * and even it did this code would be run again.
         */
-
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;