err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
        if (!enabled && !err) {
+               trace_intel_context_sched_enable(ce);
                atomic_inc(&guc->outstanding_submission_g2h);
                set_context_enabled(ce);
        } else if (!enabled) {
        u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
                ce->guc_id * sizeof(struct guc_lrc_desc);
 
+       trace_intel_context_register(ce);
+
        return __guc_action_register_context(guc, ce->guc_id, offset);
 }
 
 {
        struct intel_guc *guc = ce_to_guc(ce);
 
+       trace_intel_context_deregister(ce);
+
        return __guc_action_deregister_context(guc, guc_id);
 }
 
         * registering this context.
         */
        if (context_registered) {
+               trace_intel_context_steal_guc_id(ce);
                set_context_wait_for_deregister_to_register(ce);
                intel_context_get(ce);
 
 
        GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
 
+       trace_intel_context_sched_disable(ce);
        intel_context_get(ce);
 
        guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
 
        lockdep_assert_held(&ce->guc_state.lock);
 
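+       /* Only emit the tracepoint when there are fences to release. */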
+       if (!list_empty(&ce->guc_state.fences))
+               trace_intel_context_fence_release(ce);
+
        list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
                i915_sw_fence_complete(&rq->submit);
 
        if (unlikely(!ce))
                return -EPROTO;
 
+       trace_intel_context_deregister_done(ce);
+
        if (context_wait_for_deregister_to_register(ce)) {
                struct intel_runtime_pm *runtime_pm =
                        &ce->engine->gt->i915->runtime_pm;
                return -EPROTO;
        }
 
+       trace_intel_context_sched_done(ce);
+
        if (context_pending_enable(ce)) {
                clr_context_pending_enable(ce);
        } else if (context_pending_disable(ce)) {
 
                              __entry->ctx, __entry->seqno, __entry->completed)
 );
 
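+/*
+ * Common event class for context-lifecycle tracepoints: each event records
+ * the context's guc_id, pin count, and both scheduling-state words (the
+ * lock-protected sched_state and the lockless guc_sched_state_no_lock) so
+ * state transitions can be followed in a trace.
+ */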
+DECLARE_EVENT_CLASS(intel_context,
+                   TP_PROTO(struct intel_context *ce),
+                   TP_ARGS(ce),
+
+                   TP_STRUCT__entry(
+                            __field(u32, guc_id)
+                            __field(int, pin_count)
+                            __field(u32, sched_state)
+                            __field(u32, guc_sched_state_no_lock)
+                            ),
+
+                   TP_fast_assign(
+                          __entry->guc_id = ce->guc_id;
+                          __entry->pin_count = atomic_read(&ce->pin_count);
+                          __entry->sched_state = ce->guc_state.sched_state;
+                          __entry->guc_sched_state_no_lock =
+                                  atomic_read(&ce->guc_sched_state_no_lock);
+                          ),
+
+                   TP_printk("guc_id=%d, pin_count=%d, sched_state=0x%x,0x%x",
+                             __entry->guc_id, __entry->pin_count,
+                             __entry->sched_state,
+                             __entry->guc_sched_state_no_lock)
+);
+
+DEFINE_EVENT(intel_context, intel_context_register,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_deregister,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_deregister_done,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_enable,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_disable,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_done,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_create,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_fence_release,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_free,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_do_pin,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_do_unpin,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
 #else
 #if !defined(TRACE_HEADER_MULTI_READ)
 static inline void
 trace_i915_request_out(struct i915_request *rq)
 {
 }
+
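+/* No-op stubs so call sites build unchanged when these events are compiled out. */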
+static inline void
+trace_intel_context_register(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_deregister(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_deregister_done(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_enable(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_disable(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_done(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_create(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_fence_release(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_free(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_steal_guc_id(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_do_pin(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_do_unpin(struct intel_context *ce)
+{
+}
 #endif
 #endif
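
For future additions under this class, two matching pieces are needed: a DEFINE_EVENT that reuses the intel_context class and a no-op stub in the #else branch so call sites compile either way. A minimal sketch, using a hypothetical intel_context_ban event that this patch does not add:

/* Hypothetical example; intel_context_ban is not part of this patch. */
DEFINE_EVENT(intel_context, intel_context_ban,
             TP_PROTO(struct intel_context *ce),
             TP_ARGS(ce)
);

/* Matching stub for the branch where the tracepoints are compiled out. */
static inline void
trace_intel_context_ban(struct intel_context *ce)
{
}

Both halves must stay in sync: defining only the event leaves builds with the tracepoints compiled out failing at the call site.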