        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;
 
-       GEM_TRACE("%s fence %llx:%d, global_seqno %d, current %d\n",
+       GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  request->global_seqno,
        struct intel_engine_cs *engine = request->engine;
        u32 seqno;
 
-       GEM_TRACE("%s fence %llx:%d -> global_seqno %d, current %d\n",
+       GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  engine->timeline->seqno + 1,
 {
        struct intel_engine_cs *engine = request->engine;
 
-       GEM_TRACE("%s fence %llx:%d <- global_seqno %d, current %d\n",
+       GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
                  engine->name,
                  request->fence.context, request->fence.seqno,
                  request->global_seqno,
 
                        desc = execlists_update_context(rq);
                        GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+                       GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
                                  engine->name, n,
                                  port[n].context_id, count,
                                  rq->global_seqno,
+                                 rq->fence.context, rq->fence.seqno,
                                  intel_engine_get_seqno(engine),
                                  rq_prio(rq));
                } else {
        while (num_ports-- && port_isset(port)) {
                struct i915_request *rq = port_request(port);
 
+               GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+                         rq->engine->name,
+                         (unsigned int)(port - execlists->port),
+                         rq->global_seqno,
+                         rq->fence.context, rq->fence.seqno,
+                         intel_engine_get_seqno(rq->engine));
+
                GEM_BUG_ON(!execlists->active);
                intel_engine_context_out(rq->engine);
 
        struct rb_node *rb;
        unsigned long flags;
 
-       GEM_TRACE("%s\n", engine->name);
+       GEM_TRACE("%s current %d\n",
+                 engine->name, intel_engine_get_seqno(engine));
 
        /*
         * Before we call engine->cancel_requests(), we should have exclusive
                                                        EXECLISTS_ACTIVE_USER));
 
                        rq = port_unpack(port, &count);
-                       GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n",
+                       GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
                                  engine->name,
                                  port->context_id, count,
                                  rq ? rq->global_seqno : 0,
+                                 rq ? rq->fence.context : 0,
+                                 rq ? rq->fence.seqno : 0,
                                  intel_engine_get_seqno(engine),
                                  rq ? rq_prio(rq) : 0);
 
        struct intel_context *ce;
        unsigned long flags;
 
-       GEM_TRACE("%s seqno=%x\n",
-                 engine->name, request ? request->global_seqno : 0);
+       GEM_TRACE("%s request global=%x, current=%d\n",
+                 engine->name, request ? request->global_seqno : 0,
+                 intel_engine_get_seqno(engine));
 
        /* See execlists_cancel_requests() for the irq/spinlock split. */
        local_irq_save(flags);