--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ ... @@
        .destroy = execlists_context_destroy,
 };
 
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+       const struct intel_timeline_cacheline *cl;
+
+       /* Before the request is executed, the timeline/cacheline is fixed */
+
+       cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+       if (cl)
+               return cl->ggtt_offset;
+
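+       /* Otherwise fall back to the offset fixed on the request's timeline */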
+       return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
 static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
        u32 *cs;
@@ ... @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
        *cs++ = MI_NOOP;
 
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-       *cs++ = i915_request_timeline(rq)->hwsp_offset;
+       *cs++ = hwsp_offset(rq);
        *cs++ = 0;
        *cs++ = rq->fence.seqno - 1;
 
@@ ... @@
        return gen8_emit_wa_tail(request, cs);
 }
 
-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-       u32 addr = i915_request_active_timeline(request)->hwsp_offset;
-
-       return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+       return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
 }
 
 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
@@ ... @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
        /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
        cs = gen8_emit_ggtt_write_rcs(cs,
                                      request->fence.seqno,
-                                     i915_request_active_timeline(request)->hwsp_offset,
+                                     hwsp_offset(request),
                                      PIPE_CONTROL_FLUSH_ENABLE |
                                      PIPE_CONTROL_CS_STALL);
 
@@ ... @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
        cs = gen8_emit_ggtt_write_rcs(cs,
                                      request->fence.seqno,
-                                     i915_request_active_timeline(request)->hwsp_offset,
+                                     hwsp_offset(request),
                                      PIPE_CONTROL_CS_STALL |
                                      PIPE_CONTROL_TILE_CACHE_FLUSH |
                                      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ ... @@ static u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
        cs = gen12_emit_ggtt_write_rcs(cs,
                                       request->fence.seqno,
-                                      i915_request_active_timeline(request)->hwsp_offset,
+                                      hwsp_offset(request),
                                       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
                                       PIPE_CONTROL_CS_STALL |
                                       PIPE_CONTROL_TILE_CACHE_FLUSH |
 
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ ... @@
        return cl;
 }
 
-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl,
+                             u32 ggtt_offset)
 {
-       if (cl)
-               i915_active_acquire(&cl->active);
+       if (!cl)
+               return;
+
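+       /*
+        * Record the ggtt offset of the cacheline for the duration of
+        * the pin so that readers (e.g. hwsp_offset()) can use it
+        * without having to dereference the backing vma.
+        */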
+       cl->ggtt_offset = ggtt_offset;
+       i915_active_acquire(&cl->active);
 }
 
 static void cacheline_release(struct intel_timeline_cacheline *cl)
@@ ... @@
        GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
                 tl->fence_context, tl->hwsp_offset);
 
-       cacheline_acquire(tl->hwsp_cacheline);
+       cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
        if (atomic_fetch_inc(&tl->pin_count)) {
                cacheline_release(tl->hwsp_cacheline);
                __i915_vma_unpin(tl->hwsp_ggtt);
@@ ... @@
        GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
                 tl->fence_context, tl->hwsp_offset);
 
-       cacheline_acquire(cl);
+       cacheline_acquire(cl, tl->hwsp_offset);
        tl->hwsp_cacheline = cl;
 
        *seqno = timeline_advance(tl);
@@ ... @@
        if (err)
                goto out;
 
-       *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
-               ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
-
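+       /* Use the ggtt offset recorded when the cacheline was acquired */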
+       *hwsp = cl->ggtt_offset;
 out:
        i915_active_release(&cl->active);
        return err;