if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
                struct intel_timeline *timeline;
 
-               timeline = intel_timeline_create(&i915->gt, NULL);
+               timeline = intel_timeline_create(&i915->gt);
                if (IS_ERR(timeline)) {
                        context_close(ctx);
                        return ERR_CAST(timeline);
 
 }
 
 static struct intel_context *
-create_kernel_context(struct intel_engine_cs *engine)
+create_pinned_context(struct intel_engine_cs *engine,
+                     unsigned int hwsp,
+                     struct lock_class_key *key,
+                     const char *name)
 {
-       static struct lock_class_key kernel;
        struct intel_context *ce;
        int err;
 
                return ce;
 
        __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+       ce->timeline = page_pack_bits(NULL, hwsp);
 
        err = intel_context_pin(ce); /* perma-pin so it is always available */
        if (err) {
         * should we need to inject GPU operations during their request
         * construction.
         */
-       lockdep_set_class(&ce->timeline->mutex, &kernel);
+       lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
 
        return ce;
 }
 
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+       static struct lock_class_key kernel;
+
+       return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
+                                    &kernel, "kernel_context");
+}
+
 /**
 * intel_engines_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
 
        return 0;
 }
 
+static struct intel_timeline *pinned_timeline(struct intel_context *ce)
+{
+       struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+
+       return intel_timeline_create_from_engine(ce->engine,
+                                                page_unmask_bits(tl));
+}
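A note on the low-bit packing used here: create_pinned_context() has no real timeline yet, so it stashes the status-page offset in the otherwise-unused alignment bits of a NULL ce->timeline pointer via page_pack_bits(). __execlists_context_alloc() then treats a non-NULL pointer whose page_mask_bits() is still NULL as "pinned context, real timeline pending", and pinned_timeline() recovers the offset with page_unmask_bits() before creating the engine-backed timeline. Below is a minimal, standalone sketch of that round trip; PACK/MASK/UNMASK are simplified stand-ins for the i915_utils.h helpers (not the kernel macros themselves), and the offset value is an arbitrary placeholder.

#include <assert.h>
#include <stdio.h>

/*
 * Simplified stand-ins for page_pack_bits()/page_mask_bits()/page_unmask_bits():
 * the packed value must fit below the pointer's page alignment (12 bits here).
 */
#define LOW_BITS	((1ul << 12) - 1)
#define PACK(ptr, v)	((void *)((unsigned long)(ptr) | (v)))
#define MASK(ptr)	((void *)((unsigned long)(ptr) & ~LOW_BITS))
#define UNMASK(ptr)	((unsigned long)(ptr) & LOW_BITS)

int main(void)
{
	unsigned int hwsp = 0x40;		/* arbitrary stand-in offset */
	void *timeline = PACK(NULL, hwsp);	/* as in create_pinned_context() */

	assert(timeline != NULL);		/* flags the pinned-context case */
	assert(MASK(timeline) == NULL);		/* ...but no real timeline attached yet */

	/* pinned_timeline() peels the offset back off for the real create */
	printf("recovered hwsp offset: %#lx\n", UNMASK(timeline));
	return 0;
}

The trick works because a status-page offset is always smaller than the page alignment of a timeline pointer, so the two can share one field without ambiguity.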
+
 static int __execlists_context_alloc(struct intel_context *ce,
                                     struct intel_engine_cs *engine)
 {
                goto error_deref_obj;
        }
 
-       if (!ce->timeline) {
+       if (!page_mask_bits(ce->timeline)) {
                struct intel_timeline *tl;
-               struct i915_vma *hwsp;
 
                /*
                 * Use the static global HWSP for the kernel context, and
                 * a dynamically allocated cacheline for everyone else.
                 */
-               hwsp = NULL;
-               if (unlikely(intel_context_is_barrier(ce)))
-                       hwsp = engine->status_page.vma;
-
-               tl = intel_timeline_create(engine->gt, hwsp);
+               if (unlikely(ce->timeline))
+                       tl = pinned_timeline(ce);
+               else
+                       tl = intel_timeline_create(engine->gt);
                if (IS_ERR(tl)) {
                        ret = PTR_ERR(tl);
                        goto error_deref_obj;
 
                return -ENODEV;
        }
 
-       timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
+       timeline = intel_timeline_create_from_engine(engine,
+                                                    I915_GEM_HWS_SEQNO_ADDR);
        if (IS_ERR(timeline)) {
                err = PTR_ERR(timeline);
                goto err;
 
 
 static int intel_timeline_init(struct intel_timeline *timeline,
                               struct intel_gt *gt,
-                              struct i915_vma *hwsp)
+                              struct i915_vma *hwsp,
+                              unsigned int offset)
 {
        void *vaddr;
 
 
                vaddr = page_mask_bits(cl->vaddr);
        } else {
-               timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
-
+               timeline->hwsp_offset = offset;
                vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);
 }
 
 struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
+__intel_timeline_create(struct intel_gt *gt,
+                       struct i915_vma *global_hwsp,
+                       unsigned int offset)
 {
        struct intel_timeline *timeline;
        int err;
        if (!timeline)
                return ERR_PTR(-ENOMEM);
 
-       err = intel_timeline_init(timeline, gt, global_hwsp);
+       err = intel_timeline_init(timeline, gt, global_hwsp, offset);
        if (err) {
                kfree(timeline);
                return ERR_PTR(err);
 
 
 #include "i915_active.h"
 #include "i915_syncmap.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_timeline_types.h"
 
 struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+__intel_timeline_create(struct intel_gt *gt,
+                       struct i915_vma *global_hwsp,
+                       unsigned int offset);
+
+static inline struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt)
+{
+       return __intel_timeline_create(gt, NULL, 0);
+}
+
+static inline struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+                                 unsigned int offset)
+{
+       return __intel_timeline_create(engine->gt,
+                                      engine->status_page.vma,
+                                      offset);
+}
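For callers, the header reduces the old two-argument intel_timeline_create() to the two wrappers above. The fragment below is only a condensed restatement of the call sites earlier in this patch (gt, engine and tl are assumed to be in scope), showing how each wrapper reaches __intel_timeline_create():

struct intel_timeline *tl;

/* Private, dynamically allocated HWSP cacheline (GEM contexts, selftests): */
tl = intel_timeline_create(gt);
/*	-> __intel_timeline_create(gt, NULL, 0) */

/* Timeline backed by the engine's status page, as in the engine hunk above: */
tl = intel_timeline_create_from_engine(engine, I915_GEM_HWS_SEQNO_ADDR);
/*	-> __intel_timeline_create(engine->gt, engine->status_page.vma,
 *				   I915_GEM_HWS_SEQNO_ADDR) */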
 
 static inline struct intel_timeline *
 intel_timeline_get(struct intel_timeline *timeline)
 
                return -ENOMEM;
 
        GEM_BUG_ON(ce->timeline);
-       ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+       ce->timeline = intel_timeline_create(ce->engine->gt);
        if (IS_ERR(ce->timeline)) {
                kfree(ce->engine);
                return PTR_ERR(ce->timeline);
 
                unsigned long cacheline;
                int err;
 
-               tl = intel_timeline_create(state->gt, NULL);
+               tl = intel_timeline_create(state->gt);
                if (IS_ERR(tl))
                        return PTR_ERR(tl);
 
 {
        struct intel_timeline *tl;
 
-       tl = intel_timeline_create(gt, NULL);
+       tl = intel_timeline_create(gt);
        if (IS_ERR(tl))
                return tl;
 
         * foreign GPU references.
         */
 
-       tl = intel_timeline_create(gt, NULL);
+       tl = intel_timeline_create(gt);
        if (IS_ERR(tl))
                return PTR_ERR(tl);