 #define LRC_PPHWSP_SIZE                         SZ_4K
 #define LRC_INDIRECT_RING_STATE_SIZE           SZ_4K
+#define LRC_WA_BB_SIZE                         SZ_4K
 
 static struct xe_device *
 lrc_to_xe(struct xe_lrc *lrc)
@@ ... @@ static void xe_lrc_finish(struct xe_lrc *lrc)
 {
        xe_hw_fence_ctx_finish(&lrc->fence_ctx);
        xe_bo_unpin_map_no_vm(lrc->bo);
-       xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
 }
 
 /*
                         u32 *batch, size_t max_size);
 };
 
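+/*
+ * The per-context WA batch buffer occupies the last LRC_WA_BB_SIZE bytes
+ * of the LRC BO.
+ */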
+static size_t wa_bb_offset(struct xe_lrc *lrc)
+{
+       return lrc->bo->size - LRC_WA_BB_SIZE;
+}
+
 static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
 {
-       const size_t max_size = lrc->bb_per_ctx_bo->size;
+       const size_t max_size = LRC_WA_BB_SIZE;
        static const struct wa_bb_setup funcs[] = {
                { .setup = wa_bb_setup_utilization },
        };
        ssize_t remain;
        u32 *cmd, *buf = NULL;
 
-       if (lrc->bb_per_ctx_bo->vmap.is_iomem) {
+       if (lrc->bo->vmap.is_iomem) {
                buf = kmalloc(max_size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                cmd = buf;
        } else {
-               cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
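+               /* Build the WA BB in place at the tail of the LRC BO */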
+               cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc);
        }
 
        remain = max_size / sizeof(*cmd);
@@ ... @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
        *cmd++ = MI_BATCH_BUFFER_END;
 
        if (buf) {
-               xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0,
-                                buf, (cmd - buf) * sizeof(*cmd));
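+               /* iomem: copy the staged WA BB into the tail of the LRC BO */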
+               xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap,
+                                wa_bb_offset(lrc), buf,
+                                (cmd - buf) * sizeof(*cmd));
                kfree(buf);
        }
 
-       xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
-                            xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
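+       /*
+        * The GGTT address and wa_bb_offset() are page-aligned, so "+ 1"
+        * sets only the low bit, matching the previous "| 1".
+        */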
+       xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) +
+                            wa_bb_offset(lrc) + 1);
 
        return 0;
 
         * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
         * via VM bind calls.
         */
-       lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, lrc_size,
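+       /* Reserve LRC_WA_BB_SIZE at the end of the BO for the WA BB */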
+       lrc->bo = xe_bo_create_pin_map(xe, tile, NULL,
+                                      lrc_size + LRC_WA_BB_SIZE,
                                       ttm_bo_type_kernel,
                                       bo_flags);
        if (IS_ERR(lrc->bo))
                return PTR_ERR(lrc->bo);
 
-       lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
-                                                 ttm_bo_type_kernel,
-                                                 bo_flags);
-       if (IS_ERR(lrc->bb_per_ctx_bo)) {
-               err = PTR_ERR(lrc->bb_per_ctx_bo);
-               goto err_lrc_finish;
-       }
-
        lrc->size = lrc_size;
        lrc->ring.size = ring_size;
        lrc->ring.tail = 0;
@@ ... @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
        snapshot->seqno = xe_lrc_seqno(lrc);
        snapshot->lrc_bo = xe_bo_get(lrc->bo);
        snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
-       snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
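+       /* Don't capture the WA BB reserved at the end of the LRC BO */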
+       snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset -
+               LRC_WA_BB_SIZE;
        snapshot->lrc_snapshot = NULL;
        snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
        snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);