        INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
        spin_lock_init(&gt->tlb_invalidation.pending_lock);
        spin_lock_init(&gt->tlb_invalidation.lock);
-       gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
        INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
                          xe_gt_tlb_fence_timeout);
 
 
                 * xe_gt_tlb_fence_timeout after the timeout interval is over.
                 */
                struct delayed_work fence_tdr;
-               /** @tlb_invalidation.fence_context: context for TLB invalidation fences */
-               u64 fence_context;
-               /**
-                * @tlb_invalidation.fence_seqno: seqno to TLB invalidation fences, protected by
-                * tlb_invalidation.lock
-                */
-               u32 fence_seqno;
                /** @tlb_invalidation.lock: protects TLB invalidation fences */
                spinlock_t lock;
        } tlb_invalidation;
 
        spin_lock_irq(&gt->tlb_invalidation.lock);
        dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
                       &gt->tlb_invalidation.lock,
-                      gt->tlb_invalidation.fence_context,
-                      ++gt->tlb_invalidation.fence_seqno);
+                      dma_fence_context_alloc(1), 1);
        spin_unlock_irq(&gt->tlb_invalidation.lock);
 
        INIT_LIST_HEAD(&ifence->base.link);
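
For context outside the diff: the change above gives every TLB invalidation fence its own dma-fence context, allocated at init time with dma_fence_context_alloc(1), and a fixed seqno of 1, so the GT no longer carries shared fence_context/fence_seqno state. A minimal sketch of that pattern follows; the struct, ops, and function names are illustrative placeholders, not the xe driver's definitions, and the per-fence spinlock stands in for whatever lock the caller already uses.

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

/* Illustrative one-shot fence that owns a private fence context. */
struct example_inval_fence {
	struct dma_fence base;
	spinlock_t lock;
};

static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-inval";
}

static const struct dma_fence_ops example_inval_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
};

static void example_inval_fence_init(struct example_inval_fence *f)
{
	spin_lock_init(&f->lock);
	/*
	 * One context per fence, seqno fixed at 1: the fence is not part
	 * of any shared timeline and is expected to signal exactly once.
	 */
	dma_fence_init(&f->base, &example_inval_fence_ops, &f->lock,
		       dma_fence_context_alloc(1), 1);
}

Because the context is allocated when the fence is initialized, no GT-wide counter is needed to hand out seqnos, which is why the fence_seqno field can be dropped from the struct.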