}
 
 /**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
+ * address range
+ *
  * @gt: graphics tile
  * @fence: invalidation fence which will be signal on TLB invalidation
  * completion, can be NULL
- * @vma: VMA to invalidate
+ * @start: start address
+ * @end: end address
+ * @asid: address space id
  *
  * Issue a range based TLB invalidation if supported, if not fallback to a full
  * TLB invalidation. Completion of TLB is asynchronous and caller can either use
  * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
  * completion.
  *
  * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
  * negative error code on error.
  */
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
-                              struct xe_gt_tlb_invalidation_fence *fence,
-                              struct xe_vma *vma)
+int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
+                                struct xe_gt_tlb_invalidation_fence *fence,
+                                u64 start, u64 end, u32 asid)
 {
        struct xe_device *xe = gt_to_xe(gt);
 #define MAX_TLB_INVALIDATION_LEN       7
        u32 action[MAX_TLB_INVALIDATION_LEN];
        int len = 0;
 
-       xe_gt_assert(gt, vma);
-
        /* Execlists not supported */
        if (gt_to_xe(gt)->info.force_execlist) {
                if (fence)
        if (!xe->info.has_range_tlb_invalidation) {
                action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
        } else {
-               u64 start = xe_vma_start(vma);
-               u64 length = xe_vma_size(vma);
+               u64 orig_start = start;
+               u64 length = end - start;
-               u64 align, end;
+               u64 align;
 
                if (length < SZ_4K)
                 * address mask covering the required range.
                 */
                align = roundup_pow_of_two(length);
-               start = ALIGN_DOWN(xe_vma_start(vma), align);
-               end = ALIGN(xe_vma_end(vma), align);
+               start = ALIGN_DOWN(start, align);
+               end = ALIGN(end, align);
                length = align;
                while (start + length < end) {
                        length <<= 1;
-                       start = ALIGN_DOWN(xe_vma_start(vma), length);
+                       start = ALIGN_DOWN(orig_start, length);
                }
 
                /*
                 */
                if (length >= SZ_2M) {
                        length = max_t(u64, SZ_16M, length);
-                       start = ALIGN_DOWN(xe_vma_start(vma), length);
+                       start = ALIGN_DOWN(orig_start, length);
                }
 
                xe_gt_assert(gt, length >= SZ_4K);
                xe_gt_assert(gt, is_power_of_2(length));
-               xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
+               xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+                                                   ilog2(SZ_2M) + 1)));
                xe_gt_assert(gt, IS_ALIGNED(start, length));
 
                action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
-               action[len++] = xe_vma_vm(vma)->usm.asid;
+               action[len++] = asid;
                action[len++] = lower_32_bits(start);
                action[len++] = upper_32_bits(start);
                action[len++] = ilog2(length) - ilog2(SZ_4K);
        return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
 }
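
The widening performed in the else branch above is easier to follow in isolation. The following stand-alone sketch mirrors that math with plain-C stand-ins for the kernel helpers (ALIGN_DOWN, ALIGN, roundup_pow_of_two, max_t); the addresses are made up for illustration and none of this is driver code.

/*
 * Stand-alone sketch (not driver code): derive a power-of-two length and a
 * start aligned to that length which together cover [start, end), mirroring
 * the else branch of xe_gt_tlb_invalidation_range above.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K   0x1000ULL
#define SZ_2M   0x200000ULL
#define SZ_16M  0x1000000ULL

static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t align_up(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

static uint64_t roundup_pow_of_two(uint64_t x)
{
	uint64_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Example: an unaligned 36K range starting at 0x101000 */
	uint64_t orig_start = 0x101000, end = 0x10a000;
	uint64_t start = orig_start;
	uint64_t length = end - start;
	uint64_t align;

	if (length < SZ_4K)
		length = SZ_4K;

	/* Round the length up to a power of two and align the range to it */
	align = roundup_pow_of_two(length);
	start = align_down(start, align);
	end = align_up(end, align);
	length = align;

	/* Grow further until one aligned block of 'length' covers the range */
	while (start + length < end) {
		length <<= 1;
		start = align_down(orig_start, length);
	}

	/* Lengths strictly between 2M and 16M are not used; bump to 16M (max_t above) */
	if (length >= SZ_2M) {
		length = length > SZ_16M ? length : SZ_16M;
		start = align_down(orig_start, length);
	}

	/* Prints: invalidate [0x100000, 0x110000), length 0x10000 */
	printf("invalidate [%#llx, %#llx), length %#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + length),
	       (unsigned long long)length);
	return 0;
}
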
 
+/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * @gt: graphics tile
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion, can be NULL
+ * @vma: VMA to invalidate
+ *
+ * Issue a range-based TLB invalidation if supported; if not, fall back to a full
+ * TLB invalidation. Completion of TLB is asynchronous and caller can either use
+ * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
+ * completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+                              struct xe_gt_tlb_invalidation_fence *fence,
+                              struct xe_vma *vma)
+{
+       xe_gt_assert(gt, vma);
+
+       return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
+                                           xe_vma_end(vma),
+                                           xe_vma_vm(vma)->usm.asid);
+}
+
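
As the kernel-doc above notes, a caller can observe completion either through the invalidation fence or by waiting on the returned seqno with xe_gt_tlb_invalidation_wait. A hypothetical helper along those lines, sketched against the signatures visible in this patch (it assumes the driver's struct xe_vm and is illustrative only, not part of the change):

/*
 * Hypothetical caller sketch: issue a range invalidation without a fence and
 * block until the seqno is reported complete.
 */
static int xe_flush_range_sync(struct xe_gt *gt, struct xe_vm *vm,
			       u64 start, u64 end)
{
	int seqno;

	seqno = xe_gt_tlb_invalidation_range(gt, NULL, start, end,
					     vm->usm.asid);
	if (seqno <= 0)
		return seqno;	/* error, or nothing to wait on */

	return xe_gt_tlb_invalidation_wait(gt, seqno);
}

The bind/unbind paths below take the fence route instead, deferring the invalidation to a worker once the job's dma-fence signals.
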
 /**
  * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
  * @gt: graphics tile
 
 struct invalidation_fence {
        struct xe_gt_tlb_invalidation_fence base;
        struct xe_gt *gt;
-       struct xe_vma *vma;
        struct dma_fence *fence;
        struct dma_fence_cb cb;
        struct work_struct work;
+       u64 start;
+       u64 end;
+       u32 asid;
 };
 
 static const char *
                container_of(w, struct invalidation_fence, work);
 
        trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
-       xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
+       xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
+                                    ifence->end, ifence->asid);
 }
 
 static int invalidation_fence_init(struct xe_gt *gt,
                                   struct invalidation_fence *ifence,
                                   struct dma_fence *fence,
-                                  struct xe_vma *vma)
+                                  u64 start, u64 end, u32 asid)
 {
        int ret;
 
        dma_fence_get(&ifence->base.base);      /* Ref for caller */
        ifence->fence = fence;
        ifence->gt = gt;
-       ifence->vma = vma;
+       ifence->start = start;
+       ifence->end = end;
+       ifence->asid = asid;
 
        INIT_WORK(&ifence->work, invalidation_fence_work_func);
        ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
 
                /* TLB invalidation must be done before signaling rebind */
                if (ifence) {
-                       int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
-                                                         vma);
+                       int err = invalidation_fence_init(tile->primary_gt,
+                                                         ifence, fence,
+                                                         xe_vma_start(vma),
+                                                         xe_vma_end(vma),
+                                                         xe_vma_vm(vma)->usm.asid);
                        if (err) {
                                dma_fence_put(fence);
                                kfree(ifence);
                        dma_fence_wait(fence, false);
 
                /* TLB invalidation must be done before signaling unbind */
-               err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
+               err = invalidation_fence_init(tile->primary_gt, ifence, fence,
+                                             xe_vma_start(vma),
+                                             xe_vma_end(vma),
+                                             xe_vma_vm(vma)->usm.asid);
                if (err) {
                        dma_fence_put(fence);
                        kfree(ifence);