return vma;
 }
 
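+/*
+ * Return the largest page size used to map this VMA, based on its
+ * XE_VMA_PTE_* flags.
+ */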
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+       if (vma->gpuva.flags & XE_VMA_PTE_1G)
+               return SZ_1G;
+       else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+               return SZ_2M;
+
+       return SZ_4K;
+}
+
 /*
  * Parse operations list and create any resources needed for the operations
  * prior to fully committing to the operations. This setup can fail.
                                break;
                        }
                        case DRM_GPUVA_OP_REMAP:
+                       {
+                               struct xe_vma *old =
+                                       gpuva_to_vma(op->base.remap.unmap->va);
+
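+                               /*
+                                * Default to unbinding the entire old VMA;
+                                * the range is trimmed below if the prev or
+                                * next piece can keep its existing mapping.
+                                */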
+                               op->remap.start = xe_vma_start(old);
+                               op->remap.range = xe_vma_size(old);
+
                                if (op->base.remap.prev) {
                                        struct xe_vma *vma;
                                        bool read_only =
                                        }
 
                                        op->remap.prev = vma;
+
+                                       /*
+                                        * Userptrs create a new SG mapping on
+                                        * each bind, so the prev rebind can
+                                        * never be skipped.
+                                        */
+                                       op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+                                               IS_ALIGNED(xe_vma_end(vma),
+                                                          xe_vma_max_pte_size(old));
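+                                       /*
+                                        * The prev piece keeps its existing
+                                        * page tables, so exclude it from the
+                                        * range to be unbound.
+                                        */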
+                                       if (op->remap.skip_prev) {
+                                               op->remap.range -=
+                                                       xe_vma_end(vma) -
+                                                       xe_vma_start(old);
+                                               op->remap.start = xe_vma_end(vma);
+                                       }
                                }
 
                                if (op->base.remap.next) {
                                        }
 
                                        op->remap.next = vma;
-                               }
 
-                               /* XXX: Support no doing remaps */
-                               op->remap.start =
-                                       xe_vma_start(gpuva_to_vma(op->base.remap.unmap->va));
-                               op->remap.range =
-                                       xe_vma_size(gpuva_to_vma(op->base.remap.unmap->va));
+                                       /*
+                                        * Userptrs create a new SG mapping on
+                                        * each bind, so the next rebind can
+                                        * never be skipped.
+                                        */
+                                       op->remap.skip_next = !xe_vma_is_userptr(old) &&
+                                               IS_ALIGNED(xe_vma_start(vma),
+                                                          xe_vma_max_pte_size(old));
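+                                       /*
+                                        * Likewise trim a kept next piece
+                                        * from the tail of the unbind range.
+                                        */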
+                                       if (op->remap.skip_next)
+                                               op->remap.range -=
+                                                       xe_vma_end(old) -
+                                                       xe_vma_start(vma);
+                               }
                                break;
+                       }
                        case DRM_GPUVA_OP_UNMAP:
                        case DRM_GPUVA_OP_PREFETCH:
                                /* Nothing to do */
        case DRM_GPUVA_OP_REMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
                                 true);
-               if (op->remap.prev)
+
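+               /*
+                * A skipped prev/next piece is still inserted into the VM
+                * tree, but clearing the pointer ensures no rebind is
+                * issued for it later.
+                */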
+               if (op->remap.prev) {
                        err |= xe_vm_insert_vma(vm, op->remap.prev);
-               if (op->remap.next)
+                       if (!err && op->remap.skip_prev)
+                               op->remap.prev = NULL;
+               }
+               if (op->remap.next) {
                        err |= xe_vm_insert_vma(vm, op->remap.next);
+                       if (!err && op->remap.skip_next)
+                               op->remap.next = NULL;
+               }
+
+               /* Adjust for partial unbind after removing VMA from VM */
+               if (!err) {
+                       op->base.remap.unmap->va->va.addr = op->remap.start;
+                       op->base.remap.unmap->va->va.range = op->remap.range;
+               }
                break;
        case DRM_GPUVA_OP_UNMAP:
                prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
                bool next = !!op->remap.next;
 
                if (!op->remap.unmap_done) {
-                       vm->async_ops.munmap_rebind_inflight = true;
-                       if (prev || next)
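+                       /*
+                        * Only a remap that leaves a prev/next piece is
+                        * followed by rebinds, so only then track the
+                        * munmap -> rebind sequence as in flight.
+                        */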
+                       if (prev || next) {
+                               vm->async_ops.munmap_rebind_inflight = true;
                                vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+                       }
                        err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
                                           op->num_syncs,
                                           !prev && !next ? op->fence : NULL,
 
 #define XE_VMA_ATOMIC_PTE_BIT  (DRM_GPUVA_USERBITS << 2)
 #define XE_VMA_FIRST_REBIND    (DRM_GPUVA_USERBITS << 3)
 #define XE_VMA_LAST_REBIND     (DRM_GPUVA_USERBITS << 4)
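+/* Largest PTE size used to map the VMA, see xe_vma_max_pte_size() */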
+#define XE_VMA_PTE_4K          (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M          (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G          (DRM_GPUVA_USERBITS << 7)
 
 struct xe_vma {
        /** @gpuva: Base GPUVA object */
        u64 start;
        /** @range: range of the VMA unmap */
        u64 range;
+       /** @skip_prev: skip prev rebind */
+       bool skip_prev;
+       /** @skip_next: skip next rebind */
+       bool skip_next;
        /** @unmap_done: unmap operation is done */
        bool unmap_done;
 };