 }
 
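+/*
+ * @res_evict: when false, xe_bo_validate() is not allowed to evict other
+ * BOs sharing this VM's reservation object to make room for @vma's BO.
+ */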
 static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
-                                bool validate)
+                                bool res_evict, bool validate)
 {
        struct xe_bo *bo = xe_vma_bo(vma);
        struct xe_vm *vm = xe_vma_vm(vma);
        int err = 0;
 
        if (bo) {
                if (!bo->vm)
                        err = drm_exec_lock_obj(exec, &bo->ttm.base);
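+               /*
+                * Same-VM eviction is allowed only outside preempt-fence
+                * mode and when the caller permits it via res_evict.
+                */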
                if (!err && validate)
                        err = xe_bo_validate(bo, vm,
-                                            !xe_vm_in_preempt_fence_mode(vm), exec);
+                                            !xe_vm_in_preempt_fence_mode(vm) &&
+                                            res_evict, exec);
        }
 
        return err;
 }
 
 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
-                           struct xe_vma_op *op)
+                           struct xe_vma_ops *vops, struct xe_vma_op *op)
 {
        int err = 0;
+       bool res_evict;
+
+       /*
+        * Only allow evicting a BO within the VM if this bind is not part of
+        * an array of binds: a bind in the array could otherwise evict a BO
+        * that another bind in the same array has already validated.
+        */
+       res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS);
 
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                if (!op->map.invalidate_on_bind)
                        err = vma_lock_and_validate(exec, op->map.vma,
+                                                   res_evict,
                                                    !xe_vm_in_fault_mode(vm) ||
                                                    op->map.immediate);
                break;
 
        case DRM_GPUVA_OP_REMAP:
                err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
                if (err)
                        break;
 
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.remap.unmap->va),
-                                           false);
+                                           res_evict, false);
                if (!err && op->remap.prev)
-                       err = vma_lock_and_validate(exec, op->remap.prev, true);
+                       err = vma_lock_and_validate(exec, op->remap.prev,
+                                                   res_evict, true);
                if (!err && op->remap.next)
-                       err = vma_lock_and_validate(exec, op->remap.next, true);
+                       err = vma_lock_and_validate(exec, op->remap.next,
+                                                   res_evict, true);
                break;
        case DRM_GPUVA_OP_UNMAP:
                err = check_ufence(gpuva_to_vma(op->base.unmap.va));
                if (err)
                        break;
 
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.unmap.va),
-                                           false);
+                                           res_evict, false);
                break;
        case DRM_GPUVA_OP_PREFETCH:
        {
                struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
                u32 region = op->prefetch.region;
 
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.prefetch.va),
-                                           false);
+                                           res_evict, false);
                if (!err && !xe_vma_has_no_bo(vma))
                        err = xe_bo_migrate(xe_vma_bo(vma),
                                            region_to_mem_type[region],
                                            NULL, exec);
                break;
        }
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
 
        return err;
 }
 
 static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
                                            struct xe_vm *vm,
                                            struct xe_vma_ops *vops)
 {
        struct xe_vma_op *op;
        int err;
 
        err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
        if (err)
                return err;
 
        list_for_each_entry(op, &vops->list, link) {
-               err = op_lock_and_prep(exec, vm, op);
+               err = op_lock_and_prep(exec, vm, vops, op);
                if (err)
                        return err;
        }
 
        return 0;
 }
 
        xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
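+       /*
+        * An array of binds must not evict BOs that earlier binds in the
+        * array have already validated.
+        */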
+       if (args->num_binds > 1)
+               vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;
        for (i = 0; i < args->num_binds; ++i) {
                u64 range = bind_ops[i].range;
                u64 addr = bind_ops[i].addr;