        trace_xe_vma_unbind(vma);
 
-       if (vma->ufence) {
-               struct xe_user_fence * const f = vma->ufence;
-
-               if (!xe_sync_ufence_get_status(f))
-                       return ERR_PTR(-EBUSY);
-
-               vma->ufence = NULL;
-               xe_sync_ufence_put(f);
-       }
-
        if (number_tiles > 1) {
                fences = kmalloc_array(number_tiles, sizeof(*fences),
                                       GFP_KERNEL);
@@ ... @@
        return err;
 }
 
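+/*
+ * Check whether @vma still carries an unsignaled user fence. While the
+ * fence is pending, return -EBUSY so the caller can back off and retry;
+ * once it has signaled, drop the VMA's reference to it and return 0.
+ */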
+static int check_ufence(struct xe_vma *vma)
+{
+       if (vma->ufence) {
+               struct xe_user_fence * const f = vma->ufence;
+
+               if (!xe_sync_ufence_get_status(f))
+                       return -EBUSY;
+
+               vma->ufence = NULL;
+               xe_sync_ufence_put(f);
+       }
+
+       return 0;
+}
+
 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
                            struct xe_vma_op *op)
 {
@@ ... @@
                                            op->map.immediate);
                break;
        case DRM_GPUVA_OP_REMAP:
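+               /* Don't tear down the old mapping while its ufence is pending. */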
+               err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
+               if (err)
+                       break;
+
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.remap.unmap->va),
                                            false);
@@ ... @@
                        err = vma_lock_and_validate(exec, op->remap.next, true);
                break;
        case DRM_GPUVA_OP_UNMAP:
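+               /* Likewise, an unbind must wait for any unsignaled ufence. */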
+               err = check_ufence(gpuva_to_vma(op->base.unmap.va));
+               if (err)
+                       break;
+
                err = vma_lock_and_validate(exec,
                                            gpuva_to_vma(op->base.unmap.va),
                                            false);