}
 
 /**
- * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * dma_resv_reserve_fences - Reserve space to add fences to
  * a dma_resv.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
  * RETURNS
  * Zero for success, or -errno
  */
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
 {
        struct dma_resv_list *old, *new;
        unsigned int i, j, k, max;
 
        return 0;
 }
-EXPORT_SYMBOL(dma_resv_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_fences);
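
For reference, the call sequence this helper standardizes is: reserve a slot while errors can still be handled, then add the fence once failure is no longer an option. A minimal sketch, with "resv" and "fence" standing in for a driver's reservation object and completion fence (not code from this series):

        int ret;

        dma_resv_lock(resv, NULL);

        /* Reserve the slot while -ENOMEM can still be propagated. */
        ret = dma_resv_reserve_fences(resv, 1);
        if (ret) {
                dma_resv_unlock(resv);
                return ret;
        }

        /* Guaranteed not to fail now that a slot is reserved. */
        dma_resv_add_shared_fence(resv, fence);
        dma_resv_unlock(resv);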
 
 #ifdef CONFIG_DEBUG_MUTEXES
 /**
  * @obj: the dma_resv object to reset
  *
  * Reset the number of pre-reserved shared slots to test that drivers do
- * correct slot allocation using dma_resv_reserve_shared(). See also
+ * correct slot allocation using dma_resv_reserve_fences(). See also
  * &dma_resv_list.shared_max.
  */
 void dma_resv_reset_shared_max(struct dma_resv *obj)
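
A rough sketch of what this debug helper is for, assuming CONFIG_DEBUG_MUTEXES and placeholder "resv"/"fence" objects (not code from this series): once the pre-reserved slots are reset, any further add that needs a new slot has to be preceded by another reservation, otherwise the check against &dma_resv_list.shared_max in dma_resv_add_shared_fence() should fire.

        dma_resv_lock(resv, NULL);
        if (!dma_resv_reserve_fences(resv, 1))
                dma_resv_add_shared_fence(resv, fence);

        /* Pretend every pre-reserved slot has been consumed ... */
        dma_resv_reset_shared_max(resv);

        /*
         * ... so adding a fence from a new context here, without calling
         * dma_resv_reserve_fences() again, trips the debug check.
         */
        dma_resv_unlock(resv);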
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
- * dma_resv_reserve_shared() has been called.
+ * dma_resv_reserve_fences() must have been called.
  *
  * See also &dma_resv.fence for a discussion of the semantics.
  */
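
Several of the helpers converted below return void and therefore cannot propagate a reservation failure; the pattern this series uses there (see the amdgpu, radeon and vmwgfx hunks) is to fall back to waiting for the fence instead of adding it. Roughly, with placeholder names:

        if (dma_resv_reserve_fences(resv, 1)) {
                /* Last resort on OOM: block for the fence instead. */
                dma_fence_wait(fence, false);
                return;
        }
        dma_resv_add_shared_fence(resv, fence);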
 
                goto err_free;
        }
 
-       if (shared) {
-               r = dma_resv_reserve_shared(&resv, 1);
-               if (r) {
-                       pr_err("Resv shared slot allocation failed\n");
-                       goto err_unlock;
-               }
+       r = dma_resv_reserve_fences(&resv, 1);
+       if (r) {
+               pr_err("Resv shared slot allocation failed\n");
+               goto err_unlock;
+       }
 
+       if (shared)
                dma_resv_add_shared_fence(&resv, f);
-       } else {
+       else
                dma_resv_add_excl_fence(&resv, f);
-       }
 
        if (dma_resv_test_signaled(&resv, shared)) {
                pr_err("Resv unexpectedly signaled\n");
                goto err_free;
        }
 
-       if (shared) {
-               r = dma_resv_reserve_shared(&resv, 1);
-               if (r) {
-                       pr_err("Resv shared slot allocation failed\n");
-                       goto err_unlock;
-               }
+       r = dma_resv_reserve_fences(&resv, 1);
+       if (r) {
+               pr_err("Resv shared slot allocation failed\n");
+               goto err_unlock;
+       }
 
+       if (shared)
                dma_resv_add_shared_fence(&resv, f);
-       } else {
+       else
                dma_resv_add_excl_fence(&resv, f);
-       }
 
        r = -ENOENT;
        dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
                goto err_free;
        }
 
-       if (shared) {
-               r = dma_resv_reserve_shared(&resv, 1);
-               if (r) {
-                       pr_err("Resv shared slot allocation failed\n");
-                       dma_resv_unlock(&resv);
-                       goto err_free;
-               }
+       r = dma_resv_reserve_fences(&resv, 1);
+       if (r) {
+               pr_err("Resv shared slot allocation failed\n");
+               dma_resv_unlock(&resv);
+               goto err_free;
+       }
 
+       if (shared)
                dma_resv_add_shared_fence(&resv, f);
-       } else {
+       else
                dma_resv_add_excl_fence(&resv, f);
-       }
        dma_resv_unlock(&resv);
 
        r = -ENOENT;
                goto err_resv;
        }
 
-       if (shared) {
-               r = dma_resv_reserve_shared(&resv, 1);
-               if (r) {
-                       pr_err("Resv shared slot allocation failed\n");
-                       dma_resv_unlock(&resv);
-                       goto err_resv;
-               }
+       r = dma_resv_reserve_fences(&resv, 1);
+       if (r) {
+               pr_err("Resv shared slot allocation failed\n");
+               dma_resv_unlock(&resv);
+               goto err_resv;
+       }
 
+       if (shared)
                dma_resv_add_shared_fence(&resv, f);
-       } else {
+       else
                dma_resv_add_excl_fence(&resv, f);
-       }
        dma_resv_unlock(&resv);
 
        r = dma_resv_get_fences(&resv, shared, &i, &fences);
 
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
-       ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.bo,
         * Add process eviction fence to bo so they can
         * evict each other.
         */
-       ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
 
                     bool shared)
 {
        struct dma_resv *resv = bo->tbo.base.resv;
+       int r;
+
+       r = dma_resv_reserve_fences(resv, 1);
+       if (r) {
+               /* As last resort on OOM we block for the fence */
+               dma_fence_wait(fence, false);
+               return;
+       }
 
        if (shared)
                dma_resv_add_shared_fence(resv, fence);
 
        if (r)
                goto error_free_root;
 
-       r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
+       r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
        if (r)
                goto error_unreserve;
 
                value = 0;
        }
 
-       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve fence slot\n", r);
                goto error_unlock;
 
                goto reserve_bo_failed;
        }
 
-       r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+       r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve bo\n", r);
                amdgpu_bo_unreserve(bo);
 
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
                struct dma_resv *robj = bo->obj->base.resv;
 
-               if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-                       ret = dma_resv_reserve_shared(robj, 1);
-                       if (ret)
-                               return ret;
-               }
+               ret = dma_resv_reserve_fences(robj, 1);
+               if (ret)
+                       return ret;
 
                if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
                        continue;
 
        trace_i915_gem_object_clflush(obj);
 
        clflush = NULL;
-       if (!(flags & I915_CLFLUSH_SYNC))
+       if (!(flags & I915_CLFLUSH_SYNC) &&
+           dma_resv_reserve_fences(obj->base.resv, 1) == 0)
                clflush = clflush_work_create(obj);
        if (clflush) {
                i915_sw_fence_await_reservation(&clflush->base.chain,
 
                        }
                }
 
-               if (!(ev->flags & EXEC_OBJECT_WRITE)) {
-                       err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
-                       if (err)
-                               return err;
-               }
+               err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+               if (err)
+                       return err;
 
                GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
                           eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
-       err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
+       err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
        if (err)
                return err;
 
 
        assert_object_held(src);
        i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 
-       ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+       ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
+       if (ret)
+               return ret;
+
+       ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
        if (ret)
                return ret;
 
 
                                          i915_gem_object_is_lmem(obj),
                                          0xdeadbeaf, &rq);
                if (rq) {
-                       dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+                       err = dma_resv_reserve_fences(obj->base.resv, 1);
+                       if (!err)
+                               dma_resv_add_excl_fence(obj->base.resv,
+                                                       &rq->fence);
                        i915_gem_object_set_moving_fence(obj, &rq->fence);
                        i915_request_put(rq);
                }
 
                        intel_frontbuffer_put(front);
                }
 
+               if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+                       err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+                       if (unlikely(err))
+                               return err;
+               }
+
                if (fence) {
                        dma_resv_add_excl_fence(vma->obj->base.resv, fence);
                        obj->write_domain = I915_GEM_DOMAIN_RENDER;
                }
        } else {
                if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-                       err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
+                       err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
                        if (unlikely(err))
                                return err;
                }
        if (!obj->mm.rsgt)
                return -EBUSY;
 
-       err = dma_resv_reserve_shared(obj->base.resv, 1);
+       err = dma_resv_reserve_fences(obj->base.resv, 1);
        if (err)
                return -EBUSY;
 
 
        }
 
        i915_gem_object_lock(obj, NULL);
+
+       err = dma_resv_reserve_fences(obj->base.resv, 1);
+       if (err) {
+               i915_gem_object_unlock(obj);
+               goto out_put;
+       }
+
        /* Put the pages into a known state -- from the gpu for added fun */
        intel_engine_pm_get(engine);
        err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
 
 static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
                            bool write, bool explicit)
 {
-       int err = 0;
+       int err;
 
-       if (!write) {
-               err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
-               if (err)
-                       return err;
-       }
+       err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
+       if (err)
+               return err;
 
        /* explicit sync use user passed dep fence */
        if (explicit)
 
                struct drm_gem_object *obj = &submit->bos[i].obj->base;
                bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
-               if (!write) {
-                       /* NOTE: _reserve_shared() must happen before
-                        * _add_shared_fence(), which makes this a slightly
-                        * strange place to call it.  OTOH this is a
-                        * convenient can-fail point to hook it in.
-                        */
-                       ret = dma_resv_reserve_shared(obj->resv, 1);
-                       if (ret)
-                               return ret;
-               }
+               /* NOTE: _reserve_fences() must happen before
+                * _add_shared_fence(), which makes this a slightly
+                * strange place to call it.  OTOH this is a
+                * convenient can-fail point to hook it in.
+                */
+               ret = dma_resv_reserve_fences(obj->resv, 1);
+               if (ret)
+                       return ret;
 
                /* exclusive fences must be ordered */
                if (no_implicit && !write)
 
        struct dma_resv *resv = nvbo->bo.base.resv;
        int i, ret;
 
-       if (!exclusive) {
-               ret = dma_resv_reserve_shared(resv, 1);
-               if (ret)
-                       return ret;
-       }
+       ret = dma_resv_reserve_fences(resv, 1);
+       if (ret)
+               return ret;
 
        /* Waiting for the exclusive fence first causes performance regressions
         * under some circumstances. So manually wait for the shared ones first.
 
        int i, ret;
 
        for (i = 0; i < bo_count; i++) {
+               ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+               if (ret)
+                       return ret;
+
                /* panfrost always uses write mode in its current uapi */
                ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
                                                              true);
 
                        return ret;
        }
 
-       ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (ret)
                return ret;
 
 
                        return r;
 
                radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
+
+               r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+               if (r)
+                       return r;
        }
 
        return radeon_vm_clear_invalids(rdev, vm);
 
                     bool shared)
 {
        struct dma_resv *resv = bo->tbo.base.resv;
+       int r;
+
+       r = dma_resv_reserve_fences(resv, 1);
+       if (r) {
+               /* As last resort on OOM we block for the fence */
+               dma_fence_wait(&fence->base, false);
+               return;
+       }
 
        if (shared)
                dma_resv_add_shared_fence(resv, &fence->base);
 
                int r;
 
                radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-               r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
+               r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
                if (r)
                        return r;
 
 
                }
        }
 
+       ret = dma_resv_reserve_fences(bo->base.resv, 1);
+       if (ret)
+               goto out_err;
+
        ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
        if (ret) {
                if (ret == -EMULTIHOP)
 
        dma_resv_add_shared_fence(bo->base.resv, fence);
 
-       ret = dma_resv_reserve_shared(bo->base.resv, 1);
+       ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (unlikely(ret)) {
                dma_fence_put(fence);
                return ret;
        bool type_found = false;
        int i, ret;
 
-       ret = dma_resv_reserve_shared(bo->base.resv, 1);
+       ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;
 
 
 
        fbo->base = *bo;
 
-       ttm_bo_get(bo);
-       fbo->bo = bo;
-
        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);
 
+       ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+       if (ret) {
+               kfree(fbo);
+               return ret;
+       }
+
+       ttm_bo_get(bo);
+       fbo->bo = bo;
+
        ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
 
        *new_obj = &fbo->base;
 
 
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
+               unsigned int num_fences;
 
                ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                if (ret == -EALREADY && dups) {
                        continue;
                }
 
+               num_fences = max(entry->num_shared, 1u);
                if (!ret) {
-                       if (!entry->num_shared)
-                               continue;
-
-                       ret = dma_resv_reserve_shared(bo->base.resv,
-                                                               entry->num_shared);
+                       ret = dma_resv_reserve_fences(bo->base.resv,
+                                                     num_fences);
                        if (!ret)
                                continue;
                }
                        ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
                }
 
-               if (!ret && entry->num_shared)
-                       ret = dma_resv_reserve_shared(bo->base.resv,
-                                                               entry->num_shared);
+               if (!ret)
+                       ret = dma_resv_reserve_fences(bo->base.resv,
+                                                     num_fences);
 
                if (unlikely(ret != 0)) {
                        if (ticket) {
 
                return ret;
 
        for (i = 0; i < job->bo_count; i++) {
+               ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+               if (ret)
+                       goto fail;
+
                ret = drm_sched_job_add_implicit_dependencies(&job->base,
                                                              job->bo[i], true);
-               if (ret) {
-                       drm_gem_unlock_reservations(job->bo, job->bo_count,
-                                                   acquire_ctx);
-                       return ret;
-               }
+               if (ret)
+                       goto fail;
        }
 
        return 0;
+
+fail:
+       drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+       return ret;
 }
 
 /**
 
        for (i = 0; i < exec->bo_count; i++) {
                bo = &exec->bo[i]->base;
 
-               ret = dma_resv_reserve_shared(bo->resv, 1);
+               ret = dma_resv_reserve_fences(bo->resv, 1);
                if (ret) {
                        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
                        return ret;
 
        }
 
        /* Expose the fence via the dma-buf */
-       ret = 0;
        dma_resv_lock(resv, NULL);
-       if (arg->flags & VGEM_FENCE_WRITE)
-               dma_resv_add_excl_fence(resv, fence);
-       else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
-               dma_resv_add_shared_fence(resv, fence);
+       ret = dma_resv_reserve_fences(resv, 1);
+       if (!ret) {
+               if (arg->flags & VGEM_FENCE_WRITE)
+                       dma_resv_add_excl_fence(resv, fence);
+               else
+                       dma_resv_add_shared_fence(resv, fence);
+       }
        dma_resv_unlock(resv);
 
        /* Record the fence in our idr for later signaling */
 
 
 int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 {
+       unsigned int i;
        int ret;
 
        if (objs->nents == 1) {
                ret = drm_gem_lock_reservations(objs->objs, objs->nents,
                                                &objs->ticket);
        }
+       if (ret)
+               return ret;
+
+       for (i = 0; i < objs->nents; ++i) {
+               ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
+               if (ret)
+                       return ret;
+       }
        return ret;
 }
 
 
                         struct vmw_fence_obj *fence)
 {
        struct ttm_device *bdev = bo->bdev;
-
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);
+       int ret;
 
-       if (fence == NULL) {
+       if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       else
+               dma_fence_get(&fence->base);
+
+       ret = dma_resv_reserve_fences(bo->base.resv, 1);
+       if (!ret)
                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
-               dma_fence_put(&fence->base);
-       } else
-               dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+       else
+               /* Last resort fallback when we are OOM */
+               dma_fence_wait(&fence->base, false);
+       dma_fence_put(&fence->base);
 }
 
 
 
         * A new fence is added by calling dma_resv_add_shared_fence(). Since
         * this often needs to be done past the point of no return in command
         * submission it cannot fail, and therefore sufficient slots need to be
-        * reserved by calling dma_resv_reserve_shared().
+        * reserved by calling dma_resv_reserve_fences().
         *
         * Note that actual semantics of what an exclusive or shared fence mean
         * is defined by the user, for reservation objects shared across drivers
 
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *fence);
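
As the &dma_resv.fence comment above spells out, the reservation belongs in the part of command submission that may still fail, while the add happens past the point of no return. A rough sketch of that split, with hypothetical "bo" and "job_fence" names:

        /* Preparation step: errors can still be returned to userspace. */
        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (ret)
                return ret;

        /* ... push the job to the hardware: point of no return ... */

        /* Commit step: must not fail, consumes the slot reserved above. */
        dma_resv_add_shared_fence(bo->base.resv, job_fence);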