struct i915_request *rq)
 {
        struct i915_active_request *active;
+       int err = 0;
+
+       /* Prevent reaping in case we malloc/wait while building the tree */
+       i915_active_acquire(ref);
 
        active = active_instance(ref, timeline);
-       if (IS_ERR(active))
-               return PTR_ERR(active);
+       if (IS_ERR(active)) {
+               err = PTR_ERR(active);
+               goto out;
+       }
 
        if (!i915_active_request_isset(active))
                ref->count++;
        __i915_active_request_set(active, rq);
 
        GEM_BUG_ON(!ref->count);
-       return 0;
+out:
+       i915_active_release(ref);
+       return err;
 }
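
The hunk above replaces the early error return in i915_active_ref() with a single exit label, so the reference taken by i915_active_acquire() is dropped on every path, error or not. A minimal sketch of that guard idiom follows; struct tracker, tracker_acquire(), tracker_release(), step_alloc() and step_wait() are hypothetical stand-ins for illustration, not the driver's API:

struct tracker {
        int count;                      /* references held, 0 == idle */
};

static void tracker_acquire(struct tracker *t)
{
        t->count++;                     /* tree cannot be reaped while held */
}

static void tracker_release(struct tracker *t)
{
        if (!--t->count) {
                /* last reference dropped: reaping would be safe again */
        }
}

static int step_alloc(struct tracker *t) { return 0; }  /* may allocate */
static int step_wait(struct tracker *t) { return 0; }   /* may wait */

static int tracked_op(struct tracker *t)
{
        int err = 0;

        tracker_acquire(t);             /* guard before any alloc/wait */

        err = step_alloc(t);            /* cf. active_instance() */
        if (err)
                goto out;               /* a bare return here would leak the ref */

        err = step_wait(t);
out:
        tracker_release(t);             /* balances the acquire on every path */
        return err;
}

The point is purely structural: once the count has been raised, returning directly from the IS_ERR() branch would leak the reference, which is why the hunk funnels both exits through the single out: label.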
 
@@ ... @@ bool i915_active_acquire(struct i915_active *ref)
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
        struct active_node *it, *n;
-       int ret;
+       int err = 0;
 
-       ret = i915_request_await_active_request(rq, &ref->last);
-       if (ret)
-               return ret;
+       /* await allocates and so we need to avoid hitting the shrinker */
+       if (i915_active_acquire(ref))
+               goto out; /* was idle */
+
+       err = i915_request_await_active_request(rq, &ref->last);
+       if (err)
+               goto out;
 
        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-               ret = i915_request_await_active_request(rq, &it->base);
-               if (ret)
-                       return ret;
+               err = i915_request_await_active_request(rq, &it->base);
+               if (err)
+                       goto out;
        }
 
-       return 0;
+out:
+       i915_active_release(ref);
+       return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
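
For context, one plausible reading of the acquire/release pair these hunks depend on, inferred from the "was idle" comment rather than quoted from the driver: acquire raises the count and reports whether the tracker was idle beforehand, while release drops the count and retires on the transition back to zero. A sketch of those assumed semantics (i915_active_sketch and its fields are illustrative, not the real struct i915_active):

/* bool as in <linux/types.h>; use <stdbool.h> outside the kernel */
struct i915_active_sketch {
        int count;                      /* 0 == idle, nothing to await */
        void (*retire)(struct i915_active_sketch *ref);
};

static bool sketch_active_acquire(struct i915_active_sketch *ref)
{
        return !ref->count++;           /* true: was idle before we took a ref */
}

static void sketch_active_release(struct i915_active_sketch *ref)
{
        if (!--ref->count)
                ref->retire(ref);       /* last ref gone: retire, reap the tree */
}

Under that reading, the early "goto out" in i915_request_await_active() stays balanced: the acquire that reported idle still raised the count, and the release at out: drops it again.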