 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-       struct llist_node *freed;
-
-       /* Free the oldest, most stale object to keep the free_list short */
-       freed = NULL;
-       if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-               /* Only one consumer of llist_del_first() allowed */
-               spin_lock(&i915->mm.free_lock);
-               freed = llist_del_first(&i915->mm.free_list);
-               spin_unlock(&i915->mm.free_lock);
-       }
-       if (unlikely(freed)) {
-               freed->next = NULL;
+       struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+       if (unlikely(freed))
                __i915_gem_free_objects(i915, freed);
-       }
 }
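
This works because llist_del_first() and llist_del_all() have different
concurrency contracts: llist_del_first() tolerates only a single consumer
at a time (two racing deleters can corrupt the list head, hence the old
free_lock), whereas llist_del_all() detaches the entire chain with one
atomic xchg and so needs no serialisation against other consumers or
against llist_add() producers. A minimal sketch of the two patterns,
using the stock <linux/llist.h> API (the demo struct and function names
are illustrative, not taken from the driver):

	#include <linux/llist.h>
	#include <linux/spinlock.h>

	struct demo_free_list {
		struct llist_head list;
		spinlock_t lock;	/* only the del_first pattern needs this */
	};

	/* The pattern the patch removes: llist_del_first() allows just
	 * one consumer at a time, so concurrent callers must serialise. */
	static struct llist_node *demo_pop_one(struct demo_free_list *d)
	{
		struct llist_node *node;

		spin_lock(&d->lock);
		node = llist_del_first(&d->list);
		spin_unlock(&d->lock);
		return node;
	}

	/* The pattern the patch adopts: llist_del_all() detaches the
	 * whole chain atomically, so no lock is needed even with many
	 * concurrent callers. */
	static struct llist_node *demo_pop_all(struct demo_free_list *d)
	{
		return llist_del_all(&d->list);
	}
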
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);
-       struct llist_node *freed;
-
-       /*
-        * All file-owned VMA should have been released by this point through
-        * i915_gem_close_object(), or earlier by i915_gem_context_close().
-        * However, the object may also be bound into the global GTT (e.g.
-        * older GPUs without per-process support, or for direct access through
-        * the GTT either for the user or for scanout). Those VMA still need to
-        * be unbound now.
-        */
-
-       spin_lock(&i915->mm.free_lock);
-       while ((freed = llist_del_all(&i915->mm.free_list))) {
-               spin_unlock(&i915->mm.free_lock);
 
-               __i915_gem_free_objects(i915, freed);
-               if (need_resched())
-                       return;
-
-               spin_lock(&i915->mm.free_lock);
-       }
-       spin_unlock(&i915->mm.free_lock);
+       i915_gem_flush_free_objects(i915);
 }
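
With the lock-free drain above, the worker reduces to a call into the
same helper: it no longer bounces free_lock around llist_del_all() or
bails out early on need_resched(). For context, the producer side (not
shown in this hunk) typically pushes the object's llist_node and kicks
the worker only when the list transitions from empty, exploiting the
fact that llist_add() returns true exactly in that case. A sketch under
that assumption (the demo_defer_free() name, the free_node parameter and
the use of system_wq are illustrative; the driver may use its own
workqueue):

	#include <linux/llist.h>
	#include <linux/workqueue.h>

	/* Hypothetical producer: defer freeing to the worker. */
	static void demo_defer_free(struct drm_i915_private *i915,
				    struct llist_node *free_node)
	{
		/* llist_add() returns true only when the list was empty,
		 * so the work is queued once per batch, not per object. */
		if (llist_add(free_node, &i915->mm.free_list))
			queue_work(system_wq, &i915->mm.free_work);
	}
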
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)