*/
 void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
 {
-       assert_object_held(obj);
+       assert_object_held_shared(obj);
 
        if (!list_empty(&obj->vma.list)) {
                struct i915_vma *vma;
                        continue;
                }
 
-               if (!i915_gem_object_trylock(obj, NULL)) {
-                       /* busy, toss it back to the pile */
-                       if (llist_add(&obj->freed, &i915->mm.free_list))
-                               queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
-                       continue;
-               }
-
                __i915_gem_object_pages_fini(obj);
-               i915_gem_object_unlock(obj);
                __i915_gem_free_object(obj);
 
                /* But keep the pointer alive for RCU-protected lookups */
 static void __i915_gem_free_work(struct work_struct *work)
 {
        struct drm_i915_private *i915 =
-               container_of(work, struct drm_i915_private, mm.free_work.work);
+               container_of(work, struct drm_i915_private, mm.free_work);
 
        i915_gem_flush_free_objects(i915);
 }
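
For reference, the container_of() adjustment in this hunk follows directly from the type change of free_work: with a struct delayed_work the callback receives a pointer to the embedded .work member, so recovering the containing structure needs the extra ".work" step; with a plain struct work_struct it does not. A minimal sketch of that pattern, using a hypothetical struct foo rather than drm_i915_private:

```c
#include <linux/workqueue.h>

/* Hypothetical container, standing in for drm_i915_private/mm here. */
struct foo {
	struct work_struct free_work;		/* plain work item */
	struct delayed_work retire_work;	/* delayed work item, for contrast */
};

static void foo_free_work(struct work_struct *work)
{
	/* work points at foo::free_work itself, so no extra ".work" hop. */
	struct foo *foo = container_of(work, struct foo, free_work);

	(void)foo;
}

static void foo_retire_work(struct work_struct *work)
{
	/*
	 * A delayed_work callback still receives the inner work_struct,
	 * hence the "retire_work.work" member path in container_of().
	 */
	struct foo *foo = container_of(work, struct foo, retire_work.work);

	(void)foo;
}
```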
         */
 
        if (llist_add(&obj->freed, &i915->mm.free_list))
-               queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
+               queue_work(i915->wq, &i915->mm.free_work);
 }
 
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
 
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
-       INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
+       INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
 void i915_objects_module_exit(void)
 
         * List of objects which are pending destruction.
         */
        struct llist_head free_list;
-       struct delayed_work free_work;
+       struct work_struct free_work;
        /**
         * Count of objects pending destructions. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
         * armed the work again.
         */
        while (atomic_read(&i915->mm.free_count)) {
-               flush_delayed_work(&i915->mm.free_work);
+               flush_work(&i915->mm.free_work);
                flush_delayed_work(&i915->bdev.wq);
                rcu_barrier();
        }
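
Taken together, the hunks above convert the free path from a self-rearming delayed work to an immediate work item: initialise with INIT_WORK(), arm with queue_work() when the first object lands on the llist, and drain with flush_work() plus rcu_barrier() while the pending count is non-zero. A hedged, self-contained sketch of that conversion pattern (hypothetical names, not the i915 code itself):

```c
#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

/* Hypothetical device struct mirroring the free_list/free_work pairing. */
struct bar {
	struct workqueue_struct *wq;
	struct llist_head free_list;
	struct work_struct free_work;
	atomic_t free_count;
};

struct bar_obj {
	struct llist_node freed;
};

static void bar_free_work(struct work_struct *work)
{
	struct bar *bar = container_of(work, struct bar, free_work);
	struct bar_obj *obj, *on;

	llist_for_each_entry_safe(obj, on, llist_del_all(&bar->free_list), freed) {
		/* release obj here */
		atomic_dec(&bar->free_count);
	}
}

static void bar_init(struct bar *bar)
{
	init_llist_head(&bar->free_list);
	INIT_WORK(&bar->free_work, bar_free_work);
}

static void bar_queue_free(struct bar *bar, struct bar_obj *obj)
{
	atomic_inc(&bar->free_count);
	/* llist_add() returns true only when adding to an empty list. */
	if (llist_add(&obj->freed, &bar->free_list))
		queue_work(bar->wq, &bar->free_work);
}

static void bar_drain(struct bar *bar)
{
	/* Loop: freeing may re-arm the work via newly deferred frees. */
	while (atomic_read(&bar->free_count)) {
		flush_work(&bar->free_work);
		rcu_barrier();
	}
}
```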