 /*
  * GEM objects.
  */
-struct amdgpu_gem {
-       struct mutex            mutex;
-       struct list_head        objects;
-};
-
-int amdgpu_gem_init(struct amdgpu_device *adev);
-void amdgpu_gem_fini(struct amdgpu_device *adev);
+void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                                int alignment, u32 initial_domain,
                                u64 flags, bool kernel,
 
        /* memory management */
        struct amdgpu_mman              mman;
-       struct amdgpu_gem               gem;
        struct amdgpu_vram_scratch      vram_scratch;
        struct amdgpu_wb                wb;
        atomic64_t                      vram_usage;
 
         * can recall function without having locking issues */
        mutex_init(&adev->vm_manager.lock);
        atomic_set(&adev->irq.ih.lock, 0);
-       mutex_init(&adev->gem.mutex);
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
        mutex_init(&adev->srbm_mutex);
 
        }
        *obj = &robj->gem_base;
 
-       mutex_lock(&adev->gem.mutex);
-       list_add_tail(&robj->list, &adev->gem.objects);
-       mutex_unlock(&adev->gem.mutex);
-
        return 0;
 }
 
-int amdgpu_gem_init(struct amdgpu_device *adev)
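+/*
+ * amdgpu_gem_force_release - drop all GEM handles still held by user space
+ *
+ * Called on teardown; warns if clients are still active and releases any
+ * object references left in their handle tables.
+ */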
+void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-       INIT_LIST_HEAD(&adev->gem.objects);
-       return 0;
-}
+       struct drm_device *ddev = adev->ddev;
+       struct drm_file *file;
 
-void amdgpu_gem_fini(struct amdgpu_device *adev)
-{
-       amdgpu_bo_force_delete(adev);
+       mutex_lock(&ddev->struct_mutex);
+
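+       /* struct_mutex keeps ddev->filelist stable while we walk it */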
+       list_for_each_entry(file, &ddev->filelist, lhead) {
+               struct drm_gem_object *gobj;
+               int handle;
+
+               WARN_ONCE(1, "Still active user space clients!\n");
+               spin_lock(&file->table_lock);
+               idr_for_each_entry(&file->object_idr, gobj, handle) {
+                       WARN_ONCE(1, "And also active allocations!\n");
+                       drm_gem_object_unreference(gobj);
+               }
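+               /* all references dropped, now release the idr itself */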
+               idr_destroy(&file->object_idr);
+               spin_unlock(&file->table_lock);
+       }
+
+       mutex_unlock(&ddev->struct_mutex);
 }
 
 /*
 
 
        amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);
 
-       mutex_lock(&bo->adev->gem.mutex);
-       list_del_init(&bo->list);
-       mutex_unlock(&bo->adev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        kfree(bo->metadata);
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
-void amdgpu_bo_force_delete(struct amdgpu_device *adev)
-{
-       struct amdgpu_bo *bo, *n;
-
-       if (list_empty(&adev->gem.objects)) {
-               return;
-       }
-       dev_err(adev->dev, "Userspace still has active objects !\n");
-       list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-               dev_err(adev->dev, "%p %p %lu %lu force free\n",
-                       &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-                       *((unsigned long *)&bo->gem_base.refcount));
-               mutex_lock(&bo->adev->gem.mutex);
-               list_del_init(&bo->list);
-               mutex_unlock(&bo->adev->gem.mutex);
-               /* this should unref the ttm bo */
-               drm_gem_object_unreference_unlocked(&bo->gem_base);
-       }
-}
-
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* Add an MTRR for the VRAM */
 
                             u64 *gpu_addr);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
-void amdgpu_bo_force_delete(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 
        if (ret)
                return ERR_PTR(ret);
 
-       mutex_lock(&adev->gem.mutex);
-       list_add_tail(&bo->list, &adev->gem.objects);
-       mutex_unlock(&adev->gem.mutex);
-
        return &bo->gem_base;
 }
 
 
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_gem_init(adev);
-       if (r)
-               return r;
-
        r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
        if (r)
                return r;
                adev->vm_manager.enabled = false;
        }
        gmc_v7_0_gart_fini(adev);
-       amdgpu_gem_fini(adev);
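+       /* user space may have leaked handles, drop them before tearing down BOs */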
+       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
 
        return 0;
 
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_gem_init(adev);
-       if (r)
-               return r;
-
        r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
        if (r)
                return r;
                adev->vm_manager.enabled = false;
        }
        gmc_v8_0_gart_fini(adev);
-       amdgpu_gem_fini(adev);
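+       /* user space may have leaked handles, drop them before tearing down BOs */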
+       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
 
        return 0;