};
 
 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev);
 
 /*
  * CGS
 
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
-       amdgpu_fw_reserve_vram_fini(adev);
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        /* Allocate pages table */
        adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-       if (adev->gart.pages == NULL) {
-               amdgpu_gart_fini(adev);
+       if (adev->gart.pages == NULL)
                return -ENOMEM;
-       }
 #endif
 
        return 0;
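
For reference, the hunk above touches the debugfs page-table allocation in what is amdgpu_gart_init() upstream; with it applied, a failed vzalloc() simply returns instead of also calling amdgpu_gart_fini(). A sketch of the resulting code, assuming the surrounding lines are unchanged:

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* CPU-side bookkeeping of GART pages, only used for debugfs */
	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
	if (adev->gart.pages == NULL)
		return -ENOMEM;	/* no amdgpu_gart_fini() on this path any more */
#endif

	return 0;
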
  */
 void amdgpu_gart_fini(struct amdgpu_device *adev)
 {
-       if (adev->gart.ready) {
-               /* unbind pages */
-               amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-       }
-       adev->gart.ready = false;
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        vfree(adev->gart.pages);
        adev->gart.pages = NULL;
 
 
        amdgpu_ttm_debugfs_fini(adev);
        amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+       amdgpu_fw_reserve_vram_fini(adev);
 
        ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
        if (adev->gds.oa.total_size)
                ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
-       amdgpu_gart_fini(adev);
        amdgpu_ttm_global_fini(adev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
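
Taken together with the amdgpu_device_fini and amdgpu_gart_fini hunks above, this hunk moves the firmware-reserved-VRAM cleanup from device teardown into amdgpu_ttm_fini() and stops amdgpu_ttm_fini() from finalizing the GART itself. A sketch of the tail of amdgpu_ttm_fini() with the hunk applied, assuming the lines outside the visible context are unchanged:

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
	/* release the firmware-reserved VRAM before ttm_bo_clean_mm(TTM_PL_VRAM) below */
	amdgpu_fw_reserve_vram_fini(adev);

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	if (adev->gds.oa.total_size)
		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	/* amdgpu_gart_fini() is no longer called from here */
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");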
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        gmc_v6_0_gart_fini(adev);
-       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
        release_firmware(adev->mc.fw);
        adev->mc.fw = NULL;
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        gmc_v7_0_gart_fini(adev);
-       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
        release_firmware(adev->mc.fw);
        adev->mc.fw = NULL;
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        gmc_v8_0_gart_fini(adev);
-       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
        release_firmware(adev->mc.fw);
        adev->mc.fw = NULL;
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        gmc_v9_0_gart_fini(adev);
-       amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);
 
        return 0;
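
The same reordering is applied to all four GMC generations: amdgpu_gem_force_release() now runs before amdgpu_vm_manager_fini() rather than after the GART teardown. Using the gmc_v9_0 hunk as the example, the sw_fini callback ends up looking roughly like the sketch below; the gmc_v6_0/v7_0/v8_0 variants differ only in the gmc_vX_0_gart_fini() call and, as their hunks show, in additionally releasing adev->mc.fw.

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* force-release remaining GEM objects before the VM manager goes away */
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}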