        vmw_kms_close(dev_priv);
 out_no_kms:
        if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+               vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
        if (dev_priv->has_gmr)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+               vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
        vmw_vram_manager_fini(dev_priv);
 out_no_vram:
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_overlay_close(dev_priv);
 
        if (dev_priv->has_gmr)
-               (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-       (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+               vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
 
        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
-               (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+               vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
        vmw_vram_manager_fini(dev_priv);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 
        return 0;
 }
 
-static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
 {
+       struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;
 
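+       /* Stop the manager from taking new allocations before draining it. */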
+       ttm_mem_type_manager_disable(man);
+
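+       /* Clean out anything still on this manager's LRU lists. */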
+       ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man);
+
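+       /* Free the GMR id allocator state backing this manager. */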
        if (gman) {
                ida_destroy(&gman->gmr_ida);
                kfree(gman);
        }
-       return 0;
+
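+       /* Finish the common TTM memory type manager teardown. */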
+       ttm_mem_type_manager_cleanup(man);
 }
 
 static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
-       .takedown = vmw_gmrid_man_takedown,
        .get_node = vmw_gmrid_man_get_node,
        .put_node = vmw_gmrid_man_put_node,
 };