* replaces them with the dummy page (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages)
 {
        unsigned t;
        uint64_t flags = 0;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to unbind memory from uninitialized GART !\n");
-               return -EINVAL;
-       }
+       if (WARN_ON(!adev->gart.ptr))
+               return;
 
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
        drm_dev_exit(idx);
-       return 0;
 }
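
The new guard works because WARN_ON() evaluates to the truth value of its
condition, so a single statement both prints a backtrace and bails out,
collapsing the old WARN(1, ...) plus "return -EINVAL" pair. A minimal
sketch of the idiom (illustration only, not part of the patch):

	/* WARN_ON(cond) logs a stack trace when cond is true and returns
	 * cond, so the check and the warning share one conditional. */
	static void example_guarded_op(struct amdgpu_device *adev)
	{
		if (WARN_ON(!adev->gart.ptr))	/* no CPU mapping: warn and bail */
			return;

		/* past this point the GART table mapping is known to exist */
	}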
 
 /**
  * Map the dma_addresses into GART entries (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst)
 {
        unsigned i, j, t;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
 
                }
        }
        drm_dev_exit(idx);
-       return 0;
 }
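
For context, the index math at the top of amdgpu_gart_map() works in GPU
pages: one GART entry covers AMDGPU_GPU_PAGE_SIZE (4 KiB), so a single
CPU page may expand into several consecutive entries. A hedged sketch of
the loop structure, with the actual PTE write elided:

	/* 't' indexes GART slots, one per GPU page; each of the 'pages'
	 * CPU pages fills AMDGPU_GPU_PAGES_IN_CPU_PAGE consecutive slots. */
	static void sketch_gart_map(uint64_t offset, int pages,
				    const dma_addr_t *dma_addr)
	{
		unsigned int t = offset / AMDGPU_GPU_PAGE_SIZE;
		unsigned int i, j;

		for (i = 0; i < pages; i++) {
			uint64_t page_base = dma_addr[i];

			for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
				/* write the PTE for slot t -> page_base (elided) */
				page_base += AMDGPU_GPU_PAGE_SIZE;
			}
		}
	}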
 
 /**
  * (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, dma_addr_t *dma_addr,
                     uint64_t flags)
 {
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
-       if (!adev->gart.ptr)
-               return 0;
+       if (WARN_ON(!adev->gart.ptr))
+               return;
 
-       return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                              adev->gart.ptr);
+       amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
 }
 
 /**
 
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
-       bool                            ready;
 
        /* Asic default pte flags */
        uint64_t                        gart_pte_flags;
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-                      int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-                   int pages, dma_addr_t *dma_addr, uint64_t flags,
-                   void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-                    int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+                       int pages);
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+                    int pages, dma_addr_t *dma_addr, uint64_t flags,
+                    void *dst);
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+                     int pages, dma_addr_t *dma_addr, uint64_t flags);
 void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
 #endif
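
With the bool gone from struct amdgpu_gart, "is the GART usable?" is now
inferred from whether the page table has a CPU mapping. A hypothetical
helper, purely to illustrate the new invariant:

	/* Illustration only: adev->gart.ptr is set once the table BO is
	 * pinned and CPU mapped, and cleared on teardown, so it doubles
	 * as the readiness signal the removed 'ready' field used to be. */
	static inline bool example_gart_usable(const struct amdgpu_device *adev)
	{
		return adev->gart.ptr != NULL;
	}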
 
  *
  * Re-init the gart for each known BO in the GTT.
  */
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        struct amdgpu_device *adev;
-       int r = 0;
 
        adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-               r = amdgpu_ttm_recover_gart(node->tbo);
-               if (r)
-                       break;
+               amdgpu_ttm_recover_gart(node->tbo);
        }
        spin_unlock(&mgr->lock);
 
        amdgpu_gart_invalidate_tlb(adev);
-
-       return r;
 }
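
Because amdgpu_ttm_recover_gart() can no longer fail, the walk over the
GTT nodes needs no early-out, and one TLB flush at the end covers every
rebound BO. A hedged usage sketch from a reset path (the surrounding
function is hypothetical):

	/* Hypothetical reset-path caller: the rebind is infallible now,
	 * so recovery no longer propagates an error code upward. */
	static void example_post_reset(struct amdgpu_device *adev)
	{
		amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
		/* ... continue hardware re-init ... */
	}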
 
 /**
 
                dma_addr_t *dma_addr;
 
                dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
-               r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
-                                   cpu_addr);
-               if (r)
-                       goto error_free;
+               amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
        } else {
                dma_addr_t dma_address;
 
                dma_address += adev->vm_manager.vram_base_offset;
 
                for (i = 0; i < num_pages; ++i) {
-                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
-                                           &dma_address, flags, cpu_addr);
-                       if (r)
-                               goto error_free;
-
+                       amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+                                       flags, cpu_addr);
                        dma_address += PAGE_SIZE;
                }
        }
 #endif
 }
 
-static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
-                               struct ttm_buffer_object *tbo,
-                               uint64_t flags)
+static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+                                struct ttm_buffer_object *tbo,
+                                uint64_t flags)
 {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
        struct ttm_tt *ttm = tbo->ttm;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        if (amdgpu_bo_encrypted(abo))
                flags |= AMDGPU_PTE_TMZ;
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
-               r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
-                               gtt->ttm.dma_address, flags);
-               if (r)
-                       goto gart_bind_fail;
+               amdgpu_gart_bind(adev, gtt->offset, page_idx,
+                                gtt->ttm.dma_address, flags);
 
                /* The memory type of the first page defaults to UC. Now
                 * modify the memory type to NC from the second page of
                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 
-               r = amdgpu_gart_bind(adev,
-                               gtt->offset + (page_idx << PAGE_SHIFT),
-                               ttm->num_pages - page_idx,
-                               &(gtt->ttm.dma_address[page_idx]), flags);
+               amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
+                                ttm->num_pages - page_idx,
+                                &(gtt->ttm.dma_address[page_idx]), flags);
        } else {
-               r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-                                    gtt->ttm.dma_address, flags);
+               amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                                gtt->ttm.dma_address, flags);
        }
-
-gart_bind_fail:
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
-
-       return r;
 }
 
 /*
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        uint64_t flags;
-       int r = 0;
+       int r;
 
        if (!bo_mem)
                return -EINVAL;
 
        /* bind pages into GART page tables */
        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-               gtt->ttm.dma_address, flags);
-
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
+       amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                        gtt->ttm.dma_address, flags);
        gtt->bound = true;
-       return r;
+       return 0;
 }
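
One detail worth keeping in mind in the bind path: bo_mem->start counts
CPU pages within the GTT domain, so the shift by PAGE_SHIFT turns it
into the byte offset amdgpu_gart_bind() expects. A small worked sketch:

	/* A resource starting at page 256 of the GTT domain binds at byte
	 * offset 256 << PAGE_SHIFT, i.e. 0x100000 with 4 KiB pages. */
	static u64 example_gtt_byte_offset(const struct ttm_resource *bo_mem)
	{
		return (u64)bo_mem->start << PAGE_SHIFT;
	}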
 
 /*
 
        /* Bind pages */
        gtt->offset = (u64)tmp->start << PAGE_SHIFT;
-       r = amdgpu_ttm_gart_bind(adev, bo, flags);
-       if (unlikely(r)) {
-               ttm_resource_free(bo, &tmp);
-               return r;
-       }
-
+       amdgpu_ttm_gart_bind(adev, bo, flags);
        amdgpu_gart_invalidate_tlb(adev);
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, tmp);
  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
  * rebind GTT pages during a GPU reset.
  */
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        uint64_t flags;
-       int r;
 
        if (!tbo->ttm)
-               return 0;
+               return;
 
        flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
-       r = amdgpu_ttm_gart_bind(adev, tbo, flags);
-
-       return r;
+       amdgpu_ttm_gart_bind(adev, tbo, flags);
 }
 
 /*
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr) {
                return;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
-       if (r)
-               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-                         gtt->ttm.num_pages, gtt->offset);
+       amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        gtt->bound = false;
 }
 
 
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
 
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
 
                        struct dma_fence **fence);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
-
        return 0;
 }
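
The same simplification repeats in each gmc_v*_gart_enable(): the
recover call drops its error check, and the trailing
"adev->gart.ready = true" disappears because readiness is implied by the
mapped table. A condensed sketch of the resulting flow (gfxhub variant
only; the per-ASIC register setup is elided):

	/* Condensed illustration: only the hardware-programming callback
	 * can still fail once the rebind is infallible. */
	static int example_gart_enable(struct amdgpu_device *adev)
	{
		if (adev->gart.bo == NULL)
			return -EINVAL;	/* table BO must already exist */

		amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
		return adev->gfxhub.funcs->gart_enable(adev);
	}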
 
 
 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
 
 static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
 
 static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
 
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
        DRM_INFO("PTB located at 0x%016llX\n",
                        (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
        return 0;
 }
 
 
 
        cpu_addr = &job->ibs[0].ptr[num_dw];
 
-       r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-       if (r)
-               goto error_free;
-
+       amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)