        if (r)
                goto init_failed;
 
+       if (adev->mman.buffer_funcs_ring->sched.ready)
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
        /* Don't init kfd if whole hive need to be reset during init */
        if (!adev->gmc.xgmi.pending_reset) {
                kgd2kfd_init_zone_device(adev);
                amdgpu_virt_request_full_gpu(adev, false);
        }
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        r = amdgpu_device_ip_suspend_phase1(adev);
        if (r)
                return r;
 
        r = amdgpu_device_ip_resume_phase2(adev);
 
+       if (adev->mman.buffer_funcs_ring->sched.ready)
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
        return r;
 }
 
        /* disable ras feature must before hw fini */
        amdgpu_ras_pre_fini(adev);
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        amdgpu_device_ip_fini_early(adev);
 
        amdgpu_irq_fini_hw(adev);
 
        amdgpu_ras_suspend(adev);
 
+       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
        amdgpu_device_ip_suspend_phase1(adev);
 
        if (!adev->in_s0ix)
                                if (r)
                                        goto out;
 
+                               if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+                                       amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+
                                if (vram_lost)
                                        amdgpu_device_fill_reset_magic(tmp_adev);
 
 
        return err;
 }
 
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
-{
-       struct amdgpu_ring *sdma;
-       int i;
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               if (adev->sdma.has_page_queue) {
-                       sdma = &adev->sdma.instance[i].page;
-                       if (adev->mman.buffer_funcs_ring == sdma) {
-                               amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                               break;
-                       }
-               }
-               sdma = &adev->sdma.instance[i].ring;
-               if (adev->mman.buffer_funcs_ring == sdma) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       break;
-               }
-       }
-}
-
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
 {
        int err = 0;
 
                               bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
-void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
 int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
 
        u32 rb_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                        r = amdgpu_ring_test_helper(page);
                        if (r)
                                return r;
-
-                       if (adev->mman.buffer_funcs_ring == page)
-                               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                }
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
        if (adev->in_s0ix) {
                sdma_v4_0_enable(adev, true);
                sdma_v4_0_gfx_enable(adev, true);
-               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                return 0;
        }
 
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v5_0_ctx_switch_enable(adev, false);
        sdma_v5_0_enable(adev, false);
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v5_2_ctx_switch_enable(adev, false);
        sdma_v5_2_enable(adev, false);
 
        u32 rb_cntl, ib_cntl;
        int i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev)) {
-               /* disable the scheduler for SDMA */
-               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+       if (amdgpu_sriov_vf(adev))
                return 0;
-       }
 
        sdma_v6_0_ctxempty_int_enable(adev, false);
        sdma_v6_0_enable(adev, false);
 
        u32 rb_cntl;
        unsigned i;
 
-       amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
        for (i = 0; i < adev->sdma.num_instances; i++) {
                /* dma0 */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, true);
        }
 
        return 0;