int r, i, j, k;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
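/* enter RLC safe mode so the RLC does not change GFX power/clock gating state while the CP registers below are reprogrammed */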
- gfx_v11_0_set_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
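/* read-modify-write CP_INT_CNTL: mask the CMP busy interrupt and enable the GFX idle interrupt */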
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
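/* bring the CP gfx and compute rings back up now that the interrupt enables have been updated */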
return gfx_v11_0_cp_resume(adev);
}
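/* a direct HQD reset via register writes is not supported for an SR-IOV virtual function */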
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- gfx_v11_0_set_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
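/* hold srbm_mutex and select this ring's me/pipe/queue so the per-queue CP_HQD_* registers are addressable */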
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
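/* request that the CP dequeue this hardware queue */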
WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
/* make sure the dequeue request has completed */
r = 0;
for (i = 0; i < adev->usec_timeout; i++) {
	if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
		break;
	udelay(1);
}
if (i >= adev->usec_timeout)
	r = -ETIMEDOUT;
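/* restore the default GRBM selection before releasing the mutex */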
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (r) {
dev_err(adev->dev, "fail to wait on hqd deactivate\n");
return r;