}
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+ enum amdgpu_ras_block block, uint32_t reset)
{
amdgpu_umc_poison_handler(adev, block, reset);
}
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
}
}
- amdgpu_umc_poison_handler(adev, obj->head.block, false);
+ amdgpu_umc_poison_handler(adev, obj->head.block, 0);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
atomic_dec(&con->page_retirement_req_cnt);
amdgpu_umc_bad_page_polling_timeout(adev,
- false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+ 0, MAX_UMC_POISON_POLLING_TIME_ASYNC);
}
return 0;
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
void *ras_error_status,
struct amdgpu_iv_entry *entry,
- bool reset)
+ uint32_t reset)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
amdgpu_umc_handle_bad_pages(adev, ras_error_status);
if (err_data->ue_count && reset) {
- /* use mode-2 reset for poison consumption */
- if (!entry)
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
}
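
For context, a minimal caller-side sketch of the new convention (illustrative only, not part of the patch; need_gpu_reset and the GFX block are placeholder choices): reset is now a mask that the retirement path ORs into con->gpu_reset_flags, so 0 means no gpu reset and a non-zero value selects the reset method.

uint32_t reset = 0;

if (need_gpu_reset) /* hypothetical condition, for illustration only */
	reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;

/* 0 skips the gpu reset; a non-zero mask picks the reset method */
amdgpu_umc_poison_handler(adev, AMDGPU_RAS_BLOCK__GFX, reset);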
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms)
+ uint32_t reset, uint32_t timeout_ms)
{
struct ras_err_data err_data;
struct ras_common_if head = {
if (reset) {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- /* use mode-2 reset for poison consumption */
- con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
}
}
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset)
+ enum amdgpu_ras_block block, uint32_t reset)
{
int ret = AMDGPU_RAS_SUCCESS;
void *ras_error_status,
struct amdgpu_iv_entry *entry)
{
- return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
+ AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
- enum amdgpu_ras_block block, bool reset);
+ enum amdgpu_ras_block block, uint32_t reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
umc_func func, void *data);
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
- bool reset, uint32_t timeout_ms);
+ uint32_t reset, uint32_t timeout_ms);
#endif
{
enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
break;
/* if resetting the queue succeeds, do page retirement without gpu reset;
 * if resetting the queue fails, fall back to the gpu reset solution
 */
- if (!ret) {
+ if (!ret)
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
+ else
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
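
To show where the value selected above ends up, a condensed sketch of the call chain as it appears across the hunks in this patch (illustrative only; the internal dispatch of amdgpu_umc_poison_handler() is not shown in this excerpt):

/*
 * KFD poison consumption handler (e.g. the v10 handler above)
 *   -> amdgpu_amdkfd_ras_poison_consumption_handler(adev, block, reset)
 *      -> amdgpu_umc_poison_handler(adev, block, reset)
 *         ... -> amdgpu_umc_do_page_retirement() or
 *                amdgpu_umc_bad_page_polling_timeout(), both of which do:
 *                    con->gpu_reset_flags |= reset;
 *                    amdgpu_ras_reset_gpu(adev);
 */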
static bool event_interrupt_isr_v10(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
if (dev->dqm->ops.reset_queues)
ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
case SOC21_INTSRC_SDMA_ECC:
default:
block = AMDGPU_RAS_BLOCK__GFX;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
}
/* if resetting the queue succeeds, do page retirement without gpu reset;
 * if resetting the queue fails, fall back to the gpu reset solution
 */
- if (!ret)
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- else
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
static bool event_interrupt_isr_v11(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
int old_poison, ret = -EINVAL;
+ uint32_t reset = 0;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
block = AMDGPU_RAS_BLOCK__GFX;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ break;
+ case SOC15_IH_CLIENTID_VMC:
+ case SOC15_IH_CLIENTID_VMC1:
+ ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ block = AMDGPU_RAS_BLOCK__MMHUB;
+ if (ret)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
block = AMDGPU_RAS_BLOCK__SDMA;
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
break;
default:
break;
/* if resetting the queue succeeds, do page retirement without gpu reset;
 * if resetting the queue fails, fall back to the gpu reset solution
 */
- if (!ret) {
+ if (!ret)
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
- } else {
+ else
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
- amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
- }
+
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset);
}
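
The v9 handler above now encodes a per-block reset policy. A hypothetical helper condensing that policy (the name, signature, and factoring are illustrative and not part of the patch):

/* Illustrative only: mirrors the switch in the v9 handler above. */
static uint32_t kfd_poison_reset_mode(uint16_t client_id, int evict_ret)
{
	switch (client_id) {
	case SOC15_IH_CLIENTID_UTCL2:
		/* GFX block: mode-2 reset only if queue eviction failed */
		return evict_ret ? AMDGPU_RAS_GPU_RESET_MODE2_RESET : 0;
	case SOC15_IH_CLIENTID_VMC:
	case SOC15_IH_CLIENTID_VMC1:
		/* MMHUB block: mode-1 reset only if queue eviction failed */
		return evict_ret ? AMDGPU_RAS_GPU_RESET_MODE1_RESET : 0;
	case SOC15_IH_CLIENTID_SDMA0:
	case SOC15_IH_CLIENTID_SDMA1:
	case SOC15_IH_CLIENTID_SDMA3:
	case SOC15_IH_CLIENTID_SDMA4:
		/* SDMA block: always request a mode-2 reset */
		return AMDGPU_RAS_GPU_RESET_MODE2_RESET;
	default:
		return 0;
	}
}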
static bool context_id_expected(struct kfd_dev *dev)