}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
-		int xcc_id)
+		int hub_inst, int hub_type)
{
-	if (adev->gfxhub.funcs->query_utcl2_poison_status)
-		return adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id);
-	else
-		return false;
+	if (!hub_type) {
+		if (adev->gfxhub.funcs->query_utcl2_poison_status)
+			return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+		else
+			return false;
+	} else {
+		if (adev->mmhub.funcs->query_utcl2_poison_status)
+			return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst);
+		else
+			return false;
+	}
}
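
An equivalent, slightly tighter form of the new dispatch is possible. The sketch below is illustrative only, not part of the patch, and assumes both hubs' query_utcl2_poison_status callbacks share the (adev, int) signature implied by the calls above:

/* Illustrative refactor: pick the hub's funcs table by hub_type, then
 * guard the optional callback once. Behavior matches the patch above. */
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
						 int hub_inst, int hub_type)
{
	bool (*query)(struct amdgpu_device *adev, int hub_inst) =
		hub_type ? adev->mmhub.funcs->query_utcl2_poison_status :
			   adev->gfxhub.funcs->query_utcl2_poison_status;

	return query ? query(adev, hub_inst) : false;
}
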
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
-		int xcc_id);
+		int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
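
The header change is also a natural place for a kernel-doc comment spelling out the new parameters; a hedged sketch, with meanings inferred from the patch (hub_type 0 selects the gfxhub, non-zero the mmhub):

/**
 * amdgpu_amdkfd_ras_query_utcl2_poison_status - query UTCL2 poison status
 * @adev: amdgpu device
 * @hub_inst: hub instance to query (logical XCC index for the gfxhub,
 *            mmhub instance derived from the IH node id for the mmhub)
 * @hub_type: 0 to query the gfxhub, non-zero to query the mmhub
 *
 * Return: true if the selected hub reports UTCL2 poison, false otherwise
 * (including when the hub does not implement the query).
 */
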
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);

-	/* for gfx fed error, kfd will handle it, return directly */
+	/* for fed errors, kfd will handle them, so return directly */
	if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
-	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)) &&
-	    (vmhub < AMDGPU_MMHUB0_START))
+	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
		return 0;

	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
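
For context on the dropped (vmhub < AMDGPU_MMHUB0_START) clause: under the usual amdgpu vmhub numbering (an assumption here, not stated in the patch), gfxhub instance indices precede AMDGPU_MMHUB0_START, so the old check deferred only gfxhub FED faults to KFD; the new form defers mmhub FED faults as well. A minimal sketch of the classification the removed clause performed:

/* Hedged sketch, assuming the amdgpu vmhub indexing convention where
 * gfxhub instance indices come before AMDGPU_MMHUB0_START: this is the
 * test the removed clause expressed. */
static inline bool vmhub_is_gfxhub(unsigned int vmhub)
{
	return vmhub < AMDGPU_MMHUB0_START;
}
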
	uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
	uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
	uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
-	int xcc_id = 0;
+	int hub_inst = 0;
	struct kfd_hsa_memory_exception_data exception_data;

+	/* gfxhub */
	if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
-		xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+		hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
				node_id);
-		if (xcc_id < 0)
-			xcc_id = 0;
+		if (hub_inst < 0)
+			hub_inst = 0;
	}

-	if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type &&
-	    amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) {
+	/* mmhub */
+	if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+		hub_inst = node_id / 4;
+
+	if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+			hub_inst, vmid_type)) {
		event_interrupt_poison_consumption(dev, pasid, client_id);
		return;
	}
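
Since the same hub_inst derivation now appears in both interrupt handlers (see the second hunk below), one could imagine factoring it into a shared helper. A sketch under assumptions: the helper name is hypothetical, and struct kfd_node is assumed to be the handlers' device type:

/* Hypothetical helper, not part of the patch: derive the hub instance
 * from the IH entry fields the way both handlers do. */
static int kfd_ih_entry_to_hub_inst(struct kfd_node *dev, uint32_t client_id,
				    uint32_t node_id, uint32_t vmid_type)
{
	int hub_inst = 0;

	if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
		/* gfxhub: map the IH node id to a logical XCC index */
		hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(
				dev->adev, node_id);
		if (hub_inst < 0)
			hub_inst = 0;
	} else if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) {
		/* mmhub: the patch maps four IH node ids per hub instance */
		hub_inst = node_id / 4;
	}

	return hub_inst;
}
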
	uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
	uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
	uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry);
-	int xcc_id = 0;
+	int hub_inst = 0;
	struct kfd_hsa_memory_exception_data exception_data;

+	/* gfxhub */
	if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) {
-		xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
+		hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev,
				node_id);
-		if (xcc_id < 0)
-			xcc_id = 0;
+		if (hub_inst < 0)
+			hub_inst = 0;
	}

-	if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type &&
-	    amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) {
+	/* mmhub */
+	if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC)
+		hub_inst = node_id / 4;
+
+	if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev,
+			hub_inst, vmid_type)) {
		event_interrupt_poison_consumption_v9(dev, pasid, client_id);
		return;
	}
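
The hunk above mirrors the previous one exactly except for the poison-consumption callback (event_interrupt_poison_consumption_v9 rather than event_interrupt_poison_consumption), so the hypothetical kfd_ih_entry_to_hub_inst helper sketched earlier would serve both call sites unchanged.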