www.infradead.org Git - nvme.git/commitdiff
drm/amdgpu: add cp queue registers for gfx12 ipdump
author Sunil Khatri <sunil.khatri@amd.com>
Thu, 27 Jun 2024 13:09:55 +0000 (18:39 +0530)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 8 Jul 2024 20:47:15 +0000 (16:47 -0400)
Add gfx12 support of CP queue registers for all queues
to be used by devcoredump.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c

index fcb63fbe8dcc06baabc3d3bb1a1ea60fbf365c35..ce9c89ad45b125ac5247e92977002d43886367de 100644 (file)
@@ -130,6 +130,49 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
        SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
 };
 
+static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
+       /* compute registers */
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
+       SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
         (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -1200,6 +1243,7 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
 {
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
        uint32_t *ptr;
+       uint32_t inst;
 
        ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
        if (ptr == NULL) {
@@ -1208,6 +1252,19 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
        } else {
                adev->gfx.ip_dump_core = ptr;
        }
+
+       /* Allocate memory for compute queue registers for all the instances */
+       reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+       inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
+               adev->gfx.mec.num_queue_per_pipe;
+
+       ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
+       if (ptr == NULL) {
+               DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
+               adev->gfx.ip_dump_compute_queues = NULL;
+       } else {
+               adev->gfx.ip_dump_compute_queues = ptr;
+       }
 }
 
 static int gfx_v12_0_sw_init(void *handle)
@@ -1404,6 +1461,7 @@ static int gfx_v12_0_sw_fini(void *handle)
        gfx_v12_0_free_microcode(adev);
 
        kfree(adev->gfx.ip_dump_core);
+       kfree(adev->gfx.ip_dump_compute_queues);
 
        return 0;
 }
@@ -4759,7 +4817,7 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
 static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       uint32_t i;
+       uint32_t i, j, k, reg, index = 0;
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
 
        if (!adev->gfx.ip_dump_core)
@@ -4769,12 +4827,36 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
                drm_printf(p, "%-50s \t 0x%08x\n",
                           gc_reg_list_12_0[i].reg_name,
                           adev->gfx.ip_dump_core[i]);
+
+       /* print compute queue registers for all instances */
+       if (!adev->gfx.ip_dump_compute_queues)
+               return;
+
+       reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+       drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
+                  adev->gfx.mec.num_mec,
+                  adev->gfx.mec.num_pipe_per_mec,
+                  adev->gfx.mec.num_queue_per_pipe);
+
+       for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+               for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+                       for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+                               drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
+                               for (reg = 0; reg < reg_count; reg++) {
+                                       drm_printf(p, "%-50s \t 0x%08x\n",
+                                                  gc_cp_reg_list_12[reg].reg_name,
+                                                  adev->gfx.ip_dump_compute_queues[index + reg]);
+                               }
+                               index += reg_count;
+                       }
+               }
+       }
 }
 
 static void gfx_v12_ip_dump(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       uint32_t i;
+       uint32_t i, j, k, reg, index = 0;
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
 
        if (!adev->gfx.ip_dump_core)
@@ -4784,6 +4866,31 @@ static void gfx_v12_ip_dump(void *handle)
        for (i = 0; i < reg_count; i++)
                adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_12_0[i]));
        amdgpu_gfx_off_ctrl(adev, true);
+
+       /* dump compute queue registers for all instances */
+       if (!adev->gfx.ip_dump_compute_queues)
+               return;
+
+       reg_count = ARRAY_SIZE(gc_cp_reg_list_12);
+       amdgpu_gfx_off_ctrl(adev, false);
+       mutex_lock(&adev->srbm_mutex);
+       for (i = 0; i < adev->gfx.mec.num_mec; i++) {
+               for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
+                       for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
+                               /* ME0 is for GFX so start from 1 for CP */
+                               soc24_grbm_select(adev, 1+i, j, k, 0);
+                               for (reg = 0; reg < reg_count; reg++) {
+                                       adev->gfx.ip_dump_compute_queues[index + reg] =
+                                               RREG32(SOC15_REG_ENTRY_OFFSET(
+                                                       gc_cp_reg_list_12[reg]));
+                               }
+                               index += reg_count;
+                       }
+               }
+       }
+       soc24_grbm_select(adev, 0, 0, 0, 0);
+       mutex_unlock(&adev->srbm_mutex);
+       amdgpu_gfx_off_ctrl(adev, true);
 }
 
 static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {