www.infradead.org Git - users/hch/block.git/commitdiff
drm/amdgpu/vcn: identify unified queue in sw init
author: Boyuan Zhang <boyuan.zhang@amd.com>
Thu, 11 Jul 2024 20:19:54 +0000 (16:19 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 16 Jul 2024 15:43:48 +0000 (11:43 -0400)
Determine whether VCN using unified queue in sw_init, instead of calling
functions later on.

v2: fix coding style

Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index 8d65b096db90097a618ead38fd6f8c5be5dae81b..69b2c2503d551a360c29f124fc352d199f44112d 100644 (file)
@@ -147,6 +147,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                }
        }
 
+       /* from vcn4 and above, only unified queue is used */
+       adev->vcn.using_unified_queue =
+               amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
        hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
@@ -275,18 +279,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        return 0;
 }
 
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-       struct amdgpu_device *adev = ring->adev;
-       bool ret = false;
-
-       if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
-               ret = true;
-
-       return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
        bool ret = false;
@@ -724,12 +716,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-       bool sq = amdgpu_vcn_using_unified_queue(ring);
        uint32_t *ib_checksum;
        uint32_t ib_pack_in_dw;
        int i, r;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                ib_size_dw += 8;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -742,7 +733,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
        ib->length_dw = 0;
 
        /* single queue headers */
-       if (sq) {
+       if (adev->vcn.using_unified_queue) {
                ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
                                                + 4 + 2; /* engine info + decoding ib in dw */
                ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -761,7 +752,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 
        r = amdgpu_job_submit_direct(job, ring, &f);
@@ -851,15 +842,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
                                         struct dma_fence **fence)
 {
        unsigned int ib_size_dw = 16;
+       struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint32_t *ib_checksum = NULL;
        uint64_t addr;
-       bool sq = amdgpu_vcn_using_unified_queue(ring);
        int i, r;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                ib_size_dw += 8;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -873,7 +864,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
        ib->length_dw = 0;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
        ib->ptr[ib->length_dw++] = 0x00000018;
@@ -895,7 +886,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
        r = amdgpu_job_submit_direct(job, ring, &f);
@@ -918,15 +909,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
                                          struct dma_fence **fence)
 {
        unsigned int ib_size_dw = 16;
+       struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint32_t *ib_checksum = NULL;
        uint64_t addr;
-       bool sq = amdgpu_vcn_using_unified_queue(ring);
        int i, r;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                ib_size_dw += 8;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -940,7 +931,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
        ib->length_dw = 0;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
        ib->ptr[ib->length_dw++] = 0x00000018;
@@ -962,7 +953,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (sq)
+       if (adev->vcn.using_unified_queue)
                amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
        r = amdgpu_job_submit_direct(job, ring, &f);
index 9f06def236fdc0b2c5cdc5615f1f9cd1262de8a7..1a5439abd1a043d68c378c3fbb41c9e167c4c60f 100644 (file)
@@ -329,6 +329,7 @@ struct amdgpu_vcn {
 
        uint16_t inst_mask;
        uint8_t num_inst_per_aid;
+       bool using_unified_queue;
 };
 
 struct amdgpu_fw_shared_rb_ptrs_struct {