The align_mask and nop values are constant for a given ring type as well, so move them from struct amdgpu_ring into the const amdgpu_ring_funcs structure and access them through ring->funcs.

v2: update the UVD and VCE phys ring structures as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
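
For reference, a simplified sketch of the resulting layout (member names as in this patch, unrelated fields and callbacks omitted):

	/* per-IP table, shared and read-only; the padding constants now live here */
	struct amdgpu_ring_funcs {
		enum amdgpu_ring_type	type;
		uint32_t		align_mask;	/* e.g. 0xf for SDMA, 0xff for GFX/compute */
		u32			nop;		/* NOP packet used for padding */
		/* ... ring callbacks ... */
	};

	/* per-instance ring state keeps only what varies at runtime */
	struct amdgpu_ring {
		const struct amdgpu_ring_funcs *funcs;
		/* ... */
	};

so users switch from ring->align_mask / ring->nop to ring->funcs->align_mask / ring->funcs->nop.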
 {
        /* Align requested size with padding so unlock_commit can
         * pad safely */
-       ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+       ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
 
        /* Make sure we aren't trying to allocate more space
         * than the maximum for one submission
        int i;
 
        for (i = 0; i < count; i++)
-               amdgpu_ring_write(ring, ring->nop);
+               amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
  * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
  *
  * @ring: amdgpu_ring structure holding ring information
  * @ib: IB to add NOP packets to
  */
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 {
-       while (ib->length_dw & ring->align_mask)
-               ib->ptr[ib->length_dw++] = ring->nop;
+       while (ib->length_dw & ring->funcs->align_mask)
+               ib->ptr[ib->length_dw++] = ring->funcs->nop;
 }
 
 /**
        uint32_t count;
 
        /* We pad to match fetch size */
-       count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
-       count %= ring->align_mask + 1;
+       count = ring->funcs->align_mask + 1 -
+               (ring->wptr & ring->funcs->align_mask);
+       count %= ring->funcs->align_mask + 1;
        ring->funcs->insert_nop(ring, count);
 
        mb();
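
As a worked example of the padding math above, assuming the 0xf align_mask used by the SDMA rings in this patch (a 16-dword fetch size):

	/* wptr & 0xf == 5:  count = 16 - 5 = 11, 11 % 16 = 11 -> pad 11 NOPs */
	/* wptr & 0xf == 0:  count = 16 - 0 = 16, 16 % 16 = 0  -> no padding  */

The modulo only matters for the already-aligned case, so a full fetch-size worth of NOPs is never emitted.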
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned max_dw, u32 nop, u32 align_mask,
-                    struct amdgpu_irq_src *irq_src, unsigned irq_type)
+                    unsigned max_dw, struct amdgpu_irq_src *irq_src,
+                    unsigned irq_type)
 {
        int r;
 
 
        ring->ring_size = roundup_pow_of_two(max_dw * 4 *
                                             amdgpu_sched_hw_submission);
-       ring->align_mask = align_mask;
-       ring->nop = nop;
 
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
 
 /* provided by hw blocks that expose a ring buffer for commands */
 struct amdgpu_ring_funcs {
        enum amdgpu_ring_type   type;
+       uint32_t                align_mask;
+       u32                     nop;
 
        /* ring read/write ptr handling */
        u32 (*get_rptr)(struct amdgpu_ring *ring);
        unsigned                max_dw;
        int                     count_dw;
        uint64_t                gpu_addr;
-       uint32_t                align_mask;
        uint32_t                ptr_mask;
        bool                    ready;
-       u32                     nop;
        u32                     idx;
        u32                     me;
        u32                     pipe;
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned ring_size, u32 nop, u32 align_mask,
-                    struct amdgpu_irq_src *irq_src, unsigned irq_type);
+                    unsigned ring_size, struct amdgpu_irq_src *irq_src,
+                    unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 #endif
 
 
        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
-                       amdgpu_ring_write(ring, ring->nop |
+                       amdgpu_ring_write(ring, ring->funcs->nop |
                                          SDMA_NOP_COUNT(count - 1));
                else
-                       amdgpu_ring_write(ring, ring->nop);
+                       amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
                ring->ring_obj = NULL;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_TRAP0 :
 
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
        .get_rptr = cik_sdma_ring_get_rptr,
        .get_wptr = cik_sdma_ring_get_wptr,
        .set_wptr = cik_sdma_ring_set_wptr,
 
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    0x80000000, 0xff,
                                     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
                if (r)
                        return r;
                sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    0x80000000, 0xff,
                                     &adev->gfx.eop_irq, irq_type);
                if (r)
                        return r;
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
        .type = AMDGPU_RING_TYPE_GFX,
+       .align_mask = 0xff,
+       .nop = 0x80000000,
        .get_rptr = gfx_v6_0_ring_get_rptr,
        .get_wptr = gfx_v6_0_ring_get_wptr,
        .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
        .type = AMDGPU_RING_TYPE_COMPUTE,
+       .align_mask = 0xff,
+       .nop = 0x80000000,
        .get_rptr = gfx_v6_0_ring_get_rptr,
        .get_wptr = gfx_v6_0_ring_get_wptr,
        .set_wptr = gfx_v6_0_ring_set_wptr_compute,
 
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
                                     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
                if (r)
                        return r;
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                /* type-2 packets are deprecated on MEC, use type-3 instead */
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
                                     &adev->gfx.eop_irq, irq_type);
                if (r)
                        return r;
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .type = AMDGPU_RING_TYPE_GFX,
+       .align_mask = 0xff,
+       .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .type = AMDGPU_RING_TYPE_COMPUTE,
+       .align_mask = 0xff,
+       .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .get_rptr = gfx_v7_0_ring_get_rptr,
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
 
                        ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
                }
 
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
-                                    &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+                                    AMDGPU_CP_IRQ_GFX_EOP);
                if (r)
                        return r;
        }
                sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                /* type-2 packets are deprecated on MEC, use type-3 instead */
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    PACKET3(PACKET3_NOP, 0x3FFF), 0xff,
-                                    &adev->gfx.eop_irq, irq_type);
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
+                                    irq_type);
                if (r)
                        return r;
        }
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .type = AMDGPU_RING_TYPE_GFX,
+       .align_mask = 0xff,
+       .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .type = AMDGPU_RING_TYPE_COMPUTE,
+       .align_mask = 0xff,
+       .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
 
 
        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
-                       amdgpu_ring_write(ring, ring->nop |
+                       amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
-                       amdgpu_ring_write(ring, ring->nop);
+                       amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_TRAP0 :
 
 static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
        .get_rptr = sdma_v2_4_ring_get_rptr,
        .get_wptr = sdma_v2_4_ring_get_wptr,
        .set_wptr = sdma_v2_4_ring_set_wptr,
 
 
        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
-                       amdgpu_ring_write(ring, ring->nop |
+                       amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
-                       amdgpu_ring_write(ring, ring->nop);
+                       amdgpu_ring_write(ring, ring->funcs->nop);
 }
 
 /**
 
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_TRAP0 :
 
 static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
        .get_rptr = sdma_v3_0_ring_get_rptr,
        .get_wptr = sdma_v3_0_ring_get_wptr,
        .set_wptr = sdma_v3_0_ring_set_wptr,
 
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
-                                    DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
                                     &adev->sdma.trap_irq,
                                     (i == 0) ?
                                     AMDGPU_SDMA_IRQ_TRAP0 :
 
 static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
        .get_rptr = si_dma_ring_get_rptr,
        .get_wptr = si_dma_ring_get_wptr,
        .set_wptr = si_dma_ring_set_wptr,
 
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-                            &adev->uvd.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
        return r;
 }
 
 static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
+       .align_mask = 0xf,
+       .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v4_2_ring_get_rptr,
        .get_wptr = uvd_v4_2_ring_get_wptr,
        .set_wptr = uvd_v4_2_ring_set_wptr,
 
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-                            &adev->uvd.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
        return r;
 }
 
 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
+       .align_mask = 0xf,
+       .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
 
 
        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
-       r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
-                            &adev->uvd.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
 
        return r;
 }
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
+       .align_mask = 0xf,
+       .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
 
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
+       .align_mask = 0xf,
+       .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
 
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+               r = amdgpu_ring_init(adev, ring, 512,
                                     &adev->vce.irq, 0);
                if (r)
                        return r;
 
 static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
+       .align_mask = 0xf,
+       .nop = VCE_CMD_NO_OP,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
 
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
-                                    &adev->vce.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
                if (r)
                        return r;
        }
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
+       .align_mask = 0xf,
+       .nop = VCE_CMD_NO_OP,
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
 
 static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
+       .align_mask = 0xf,
+       .nop = VCE_CMD_NO_OP,
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,