num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
-                       rings[0] = &adev->vcn.ring_dec;
+                       rings[0] = &adev->vcn.inst[0].ring_dec;
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
-                       rings[0] = &adev->vcn.ring_enc[0];
+                       rings[0] = &adev->vcn.inst[0].ring_enc[0];
                        num_rings = 1;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
-                       rings[0] = &adev->vcn.ring_jpeg;
+                       rings[0] = &adev->vcn.inst[0].ring_jpeg;
                        num_rings = 1;
                        break;
                }
 
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_dec.sched.ready)
+               if (adev->vcn.inst[0].ring_dec.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                       if (adev->vcn.ring_enc[i].sched.ready)
+                       if (adev->vcn.inst[0].ring_enc[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 64;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                type = AMD_IP_BLOCK_TYPE_VCN;
-               if (adev->vcn.ring_jpeg.sched.ready)
+               if (adev->vcn.inst[0].ring_jpeg.sched.ready)
                        ++num_rings;
                ib_start_alignment = 16;
                ib_size_alignment = 16;
                break;
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
-                                   &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo,
+                                   &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
 {
        int i;
 
-       kvfree(adev->vcn.saved_bo);
+       kvfree(adev->vcn.inst[0].saved_bo);
 
        if (adev->vcn.indirect_sram) {
                amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
                              &adev->vcn.dpg_sram_gpu_addr,
                              (void **)&adev->vcn.dpg_sram_cpu_addr);
        }
 
-       amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
-                             &adev->vcn.gpu_addr,
-                             (void **)&adev->vcn.cpu_addr);
+       amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo,
+                             &adev->vcn.inst[0].gpu_addr,
+                             (void **)&adev->vcn.inst[0].cpu_addr);
 
-       amdgpu_ring_fini(&adev->vcn.ring_dec);
+       amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+               amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]);
 
-       amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+       amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg);
 
        release_firmware(adev->vcn.fw);
 
 
        cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-       if (adev->vcn.vcpu_bo == NULL)
+       if (adev->vcn.inst[0].vcpu_bo == NULL)
                return 0;
 
-       size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-       ptr = adev->vcn.cpu_addr;
+       size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+       ptr = adev->vcn.inst[0].cpu_addr;
 
-       adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
-       if (!adev->vcn.saved_bo)
+       adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL);
+       if (!adev->vcn.inst[0].saved_bo)
                return -ENOMEM;
 
-       memcpy_fromio(adev->vcn.saved_bo, ptr, size);
+       memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size);
 
        return 0;
 }
        unsigned size;
        void *ptr;
 
-       if (adev->vcn.vcpu_bo == NULL)
+       if (adev->vcn.inst[0].vcpu_bo == NULL)
                return -EINVAL;
 
-       size = amdgpu_bo_size(adev->vcn.vcpu_bo);
-       ptr = adev->vcn.cpu_addr;
+       size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+       ptr = adev->vcn.inst[0].cpu_addr;
 
-       if (adev->vcn.saved_bo != NULL) {
-               memcpy_toio(ptr, adev->vcn.saved_bo, size);
-               kvfree(adev->vcn.saved_bo);
-               adev->vcn.saved_bo = NULL;
+       if (adev->vcn.inst[0].saved_bo != NULL) {
+               memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size);
+               kvfree(adev->vcn.inst[0].saved_bo);
+               adev->vcn.inst[0].saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;
                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-                       memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+                       memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset,
                                    le32_to_cpu(hdr->ucode_size_bytes));
                        size -= le32_to_cpu(hdr->ucode_size_bytes);
                        ptr += le32_to_cpu(hdr->ucode_size_bytes);
        unsigned int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+               fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
        }
 
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
                struct dpg_pause_state new_state;

                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-               if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+               if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
                adev->vcn.pause_dpg_mode(adev, &new_state);
        }
 
-       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
-       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg);
+       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec);
 
        if (fences == 0) {
                amdgpu_gfx_off_ctrl(adev, true);
                unsigned int i;
 
                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+                       fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
                }
                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-               if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+               if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
        unsigned i;
        int r;
 
-       WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+       WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.scratch9);
+               tmp = RREG32(adev->vcn.inst[0].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        unsigned i;
        int r;
 
-       WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
+       WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
        amdgpu_ring_commit(ring);
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.jpeg_pitch);
+               tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }
 
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(adev->vcn.external.jpeg_pitch);
+               tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
 
 #define AMDGPU_VCN_FIRMWARE_OFFSET     256
 #define AMDGPU_VCN_MAX_ENC_RINGS       3
 
+#define AMDGPU_MAX_VCN_INSTANCES       2
+
 #define VCN_DEC_CMD_FENCE              0x00000000
 #define VCN_DEC_CMD_TRAP               0x00000001
 #define VCN_DEC_CMD_WRITE_REG          0x00000004
        unsigned        jpeg_pitch;
 };
 
-struct amdgpu_vcn {
+struct amdgpu_vcn_inst {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       unsigned                fw_version;
        void                    *saved_bo;
-       struct delayed_work     idle_work;
-       const struct firmware   *fw;    /* VCN firmware */
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        struct amdgpu_ring      ring_jpeg;
        struct amdgpu_irq_src   irq;
+       struct amdgpu_vcn_reg   external;
+};
+
+struct amdgpu_vcn {
+       unsigned                fw_version;
+       struct delayed_work     idle_work;
+       const struct firmware   *fw;    /* VCN firmware */
        unsigned                num_enc_rings;
        enum amd_powergating_state cur_state;
        struct dpg_pause_state pause_state;
-       struct amdgpu_vcn_reg   internal, external;
-       int (*pause_dpg_mode)(struct amdgpu_device *adev,
-               struct dpg_pause_state *new_state);
 
        bool                    indirect_sram;
        struct amdgpu_bo        *dpg_sram_bo;
        void                    *dpg_sram_cpu_addr;
        uint64_t                dpg_sram_gpu_addr;
        uint32_t                *dpg_sram_curr_addr;
+
+       uint8_t num_vcn_inst;
+       struct amdgpu_vcn_inst  inst[AMDGPU_MAX_VCN_INSTANCES];
+       struct amdgpu_vcn_reg   internal;
+
+       int (*pause_dpg_mode)(struct amdgpu_device *adev,
+               struct dpg_pause_state *new_state);
 };
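
Usage note (not part of the patch): after this split, state that exists once per hardware instance is reached through adev->vcn.inst[i], while firmware and power-management bookkeeping stay shared on adev->vcn. A minimal sketch, assuming only the fields and helpers visible in this series; vcn_fini_all_rings_example is a hypothetical helper, not code from the driver:

static void vcn_fini_all_rings_example(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                /* per-instance state: rings, VCPU BO, IRQ source */
                amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
                for (j = 0; j < adev->vcn.num_enc_rings; ++j)
                        amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
                amdgpu_ring_fini(&adev->vcn.inst[i].ring_jpeg);
        }

        /* shared state: one firmware image per device */
        release_firmware(adev->vcn.fw);
}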
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v1_0_set_dec_ring_funcs(adev);
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCN DEC TRAP */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+                       VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
        if (r)
                return r;
 
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-                                       &adev->vcn.irq);
+                                       &adev->vcn.inst->irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq);
        if (r)
                return r;
 
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
-       adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
+       adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
-       adev->vcn.internal.data0 = adev->vcn.external.data0 =
+       adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
-       adev->vcn.internal.data1 = adev->vcn.external.data1 =
+       adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
-       adev->vcn.internal.cmd = adev->vcn.external.cmd =
+       adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
-       adev->vcn.internal.nop = adev->vcn.external.nop =
+       adev->vcn.internal.nop = adev->vcn.inst->external.nop =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
        adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
-       adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch =
+       adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch =
                SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
 static int vcn_v1_0_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
        r = amdgpu_ring_test_helper(ring);
                goto done;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = true;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        r = amdgpu_ring_test_helper(ring);
        if (r)
                goto done;
 static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                RREG32_SOC15(VCN, 0, mmUVD_STATUS))
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst->gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst->gpu_addr));
                offset = size;
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset));
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset));
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+                       lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
                WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0);
+                       upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
                offset = size;
                WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
 
        /* cache window 1: stack */
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0);
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
 
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-                    lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+                    lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-                    upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
+                    upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
                             0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
        WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
  */
 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
                        ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
        WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
                        UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 
 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
 
                        ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
 
        /* initialize JPEG wptr */
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
 
        /* copy patch commands to the jpeg ring */
                                                   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_enc[0];
+                               ring = &adev->vcn.inst->ring_enc[0];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_enc[1];
+                               ring = &adev->vcn.inst->ring_enc[1];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_dec;
+                               ring = &adev->vcn.inst->ring_dec;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                                   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
                                                        UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_jpeg;
+                               ring = &adev->vcn.inst->ring_jpeg;
                                WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
                                WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
                                                        UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
                                WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
                                                        UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 
-                               ring = &adev->vcn.ring_dec;
+                               ring = &adev->vcn.inst->ring_dec;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                                   RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
                        lower_32_bits(ring->wptr));
        else
 
        switch (entry->src_id) {
        case 124:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst->ring_dec);
                break;
        case 119:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
                break;
        case 120:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
                break;
        case 126:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
 
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
+       adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
+               adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+       adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
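        /* one IRQ type per encode ring, plus one each for the decode and JPEG rings */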
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
+       adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v2_0_set_dec_ring_funcs(adev);
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                              VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
-                             &adev->vcn.irq);
+                             &adev->vcn.inst->irq);
        if (r)
                return r;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                                      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
-                                     &adev->vcn.irq);
+                                     &adev->vcn.inst->irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                             VCN_2_0__SRCID__JPEG_DECODE,
-                             &adev->vcn.irq);
+                             VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq);
        if (r)
                return r;
 
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst->ring_dec;
 
        ring->use_doorbell = true;
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
        adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
        adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-       adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+       adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
        adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-       adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+       adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
        adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-       adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+       adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
        adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-       adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+       adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
        adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-       adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+       adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
        if (r)
                return r;
 
        adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
 
        adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-       adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
+       adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
 }
 static int vcn_v2_0_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;
 
        adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
        }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = true;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                }
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->sched.ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
 static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i;
 
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
        ring->sched.ready = false;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst->ring_enc[i];
                ring->sched.ready = false;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst->ring_jpeg;
        ring->sched.ready = false;
 
        return 0;
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst->gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst->gpu_addr));
                offset = size;
                /* No signed header for now from firmware
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset));
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset));
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 
        } else {
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-                       lower_32_bits(adev->vcn.gpu_addr), 0, indirect);
+                       lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-                       upper_32_bits(adev->vcn.gpu_addr), 0, indirect);
+                       upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
        if (!indirect) {
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
-                       lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+                       lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
-                       upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect);
+                       upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+               lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
+               upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
                UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
  */
 static int jpeg_v2_0_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
        uint32_t tmp;
        int r = 0;
 
 
 static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
 
        vcn_v2_0_enable_static_power_gating(adev);
 
 static int vcn_v2_0_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
 
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 
                                /* Restore */
-                               ring = &adev->vcn.ring_enc[0];
+                               ring = &adev->vcn.inst->ring_enc[0];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 
-                               ring = &adev->vcn.ring_enc[1];
+                               ring = &adev->vcn.inst->ring_enc[1];
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 
        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst->ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst->ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
 
 static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
+       adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
+               adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
+       adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
 
 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs;
+       adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version vcn_v2_0_ip_block =
 
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vcn.num_vcn_inst = 1;
        adev->vcn.num_enc_rings = 2;
 
        vcn_v2_5_set_dec_ring_funcs(adev);
 
        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
+                       VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[0].irq);
        if (r)
                return r;
 
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.irq);
+                       i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[0].irq);
                if (r)
                        return r;
        }
 
        /* VCN JPEG TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.irq);
+                       VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[0].irq);
        if (r)
                return r;
 
        if (r)
                return r;
 
-       ring = &adev->vcn.ring_dec;
+       ring = &adev->vcn.inst[0].ring_dec;
        ring->use_doorbell = true;
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
        sprintf(ring->name, "vcn_dec");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
        if (r)
                return r;
 
        adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
        adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-       adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
+       adev->vcn.inst[0].external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
        adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-       adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
+       adev->vcn.inst[0].external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
        adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-       adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
+       adev->vcn.inst[0].external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
        adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-       adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
+       adev->vcn.inst[0].external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
        adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-       adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
+       adev->vcn.inst[0].external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
                sprintf(ring->name, "vcn_enc%d", i);
-               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
                if (r)
                        return r;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "vcn_jpeg");
-       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0);
        if (r)
                return r;
 
        adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
-       adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
+       adev->vcn.inst[0].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
 
        return 0;
 }
 static int vcn_v2_5_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        int i, r;
 
        adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
        }
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
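                /* encode rings are left disabled here; the continue skips the ring test below */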
                ring->sched.ready = false;
                continue;
                r = amdgpu_ring_test_ring(ring);
                }
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->sched.ready = false;
 static int vcn_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        int i;
 
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
        ring->sched.ready = false;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               ring = &adev->vcn.ring_enc[i];
+               ring = &adev->vcn.inst[0].ring_enc[i];
                ring->sched.ready = false;
        }
 
-       ring = &adev->vcn.ring_jpeg;
+       ring = &adev->vcn.inst[0].ring_jpeg;
        ring->sched.ready = false;
 
        return 0;
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-                       lower_32_bits(adev->vcn.gpu_addr));
+                       lower_32_bits(adev->vcn.inst[0].gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-                       upper_32_bits(adev->vcn.gpu_addr));
+                       upper_32_bits(adev->vcn.inst[0].gpu_addr));
                offset = size;
                /* No signed header for now from firmware
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 
        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset));
+               lower_32_bits(adev->vcn.inst[0].gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset));
+               upper_32_bits(adev->vcn.inst[0].gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
 
        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
-               lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               lower_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
-               upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+               upper_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
 }
  */
 static int jpeg_v2_5_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_jpeg;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_jpeg;
        uint32_t tmp;
 
        /* disable anti hang mechanism */
 
 static int vcn_v2_5_start(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *ring = &adev->vcn.ring_dec;
+       struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec;
        uint32_t rb_bufsz, tmp;
        int i, j, r;
 
        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
-       ring = &adev->vcn.ring_enc[0];
+       ring = &adev->vcn.inst[0].ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 
-       ring = &adev->vcn.ring_enc[1];
+       ring = &adev->vcn.inst[0].ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0])
+       if (ring == &adev->vcn.inst[0].ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst[0].ring_enc[0]) {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vcn.ring_enc[0]) {
+       if (ring == &adev->vcn.inst[0].ring_enc[0]) {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+       adev->vcn.inst[0].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
        DRM_INFO("VCN decode is enabled in VM mode\n");
 }
 
        int i;
 
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
-               adev->vcn.ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+               adev->vcn.inst[0].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
 
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
 static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
+       adev->vcn.inst[0].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
        DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
 }
 
 
        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
-               amdgpu_fence_process(&adev->vcn.ring_dec);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
-               amdgpu_fence_process(&adev->vcn.ring_enc[0]);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
-               amdgpu_fence_process(&adev->vcn.ring_enc[1]);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
-               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               amdgpu_fence_process(&adev->vcn.inst[0].ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
 
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2;
-       adev->vcn.irq.funcs = &vcn_v2_5_irq_funcs;
+       adev->vcn.inst[0].irq.num_types = adev->vcn.num_enc_rings + 2;
+       adev->vcn.inst[0].irq.funcs = &vcn_v2_5_irq_funcs;
 }
 
 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {