        return 0;
 }
 
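+/*
+ * Set up the fw_shared buffer for one VCN instance: advertise the unified
+ * queue to firmware and, if requested, initialize firmware logging.
+ * Instances that are already initialized are left untouched.
+ */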
+static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+       struct amdgpu_vcn5_fw_shared *fw_shared;
+
+       fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+
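+       /* Already initialized; nothing to re-advertise for this instance */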
+       if (fw_shared->sq.is_enabled)
+               return;
+       fw_shared->present_flag_0 =
+               cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+       fw_shared->sq.is_enabled = 1;
+
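+       /* Firmware logging is opt-in via the amdgpu.vcnfw_log module parameter */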
+       if (amdgpu_vcnfw_log)
+               amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+}
+
 /**
  * vcn_v5_0_1_sw_init - sw init for VCN block
  *
                return r;
 
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-               volatile struct amdgpu_vcn5_fw_shared *fw_shared;
-
                vcn_inst = GET_INST(VCN, i);
 
                r = amdgpu_vcn_sw_init(adev, i);
                if (r)
                        return r;
 
-               fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
-               fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
-               fw_shared->sq.is_enabled = true;
-
-               if (amdgpu_vcnfw_log)
-                       amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+               vcn_v5_0_1_fw_shared_init(adev, i);
        }
 
        /* TODO: Add queue reset mask when FW fully supports it */
                                 9 * vcn_inst),
                                adev->vcn.inst[i].aid_id);
 
+               /* Re-init fw_shared, if required */
+               vcn_v5_0_1_fw_shared_init(adev, i);
+
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;