 	cancel_delayed_work_sync(&adev->jpeg.idle_work);
-	if (!amdgpu_sriov_vf(adev)) {
-		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
-			ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
-	}
+	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+		ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
 	return ret;
 }
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
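+	/* Power gating is handled by the host under SR-IOV; just record the
+	 * ungated state for the VF and skip the register programming.
+	 */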
+	if (amdgpu_sriov_vf(adev)) {
+		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+		return 0;
+	}
+
 	if (state == adev->jpeg.cur_state)
 		return 0;
 #define MMSCH_VF_ENGINE_STATUS__PASS 0x1
-#define MMSCH_VF_MAILBOX_RESP__OK 0x1
-#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
-
-#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
-
-#define MMSCH_VF_MAILBOX_RESP__OK 0x1
-#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
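+/* Response codes the MMSCH firmware writes back to the VF mailbox */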
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
 #define MMSCH_V4_0_VCN_INSTANCES 0x2