struct amdgpu_virt_ops {
        int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
        int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+       /* Ask the host (PF) to publish GPU init data for this VF. */
+       int (*req_init_data)(struct amdgpu_device *adev);
        int (*reset_gpu)(struct amdgpu_device *adev);
        int (*wait_reset)(struct amdgpu_device *adev);
        void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
        struct amdgpu_virt_fw_reserve   fw_reserve;
        uint32_t gim_feature;
        uint32_t reg_access_mode;
+       /* REQ_GPU_INIT_DATA handshake version reported by the host:
+        * 0 when the host does not support the handshake, >= 1 otherwise.
+        */
+       int req_init_data_ver;
 };
 
 #define amdgpu_sriov_enabled(adev) \
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+/* Ask the host for GPU init data (see amdgpu_virt_ops.req_init_data). */
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 
                timeout -= 10;
        } while (timeout > 1);
 
-       pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
 
        return -ETIME;
 }
                                        enum idh_request req)
 {
        int r;
+       enum idh_event event = -1;
 
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
-       /* start to check msg if request is idh_req_gpu_init_access */
-       if (req == IDH_REQ_GPU_INIT_ACCESS ||
-               req == IDH_REQ_GPU_FINI_ACCESS ||
-               req == IDH_REQ_GPU_RESET_ACCESS) {
-               r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+       switch (req) {
+       case IDH_REQ_GPU_INIT_ACCESS:
+       case IDH_REQ_GPU_FINI_ACCESS:
+       case IDH_REQ_GPU_RESET_ACCESS:
+               event = IDH_READY_TO_ACCESS_GPU;
+               break;
+       case IDH_REQ_GPU_INIT_DATA:
+               event = IDH_REQ_GPU_INIT_DATA_READY;
+               break;
+       default:
+               break;
+       }
+
+       if (event != -1) {
+               r = xgpu_nv_poll_msg(adev, event);
                if (r) {
-                       pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
-                       return r;
+                       if (req != IDH_REQ_GPU_INIT_DATA) {
+                               pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+                               return r;
+                       }
+                       else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+                               adev->virt.req_init_data_ver = 0;
+               } else {
+                       if (req == IDH_REQ_GPU_INIT_DATA)
+                       {
+                               adev->virt.req_init_data_ver =
+                                       RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+                                               mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW1));
+
+                               /* assume V1 in case host doesn't set version number */
+                               if (adev->virt.req_init_data_ver < 1)
+                                       adev->virt.req_init_data_ver = 1;
+                       }
                }
+
                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
        return r;
 }
 
+/* Ask the host for GPU init data via the mailbox handshake.  As a side
+ * effect, xgpu_nv_send_access_requests() records the handshake version in
+ * adev->virt.req_init_data_ver (0 when the host lacks support, >= 1
+ * otherwise).
+ */
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+       return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        struct amdgpu_iv_entry *entry)
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
+       .req_init_data  = xgpu_nv_request_init_data,
        .reset_gpu = xgpu_nv_request_reset,
        .wait_reset = NULL,
        .trans_msg = xgpu_nv_mailbox_trans_msg,