u16 msg[8];
 };
 
+/* used by VF to store the received async responses from PF */
+struct hclgevf_mbx_arq_ring {
+#define HCLGE_MBX_MAX_ARQ_MSG_SIZE     8
+#define HCLGE_MBX_MAX_ARQ_MSG_NUM      1024
+       struct hclgevf_dev *hdev;
+       u32 head;
+       u32 tail;
+       u32 count;
+       u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+};
+
 #define hclge_mbx_ring_ptr_move_crq(crq) \
        (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#define hclge_mbx_tail_ptr_move_arq(arq) \
+       (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+#define hclge_mbx_head_ptr_move_arq(arq) \
+       (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #endif
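
The ARQ above is a plain single-producer/single-consumer ring: the receive path advances tail on enqueue, the mailbox task advances head on dequeue, both wrap over HCLGE_MBX_MAX_ARQ_MSG_NUM entries, and count tracks occupancy for the full check. A minimal user-space sketch of that discipline (illustrative only, not driver code; the arq_model names are made up and any locking the driver would need is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ARQ_MSG_SIZE    8       /* u16 words per message */
#define ARQ_MSG_NUM     1024    /* ring entries */

struct arq_model {
        uint32_t head;          /* next entry to consume */
        uint32_t tail;          /* next entry to fill */
        uint32_t count;         /* entries currently queued */
        uint16_t msg_q[ARQ_MSG_NUM][ARQ_MSG_SIZE];
};

/* producer side: drop the message when the ring is full, otherwise copy it
 * into the tail slot and advance tail modulo the number of ring entries
 */
static bool arq_enqueue(struct arq_model *arq, const uint16_t *msg)
{
        if (arq->count >= ARQ_MSG_NUM)
                return false;

        memcpy(arq->msg_q[arq->tail], msg, ARQ_MSG_SIZE * sizeof(uint16_t));
        arq->tail = (arq->tail + 1) % ARQ_MSG_NUM;
        arq->count++;
        return true;
}

/* consumer side: take the oldest message from the head slot, advance head */
static bool arq_dequeue(struct arq_model *arq, uint16_t *msg)
{
        if (!arq->count)
                return false;

        memcpy(msg, arq->msg_q[arq->head], ARQ_MSG_SIZE * sizeof(uint16_t));
        arq->head = (arq->head + 1) % ARQ_MSG_NUM;
        arq->count--;
        return true;
}

Note that the wrap modulus in both cases is the number of ring entries, which is why the two arq macros use HCLGE_MBX_MAX_ARQ_MSG_NUM rather than HCLGE_MBX_MAX_ARQ_MSG_SIZE.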
 
        }
 }
 
-static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
 {
-       if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+       if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
+           !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
+               set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->mbx_service_task);
+       }
 }
 
 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
 
 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
 {
+       /* if we have any pending mailbox event then schedule the mbx task */
+       if (hdev->mbx_event_pending)
+               hclgevf_mbx_task_schedule(hdev);
+
        if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
                hclgevf_reset_task_schedule(hdev);
 }
 
        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
 
-       hclgevf_mbx_handler(hdev);
+       hclgevf_mbx_async_handler(hdev);
 
        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
 }
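
Taken together, the SCHED and HANDLING state bits and the mbx_event_pending flag form a small handshake: the receive path records the event and tries to queue the mailbox task, hclgevf_mbx_task_schedule() refuses to queue while the task is already queued or still running, and hclgevf_deferred_task_schedule() re-queues it once an event has been recorded but not yet handled (the flag is set on the receive path and cleared at the start of async handling, as shown further below). A single-threaded sketch of that handshake, with plain flags standing in for the state bits and the work queue (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool sched;              /* models HCLGEVF_STATE_MBX_SERVICE_SCHED */
static bool handling;           /* models HCLGEVF_STATE_MBX_HANDLING */
static bool event_pending;      /* models hdev->mbx_event_pending */

/* mirrors hclgevf_mbx_task_schedule(): only queue the work when it is
 * neither already queued nor currently running
 */
static void mbx_task_schedule(void)
{
        if (!sched && !handling) {
                sched = true;
                printf("mbx task queued\n");
        }
}

/* mirrors the receive path for an async message: record the event, then
 * try to queue the task (a no-op while the task is still running)
 */
static void async_message_received(void)
{
        event_pending = true;
        mbx_task_schedule();
}

/* mirrors hclgevf_deferred_task_schedule(): pick up events that arrived
 * while the mbx task was busy
 */
static void deferred_task(void)
{
        if (event_pending)
                mbx_task_schedule();
}

int main(void)
{
        async_message_received();       /* 1. event queues the mbx task */

        handling = true;                /* 2. the queued task starts... */
        sched = false;
        event_pending = false;          /*    ...and clears the flag    */

        async_message_received();       /* 3. event while busy: queueing
                                         *    refused, flag recorded
                                         */

        handling = false;               /* 4. the task finishes         */
        deferred_task();                /* 5. deferred path re-queues it */
        return 0;
}
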
        if (!hclgevf_check_event_cause(hdev, &clearval))
                goto skip_sched;
 
-       /* schedule the VF mailbox service task, if not already scheduled */
-       hclgevf_mbx_task_schedule(hdev);
+       hclgevf_mbx_handler(hdev);
 
        hclgevf_clear_event_cause(hdev, clearval);
 
 
        int *vector_irq;
 
        bool accept_mta_mc; /* whether to accept mta filter multicast */
+       bool mbx_event_pending;
        struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+       struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
 
        struct timer_list service_timer;
        struct work_struct service_task;
                         const u8 *msg_data, u8 msg_len, bool need_resp,
                         u8 *resp_data, u16 resp_len);
 void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
+
 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
                                 u8 duplex);
 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
 #endif
 
        struct hclge_mbx_pf_to_vf_cmd *req;
        struct hclgevf_cmq_ring *crq;
        struct hclgevf_desc *desc;
-       u16 link_status, flag;
-       u32 speed;
-       u8 duplex;
+       u16 *msg_q;
+       u16 flag;
        u8 *temp;
        int i;
 
                desc = &crq->desc[crq->next_to_use];
                req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
+               /* Synchronous responses are time critical and need
+                * preferential treatment, so acknowledge them as quickly as
+                * possible to keep waiting tasks from timing out. Async
+                * messages are instead queued for later processing in the
+                * context of the mailbox task, i.e. the slow path.
+                */
                switch (req->msg[0]) {
                case HCLGE_MBX_PF_VF_RESP:
                        if (resp->received_resp)
                        }
                        break;
                case HCLGE_MBX_LINK_STAT_CHANGE:
-                       link_status = le16_to_cpu(req->msg[1]);
-                       memcpy(&speed, &req->msg[2], sizeof(speed));
-                       duplex = (u8)le16_to_cpu(req->msg[4]);
+                       /* mark this mbx event as pending. This is needed
+                        * because an interrupt event could otherwise be lost
+                        * while the mbx task is busy handling; the flag is
+                        * cleared once the mbx task enters its handling state.
+                        */
+                       hdev->mbx_event_pending = true;
 
-                       /* update upper layer with new link link status */
-                       hclgevf_update_link_status(hdev, link_status);
-                       hclgevf_update_speed_duplex(hdev, speed, duplex);
+                       /* drop the async msg if the ARQ is full and continue
+                        * with the next message
+                        */
+                       if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+                               dev_warn(&hdev->pdev->dev,
+                                        "Async Q full, dropping msg(%d)\n",
+                                        req->msg[1]);
+                               break;
+                       }
+
+                       /* enqueue the async message at the tail of the arq */
+                       msg_q = hdev->arq.msg_q[hdev->arq.tail];
+                       memcpy(&msg_q[0], req->msg,
+                              HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+                       hclge_mbx_tail_ptr_move_arq(hdev->arq);
+                       hdev->arq.count++;
+
+                       hclgevf_mbx_task_schedule(hdev);
 
                        break;
                default:
        hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
                          crq->next_to_use);
 }
+
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+{
+       u16 link_status;
+       u16 *msg_q;
+       u8 duplex;
+       u32 speed;
+       u32 tail;
+
+       /* the pending flag can safely be cleared now, at the start of async
+        * message processing
+        */
+       hdev->mbx_event_pending = false;
+
+       tail = hdev->arq.tail;
+
+       /* process all the async queue messages */
+       while (tail != hdev->arq.head) {
+               msg_q = hdev->arq.msg_q[hdev->arq.head];
+
+               switch (msg_q[0]) {
+               case HCLGE_MBX_LINK_STAT_CHANGE:
+                       link_status = le16_to_cpu(msg_q[1]);
+                       memcpy(&speed, &msg_q[2], sizeof(speed));
+                       duplex = (u8)le16_to_cpu(msg_q[4]);
+
+                       /* update upper layer with new link status */
+                       hclgevf_update_link_status(hdev, link_status);
+                       hclgevf_update_speed_duplex(hdev, speed, duplex);
+
+                       break;
+               default:
+                       dev_err(&hdev->pdev->dev,
+                               "fetched unsupported(%d) message from arq\n",
+                               msg_q[0]);
+                       break;
+               }
+
+               hclge_mbx_head_ptr_move_arq(hdev->arq);
+               hdev->arq.count--;
+       }
+}
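
One sizing detail worth keeping in mind: each mailbox message is an array of eight u16 words (msg[8] above), so the copy into an ARQ slot must be HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16) bytes; copying only HCLGE_MBX_MAX_ARQ_MSG_SIZE bytes would stop after word 3 and lose the duplex word that hclgevf_mbx_async_handler() reads from msg_q[4]. A tiny standalone check (field values are made up for illustration):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ARQ_MSG_SIZE    8       /* u16 words per mailbox message */

int main(void)
{
        /* layout used by the link-status message above: word 1 = link
         * status, words 2..3 = speed, word 4 = duplex
         */
        uint16_t msg[ARQ_MSG_SIZE] = { 0, 1, 100, 0, 1, 0, 0, 0 };
        uint16_t slot[ARQ_MSG_SIZE] = { 0 };

        memcpy(slot, msg, ARQ_MSG_SIZE * sizeof(uint16_t));     /* 16 bytes */
        assert(slot[4] == msg[4]);      /* duplex word is preserved */

        memset(slot, 0, sizeof(slot));
        memcpy(slot, msg, ARQ_MSG_SIZE);                        /* 8 bytes */
        assert(slot[4] == 0);           /* duplex word was truncated */

        return 0;
}

The truncated copy in the second memcpy is why the driver-side copy scales the word count by sizeof(u16).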