u16 msg[8];
 };
 
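+/* used by PF to set or clear a VF's function reset status */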
+struct hclge_vf_rst_cmd {
+       u8 dest_vfid;
+       u8 vf_rst;
+       u8 rsv[22];
+};
+
 /* used by VF to store the received Async responses from PF */
 struct hclgevf_mbx_arq_ring {
 #define HCLGE_MBX_MAX_ARQ_MSG_SIZE     8
 
 enum hnae3_reset_type {
        HNAE3_VF_RESET,
        HNAE3_VF_FUNC_RESET,
+       HNAE3_VF_PF_FUNC_RESET,
        HNAE3_VF_FULL_RESET,
        HNAE3_FUNC_RESET,
        HNAE3_CORE_RESET,
 
 static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
 {
        return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
-                          ae_dev->reset_type == HNAE3_VF_FUNC_RESET));
+                          ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+                          ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
 }
 
 #define hns3_read_dev(a, reg) \
 
        return 0;
 }
 
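+/* ask the firmware to set or clear the function reset status of the VF
+ * identified by func_id
+ */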
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+       struct hclge_vf_rst_cmd *req;
+       struct hclge_desc desc;
+
+       req = (struct hclge_vf_rst_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+       req->dest_vfid = func_id;
+
+       if (reset)
+               req->vf_rst = 0x1;
+
+       return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
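+/* set or clear the function reset status of all VF vports; when asserting
+ * reset, also inform each VF of the asserted reset
+ */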
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+       int i;
+
+       for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+               struct hclge_vport *vport = &hdev->vport[i];
+               int ret;
+
+               /* Send cmd to set/clear VF's FUNC_RST_ING */
+               ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "set vf(%d) rst failed %d!\n",
+                               vport->vport_id, ret);
+                       return ret;
+               }
+
+               if (!reset)
+                       continue;
+
+               /* Inform VF to process the reset.
+                * hclge_inform_reset_assert_to_vf may fail if VF
+                * driver is not loaded.
+                */
+               ret = hclge_inform_reset_assert_to_vf(vport);
+               if (ret)
+                       dev_warn(&hdev->pdev->dev,
+                                "inform reset to vf(%d) failed %d!\n",
+                                vport->vport_id, ret);
+       }
+
+       return 0;
+}
+
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 {
        struct hclge_desc desc;
        hclge_enable_vector(&hdev->misc_vector, true);
 }
 
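+/* reset preparation needed before the stack is brought down: for a
+ * function reset, set the reset status of all VFs first
+ */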
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+       int ret = 0;
+
+       switch (hdev->reset_type) {
+       case HNAE3_FUNC_RESET:
+               ret = hclge_set_all_vf_rst(hdev, true);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 {
        int ret = 0;
 
        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
+               /* There is no mechanism for the PF to know whether the VF has
+                * stopped IO yet, so just wait 100 ms for the VF to stop IO.
+                */
+               msleep(100);
                ret = hclge_func_reset_cmd(hdev, 0);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
        return false;
 }
 
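+/* reset handling done before the stack is brought back up: for a
+ * function reset, clear the reset status of all VFs
+ */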
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+       int ret = 0;
+
+       switch (hdev->reset_type) {
+       case HNAE3_FUNC_RESET:
+               ret = hclge_set_all_vf_rst(hdev, false);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 static void hclge_reset(struct hclge_dev *hdev)
 {
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
        if (ret)
                goto err_reset;
 
+       ret = hclge_reset_prepare_down(hdev);
+       if (ret)
+               goto err_reset;
+
        rtnl_lock();
        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
 
        hclge_clear_reset_cause(hdev);
 
+       ret = hclge_reset_prepare_up(hdev);
+       if (ret)
+               goto err_reset_lock;
+
        ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
        if (ret)
                goto err_reset_lock;
 
 int hclge_rss_init_hw(struct hclge_dev *hdev);
 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
 
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
 void hclge_mbx_handler(struct hclge_dev *hdev);
 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
 
 
 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
 {
+       struct hclge_dev *hdev = vport->back;
+       enum hnae3_reset_type reset_type;
        u8 msg_data[2];
        u8 dest_vfid;
 
        dest_vfid = (u8)vport->vport_id;
 
+       if (hdev->reset_type == HNAE3_FUNC_RESET)
+               reset_type = HNAE3_VF_PF_FUNC_RESET;
+       else
+               return -EINVAL;
+
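+       /* put the reset level in the message data for the VF to read */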
+       memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
        /* send this requested info to VF */
-       return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+       return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
                                  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
 }
 
 
 // Copyright (c) 2016-2017 Hisilicon Limited.
 
 #include <linux/etherdevice.h>
+#include <linux/iopoll.h>
 #include <net/rtnetlink.h>
 #include "hclgevf_cmd.h"
 #include "hclgevf_main.h"
 
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
-#define HCLGEVF_RESET_WAIT_MS  500
-#define HCLGEVF_RESET_WAIT_CNT 20
-       u32 val, cnt = 0;
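+/* poll the reset status every 20 ms, for up to 40 seconds in total */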
+#define HCLGEVF_RESET_WAIT_US  20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US  \
+       (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+       u32 val;
+       int ret;
 
        /* wait to check the hardware reset completion status */
-       val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-       while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
-              (cnt < HCLGEVF_RESET_WAIT_CNT)) {
-               msleep(HCLGEVF_RESET_WAIT_MS);
-               val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-               cnt++;
-       }
+       val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+       dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+       ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+                                !(val & HCLGEVF_RST_ING_BITS),
+                                HCLGEVF_RESET_WAIT_US,
+                                HCLGEVF_RESET_WAIT_TIMEOUT_US);
 
        /* hardware completion status should be available by this time */
-       if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
-               dev_warn(&hdev->pdev->dev,
-                        "could'nt get reset done status from h/w, timeout!\n");
-               return -EBUSY;
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "couldn't get reset done status from h/w, timeout!\n");
+               return ret;
        }
 
        /* we will wait a bit more to let reset of the stack to complete. This
                rst_level = HNAE3_VF_FULL_RESET;
                clear_bit(HNAE3_VF_FULL_RESET, addr);
                clear_bit(HNAE3_VF_FUNC_RESET, addr);
+       } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+               rst_level = HNAE3_VF_PF_FUNC_RESET;
+               clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+               clear_bit(HNAE3_VF_FUNC_RESET, addr);
        } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
                rst_level = HNAE3_VF_FUNC_RESET;
                clear_bit(HNAE3_VF_FUNC_RESET, addr);
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-       return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
+       return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
 }
 
 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
 
 
 #define HCLGEVF_TQP_RESET_TRY_TIMES    10
 /* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING            0x20C00
-#define HCLGEVF_FUN_RST_ING_B          0
+#define HCLGEVF_RST_ING                        0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT                BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT     BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT       BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT                BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+       (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+        HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
 
 #define HCLGEVF_RSS_IND_TBL_SIZE               512
 #define HCLGEVF_RSS_SET_BITMAP_MSK     0xffff
 
 
 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 {
+       enum hnae3_reset_type reset_type;
        u16 link_status;
        u16 *msg_q;
        u8 duplex;
                         * has been completely reset. After this stack should
                         * eventually be re-initialized.
                         */
-                       set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+                       reset_type = le16_to_cpu(msg_q[1]);
+                       set_bit(reset_type, &hdev->reset_pending);
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                        hclgevf_reset_task_schedule(hdev);