return PCI_ERS_RESULT_DISCONNECT;
 }
 
+/* PCI-core reset_prepare hook: invoked before a function level reset
+ * (FLR) is issued to the device.  Forwards to the ae_dev's flr_prepare
+ * op (wired to hclge_flr_prepare for PF devices) when registered, so
+ * the hardware layer can quiesce the function first.
+ */
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+       dev_info(&pdev->dev, "hns3 flr prepare\n");
+       /* ae_dev/ops may be absent if init failed or tore down; check each */
+       if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+               ae_dev->ops->flr_prepare(ae_dev);
+}
+
+/* PCI-core reset_done hook: invoked after the FLR has completed.
+ * Forwards to the ae_dev's flr_done op (wired to hclge_flr_done for PF
+ * devices) so the hardware layer can resume and re-initialize.
+ */
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+       dev_info(&pdev->dev, "hns3 flr done\n");
+       /* ae_dev/ops may be absent if init failed or tore down; check each */
+       if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+               ae_dev->ops->flr_done(ae_dev);
+}
+
 static const struct pci_error_handlers hns3_err_handler = {
        .error_detected = hns3_error_detected,
        .slot_reset     = hns3_slot_reset,
+       /* FLR hooks: let the driver quiesce/resume around a PCI reset */
+       .reset_prepare  = hns3_reset_prepare,
+       .reset_done     = hns3_reset_done,
 };
 
 static struct pci_driver hns3_driver = {
 
                reg = HCLGE_FUN_RST_ING;
                reg_bit = HCLGE_FUN_RST_ING_B;
                break;
+       case HNAE3_FLR_RESET:
+               break;
        default:
                dev_err(&hdev->pdev->dev,
                        "Wait for unsupported reset type: %d\n",
                return -EINVAL;
        }
 
+       if (hdev->reset_type == HNAE3_FLR_RESET) {
+               while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+                      cnt++ < HCLGE_RESET_WAIT_CNT)
+                       msleep(HCLGE_RESET_WATI_MS);
+
+               if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+                       dev_err(&hdev->pdev->dev,
+                               "flr wait timeout: %d\n", cnt);
+                       return -EBUSY;
+               }
+
+               return 0;
+       }
+
        val = hclge_read_dev(&hdev->hw, reg);
        while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
                msleep(HCLGE_RESET_WATI_MS);
                set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
                hclge_reset_task_schedule(hdev);
                break;
+       case HNAE3_FLR_RESET:
+               dev_info(&pdev->dev, "FLR requested\n");
+               /* schedule again to check later */
+               set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+               hclge_reset_task_schedule(hdev);
+               break;
        default:
                dev_warn(&pdev->dev,
                         "Unsupported reset type: %d\n", hdev->reset_type);
        } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
                rst_level = HNAE3_FUNC_RESET;
                clear_bit(HNAE3_FUNC_RESET, addr);
+       } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+               rst_level = HNAE3_FLR_RESET;
+               clear_bit(HNAE3_FLR_RESET, addr);
        }
 
        return rst_level;
 
        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
+               /* fall through */
+       case HNAE3_FLR_RESET:
                ret = hclge_set_all_vf_rst(hdev, true);
                break;
        default:
                 */
                set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                break;
+       case HNAE3_FLR_RESET:
+               /* There is no mechanism for PF to know if VF has stopped IO
+                * for now, just wait 100 ms for VF to stop IO
+                */
+               msleep(100);
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+               set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+               break;
        case HNAE3_IMP_RESET:
                reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
                hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
 
        switch (hdev->reset_type) {
        case HNAE3_FUNC_RESET:
+               /* fall through */
+       case HNAE3_FLR_RESET:
                ret = hclge_set_all_vf_rst(hdev, false);
                break;
        default:
                cancel_work_sync(&hdev->mbx_service_task);
 }
 
+/* ae_ops flr_prepare implementation: quiesce the PF ahead of an FLR.
+ *
+ * Clears the per-FLR tracking bits, requests an HNAE3_FLR_RESET through
+ * the common reset-event path, then polls (up to HCLGE_FLR_WAIT_CNT *
+ * HCLGE_FLR_WAIT_MS = 5000 ms) for the reset task to set HNAE3_FLR_DOWN,
+ * i.e. for the function to be brought down.  On timeout the condition is
+ * only logged; the FLR proceeds regardless.
+ */
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS      100
+#define HCLGE_FLR_WAIT_CNT     50
+       struct hclge_dev *hdev = ae_dev->priv;
+       int cnt = 0;
+
+       /* Reset FLR progress bits before kicking off a new FLR cycle */
+       clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+       clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+       /* Request an FLR-type reset via the normal reset-event machinery */
+       set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+       hclge_reset_event(hdev->pdev, NULL);
+
+       /* Wait for the reset task to signal that the function is down */
+       while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+              cnt++ < HCLGE_FLR_WAIT_CNT)
+               msleep(HCLGE_FLR_WAIT_MS);
+
+       if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+               dev_err(&hdev->pdev->dev,
+                       "flr wait down timeout: %d\n", cnt);
+}
+
+/* ae_ops flr_done implementation: signal that the FLR has completed.
+ * Setting HNAE3_FLR_DONE releases the poll loop in the reset-wait path,
+ * which spins on this bit for HNAE3_FLR_RESET type resets.
+ */
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+       struct hclge_dev *hdev = ae_dev->priv;
+
+       set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
        struct pci_dev *pdev = ae_dev->pdev;
 static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
+       .flr_prepare = hclge_flr_prepare,
+       .flr_done = hclge_flr_done,
        .init_client_instance = hclge_init_client_instance,
        .uninit_client_instance = hclge_uninit_client_instance,
        .map_ring_to_vector = hclge_map_ring_to_vector,