return ret;
 }
 
+static void hclge_set_reset_pending(struct hclge_dev *hdev,
+                                   enum hnae3_reset_type reset_type)
+{
+       /* When an incorrect reset type is executed, the get_reset_level
+        * function returns HNAE3_NONE_RESET. This type does not need to
+        * be set as pending.
+        */
+       if (reset_type != HNAE3_NONE_RESET)
+               set_bit(reset_type, &hdev->reset_pending);
+}
+
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
        u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
         */
        if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
                dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
-               set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+               hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
                *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
                hdev->rst_stats.imp_rst_cnt++;
        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
                dev_info(&hdev->pdev->dev, "global reset interrupt\n");
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
-               set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+               hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                hdev->rst_stats.global_rst_cnt++;
                return HCLGE_VECTOR0_EVENT_RST;
        case HNAE3_FUNC_RESET:
                dev_info(&pdev->dev, "PF reset requested\n");
                /* schedule again to check later */
-               set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+               hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
                hclge_reset_task_schedule(hdev);
                break;
        default:
                clear_bit(HNAE3_FLR_RESET, addr);
        }
 
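+       /* Clear HNAE3_NONE_RESET, which may have been set by
+        * hclge_set_def_reset_request() for an unsupported reset type.
+        */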
+       clear_bit(HNAE3_NONE_RESET, addr);
+
        if (hdev->reset_type != HNAE3_NONE_RESET &&
            rst_level < hdev->reset_type)
                return HNAE3_NONE_RESET;
                return false;
        } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
                hdev->rst_stats.reset_fail_cnt++;
-               set_bit(hdev->reset_type, &hdev->reset_pending);
+               hclge_set_reset_pending(hdev, hdev->reset_type);
                dev_info(&hdev->pdev->dev,
                         "re-schedule reset task(%u)\n",
                         hdev->rst_stats.reset_fail_cnt);
 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
                                        enum hnae3_reset_type rst_type)
 {
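+/* Reset types accepted by hclge_set_def_reset_request() */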
+#define HCLGE_SUPPORT_RESET_TYPE \
+       (BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
+       BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))
+
        struct hclge_dev *hdev = ae_dev->priv;
 
+       if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
+               /* To prevent a reset triggered by hclge_reset_event() */
+               set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+               dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
+                        rst_type);
+               return;
+       }
+
        set_bit(rst_type, &hdev->default_reset_request);
 }
 
 
        return ret;
 }
 
+static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
+                                     enum hnae3_reset_type reset_type)
+{
+       /* When an incorrect reset type is executed, the get_reset_level
+        * function returns HNAE3_NONE_RESET. This type does not need to
+        * be set as pending.
+        */
+       if (reset_type != HNAE3_NONE_RESET)
+               set_bit(reset_type, &hdev->reset_pending);
+}
+
 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 {
 #define HCLGEVF_RESET_WAIT_US  20000
                hdev->rst_stats.rst_fail_cnt);
 
        if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
-               set_bit(hdev->reset_type, &hdev->reset_pending);
+               hclgevf_set_reset_pending(hdev, hdev->reset_type);
 
        if (hclgevf_is_reset_pending(hdev)) {
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                clear_bit(HNAE3_FLR_RESET, addr);
        }
 
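+       /* Clear HNAE3_NONE_RESET, which may have been set by
+        * hclgevf_set_def_reset_request() for an unsupported reset type.
+        */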
+       clear_bit(HNAE3_NONE_RESET, addr);
+
        return rst_level;
 }
 
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
        struct hclgevf_dev *hdev = ae_dev->priv;
 
-       dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
-
        if (hdev->default_reset_request)
                hdev->reset_level =
                        hclgevf_get_reset_level(&hdev->default_reset_request);
        else
                hdev->reset_level = HNAE3_VF_FUNC_RESET;
 
+       dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
+                hdev->reset_level);
+
        /* reset of this VF requested */
        set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
        hclgevf_reset_task_schedule(hdev);
 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
                                          enum hnae3_reset_type rst_type)
 {
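+/* Reset types accepted by hclgevf_set_def_reset_request() */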
+#define HCLGEVF_SUPPORT_RESET_TYPE \
+       (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
+       BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
+       BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))
+
        struct hclgevf_dev *hdev = ae_dev->priv;
 
+       if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
+               /* To prevent a reset triggered by hclgevf_reset_event() */
+               set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
+               dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
+                        rst_type);
+               return;
+       }
+
        set_bit(rst_type, &hdev->default_reset_request);
 }
 
                 */
                if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
                        /* prepare for full reset of stack + pcie interface */
-                       set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+                       hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);
 
                        /* "defer" schedule the reset task again */
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                } else {
                        hdev->reset_attempts++;
 
-                       set_bit(hdev->reset_level, &hdev->reset_pending);
+                       hclgevf_set_reset_pending(hdev, hdev->reset_level);
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                }
                hclgevf_reset_task_schedule(hdev);
                rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
                dev_info(&hdev->pdev->dev,
                         "receive reset interrupt 0x%x!\n", rst_ing_reg);
-               set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+               hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
                *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);