clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 }
 
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+       u32 rst_src_reg;
+
+       /* fetch the events from their corresponding regs */
+       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+
+       /* check for vector0 reset event sources */
+       if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
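+               /* record the pending reset type for later processing */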
+               set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+               *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+               return HCLGE_VECTOR0_EVENT_RST;
+       }
+
+       if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+               set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+               *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+               return HCLGE_VECTOR0_EVENT_RST;
+       }
+
+       if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+               set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+               *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+               return HCLGE_VECTOR0_EVENT_RST;
+       }
+
+       /* the check for mailbox events sharing vector 0 would be added here */
+
+       return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+                                   u32 regclr)
+{
+       if (event_type == HCLGE_VECTOR0_EVENT_RST)
+               hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+
+       /* clearing of mailbox events sharing vector 0 would be added here */
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
        writel(enable ? 1 : 0, vector->addr);
 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 {
        struct hclge_dev *hdev = data;
+       u32 event_cause;
+       u32 clearval = 0;
 
        hclge_enable_vector(&hdev->misc_vector, false);
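+       /* vector 0 stays masked while the event cause is read and cleared;
+        * it is re-enabled at the end of the handler
+        */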
-       if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
-               schedule_work(&hdev->service_task);
+       event_cause = hclge_check_event_cause(hdev, &clearval);
+
+       /* vector 0 interrupt is shared by reset and mailbox event sources.
+        * For now, we are not handling mailbox events.
+        */
+       switch (event_cause) {
+       case HCLGE_VECTOR0_EVENT_RST:
+               /* reset task to be scheduled here */
+               break;
+       default:
+               dev_dbg(&hdev->pdev->dev,
+                       "received unknown or unhandled vector0 event\n");
+               break;
+       }
+
+       /* clear the source of the interrupt */
+       hclge_clear_event_cause(hdev, event_cause, clearval);
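+       /* re-enable vector 0 so further events can raise the interrupt */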
+       hclge_enable_vector(&hdev->misc_vector, true);
 
        return IRQ_HANDLED;
 }
 
        hclge_get_misc_vector(hdev);
 
-       ret = devm_request_irq(&hdev->pdev->dev,
-                              hdev->misc_vector.vector_irq,
-                              hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+       /* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
+       ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+                         0, "hclge_misc", hdev);
        if (ret) {
                hclge_free_vector(hdev, 0);
                dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
        return ret;
 }
 
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+       free_irq(hdev->misc_vector.vector_irq, hdev);
+       hclge_free_vector(hdev, 0);
+}
+
 static int hclge_notify_client(struct hclge_dev *hdev,
                               enum hnae3_reset_notify_type type)
 {
                cnt++;
        }
 
-       /* must clear reset status register to
-        * prevent driver detect reset interrupt again
-        */
-       reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-       hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
        if (cnt >= HCLGE_RESET_WAIT_CNT) {
                dev_warn(&hdev->pdev->dev,
                         "Wait for reset timeout: %d\n", hdev->reset_type);
        }
 }
 
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
-{
-       enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
-       u32 rst_reg_val;
-
-       rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-       if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
-               rst_level = HNAE3_GLOBAL_RESET;
-       else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
-               rst_level = HNAE3_CORE_RESET;
-       else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
-               rst_level = HNAE3_IMP_RESET;
-
-       return rst_level;
-}
-
 static void hclge_reset_event(struct hnae3_handle *handle,
                              enum hnae3_reset_type reset)
 {
 
        do_reset = hdev->reset_type != HNAE3_NONE_RESET;
 
-       /* Reset is detected by interrupt */
-       if (hdev->reset_type == HNAE3_NONE_RESET)
-               hdev->reset_type = hclge_detected_reset_event(hdev);
-
        if (hdev->reset_type == HNAE3_NONE_RESET)
                return;
 static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
 {
        hclge_reset_subtask(hdev);
-       hclge_enable_vector(&hdev->misc_vector, true);
 }
 
 static void hclge_service_task(struct work_struct *work)
        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        hdev->reset_type = HNAE3_NONE_RESET;
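+       /* no reset source has been recorded yet */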
+       hdev->reset_pending = 0;
        ae_dev->priv = hdev;
 
        ret = hclge_pci_init(hdev);
 
        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
-       hclge_free_vector(hdev, 0);
        hclge_destroy_cmd_queue(&hdev->hw);
+       hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
        ae_dev->priv = NULL;
 }