crypto: hisilicon/qm - fix EQ/AEQ interrupt issue
author     Longfang Liu <liulongfang@huawei.com>
           Fri, 13 Oct 2023 03:49:57 +0000 (11:49 +0800)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Fri, 20 Oct 2023 05:39:26 +0000 (13:39 +0800)
During a HiSilicon accelerator live migration operation, the migration
driver triggers an EQ/AEQ doorbell at the end of the migration in order
to prevent EQ/AEQ interrupts from being lost.

This operation may cause the EQ/AEQ event to be interrupted twice. To
keep EQ/AEQ interrupt processing working correctly, update the EQ/AEQ
interrupt handlers so that they can cope with such repeated interrupt
events.
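
The pattern that qm_get_complete_eqe_num() relies on in the hunks below
is that validity is encoded in the queue entries themselves: each EQ
entry carries a phase bit that flips every time the ring wraps, so a
handler woken by a redundant doorbell finds a stale phase, processes
nothing, and simply re-arms the queue instead of reporting IRQ_NONE.
A minimal sketch of that shape, with the demo_* types and helpers
invented here for illustration (they are not the driver's real API):

#include <linux/interrupt.h>
#include <linux/types.h>

/*
 * Hypothetical demo_* types standing in for struct hisi_qm / struct qm_eqe;
 * only the phase-bit bookkeeping is modelled here.
 */
struct demo_eqe {
	u32 dw0;			/* bit 0: phase, remaining bits: payload */
};

struct demo_eq {
	struct demo_eqe *entries;	/* event queue ring              */
	u16 head;			/* next entry to inspect         */
	u16 depth;			/* number of entries in the ring */
	bool phase;			/* phase value of valid entries  */
};

/* Hypothetical helpers, declared only so the sketch is self-contained. */
void demo_handle_eqe(struct demo_eq *eq, struct demo_eqe *eqe);
void demo_ring_eq_doorbell(struct demo_eq *eq, u16 head);

static bool demo_eqe_phase(const struct demo_eqe *eqe)
{
	return eqe->dw0 & 1;
}

static irqreturn_t demo_eq_irq(int irq, void *data)
{
	struct demo_eq *eq = data;
	struct demo_eqe *eqe = &eq->entries[eq->head];

	/*
	 * A doorbell rung "just in case" (for example at the end of live
	 * migration) raises an interrupt even when every entry has already
	 * been consumed.  The stale phase bit exposes that case: the loop
	 * body never runs, nothing is handled twice, and the handler only
	 * re-arms the queue instead of returning IRQ_NONE.
	 */
	while (demo_eqe_phase(eqe) == eq->phase) {
		demo_handle_eqe(eq, eqe);	/* hypothetical completion hook */
		if (++eq->head == eq->depth) {	/* ring wrapped: flip expected phase */
			eq->head = 0;
			eq->phase = !eq->phase;
		}
		eqe = &eq->entries[eq->head];
	}

	demo_ring_eq_doorbell(eq, eq->head);	/* hypothetical ack/re-arm */
	return IRQ_HANDLED;
}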

Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/hisilicon/qm.c
include/linux/hisi_acc_qm.h

index a1d0473f1931b5a0a7244855f56faf07c1d12764..6ee24313d851257b88fbd032d921f2454f18d5b8 100644 (file)
@@ -894,47 +894,15 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
        qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
 }
 
-static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
-{
-       struct hisi_qm *qm = poll_data->qm;
-       struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
-       u16 eq_depth = qm->eq_depth;
-       int eqe_num = 0;
-       u16 cqn;
-
-       while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
-               cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-               poll_data->qp_finish_id[eqe_num] = cqn;
-               eqe_num++;
-
-               if (qm->status.eq_head == eq_depth - 1) {
-                       qm->status.eqc_phase = !qm->status.eqc_phase;
-                       eqe = qm->eqe;
-                       qm->status.eq_head = 0;
-               } else {
-                       eqe++;
-                       qm->status.eq_head++;
-               }
-
-               if (eqe_num == (eq_depth >> 1) - 1)
-                       break;
-       }
-
-       qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
-
-       return eqe_num;
-}
-
 static void qm_work_process(struct work_struct *work)
 {
        struct hisi_qm_poll_data *poll_data =
                container_of(work, struct hisi_qm_poll_data, work);
        struct hisi_qm *qm = poll_data->qm;
+       u16 eqe_num = poll_data->eqe_num;
        struct hisi_qp *qp;
-       int eqe_num, i;
+       int i;
 
-       /* Get qp id of completed tasks and re-enable the interrupt. */
-       eqe_num = qm_get_complete_eqe_num(poll_data);
        for (i = eqe_num - 1; i >= 0; i--) {
                qp = &qm->qp_array[poll_data->qp_finish_id[i]];
                if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
@@ -950,39 +918,55 @@ static void qm_work_process(struct work_struct *work)
        }
 }
 
-static bool do_qm_eq_irq(struct hisi_qm *qm)
+static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 {
        struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
-       struct hisi_qm_poll_data *poll_data;
-       u16 cqn;
+       struct hisi_qm_poll_data *poll_data = NULL;
+       u16 eq_depth = qm->eq_depth;
+       u16 cqn, eqe_num = 0;
 
-       if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
-               return false;
+       if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+               atomic64_inc(&qm->debug.dfx.err_irq_cnt);
+               qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+               return;
+       }
 
-       if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+       cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+       if (unlikely(cqn >= qm->qp_num))
+               return;
+       poll_data = &qm->poll_data[cqn];
+
+       while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
                cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-               poll_data = &qm->poll_data[cqn];
-               queue_work(qm->wq, &poll_data->work);
+               poll_data->qp_finish_id[eqe_num] = cqn;
+               eqe_num++;
+
+               if (qm->status.eq_head == eq_depth - 1) {
+                       qm->status.eqc_phase = !qm->status.eqc_phase;
+                       eqe = qm->eqe;
+                       qm->status.eq_head = 0;
+               } else {
+                       eqe++;
+                       qm->status.eq_head++;
+               }
 
-               return true;
+               if (eqe_num == (eq_depth >> 1) - 1)
+                       break;
        }
 
-       return false;
+       poll_data->eqe_num = eqe_num;
+       queue_work(qm->wq, &poll_data->work);
+       qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
 }
 
 static irqreturn_t qm_eq_irq(int irq, void *data)
 {
        struct hisi_qm *qm = data;
-       bool ret;
-
-       ret = do_qm_eq_irq(qm);
-       if (ret)
-               return IRQ_HANDLED;
 
-       atomic64_inc(&qm->debug.dfx.err_irq_cnt);
-       qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+       /* Get qp id of completed tasks and re-enable the interrupt */
+       qm_get_complete_eqe_num(qm);
 
-       return IRQ_NONE;
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
@@ -1064,6 +1048,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
        u16 aeq_depth = qm->aeq_depth;
        u32 type, qp_id;
 
+       atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
+
        while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
                type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
                        QM_AEQE_TYPE_MASK;
@@ -1102,17 +1088,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t qm_aeq_irq(int irq, void *data)
-{
-       struct hisi_qm *qm = data;
-
-       atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
-       if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
-               return IRQ_NONE;
-
-       return IRQ_WAKE_THREAD;
-}
-
 static void qm_init_qp_status(struct hisi_qp *qp)
 {
        struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -4923,8 +4898,8 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
                return 0;
 
        irq_vector = val & QM_IRQ_VECTOR_MASK;
-       ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
-                                                  qm_aeq_thread, 0, qm->dev_name, qm);
+       ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
+                                                  qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
        if (ret)
                dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
 
index 44e0c44a2e20428b7b41e057d6e226a5f1275d65..ddc7ebb705234c321d2cd7af0047f8a9c32c38ba 100644 (file)
@@ -276,6 +276,7 @@ struct hisi_qm_poll_data {
        struct hisi_qm *qm;
        struct work_struct work;
        u16 *qp_finish_id;
+       u16 eqe_num;
 };
 
 /**
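
On the AEQ side, the removed qm_aeq_irq() hard-IRQ check (which read
QM_VF_AEQ_INT_SOURCE and could return IRQ_NONE) is replaced by a purely
threaded handler registered in qm_register_aeq_irq() above. With a NULL
primary handler the genirq core requires IRQF_ONESHOT and keeps the line
masked until the thread function has returned, which is what makes a
repeated AEQ interrupt harmless. A rough sketch of that registration
pattern, again with hypothetical demo_* names rather than the driver's
real helpers:

#include <linux/interrupt.h>

struct demo_dev;				/* hypothetical device context      */
bool demo_aeqe_valid(struct demo_dev *dev);	/* head AEQE phase still matches?   */
void demo_handle_one_aeqe(struct demo_dev *dev);
void demo_ring_aeq_doorbell(struct demo_dev *dev);

static irqreturn_t demo_aeq_thread(int irq, void *data)
{
	struct demo_dev *dev = data;

	/* Drain every pending entry, then re-arm the queue. */
	while (demo_aeqe_valid(dev))
		demo_handle_one_aeqe(dev);
	demo_ring_aeq_doorbell(dev);

	return IRQ_HANDLED;
}

static int demo_register_aeq_irq(struct demo_dev *dev, int irq)
{
	/*
	 * No primary handler: IRQF_ONESHOT keeps the interrupt masked until
	 * demo_aeq_thread() has returned, so a second doorbell-triggered
	 * interrupt cannot race with a thread that is still draining the
	 * queue; at worst the thread runs once more and finds nothing to do.
	 */
	return request_threaded_irq(irq, NULL, demo_aeq_thread,
				    IRQF_ONESHOT, "demo-aeq", dev);
}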