ionic: Keep interrupt affinity up to date
author     Brett Creeley <brett.creeley@amd.com>
           Wed, 19 Jun 2024 00:32:51 +0000 (17:32 -0700)
committer  Jakub Kicinski <kuba@kernel.org>
           Thu, 20 Jun 2024 01:31:33 +0000 (18:31 -0700)
Currently the driver either sets the initial interrupt affinity for its
adminq and tx/rx queues on probe or resets it on the various
down/up/reconfigure flows. If a user or a user-space process
(e.g. irqbalance) changes the IRQ affinity of any of the driver's
interrupts, that setting is thrown away and reset to the driver
defaults the next time any down/up/reconfigure operation happens. This
is incorrect and is fixed by making two changes:

1. Allocate an array of cpumasks once on probe and free it only on
   remove, so the masks survive down/up/reconfigure cycles.
2. Keep the cpumask(s) of in-use interrupts up to date by registering
   for affinity notifiers (see the sketch below).

Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Link: https://lore.kernel.org/r/20240619003257.6138-3-shannon.nelson@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
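
For context, here is a minimal, hedged sketch of the pattern the patch
adopts (kernel C; the "my_" names and helpers are hypothetical and not
part of the ionic driver): one cpumask per interrupt is allocated once
at probe, an irq_affinity_notify callback records whatever affinity the
user sets, and the enable path re-applies the saved mask as a hint
instead of recomputing a driver default.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical per-interrupt state; the "my_" prefix marks names that
 * are illustrative only and do not exist in the ionic driver. */
struct my_intr {
	unsigned int vector;			/* Linux IRQ number */
	cpumask_var_t *affinity_mask;		/* points into the probe-time array */
	struct irq_affinity_notify aff_notify;
};

/* Allocate one cpumask per interrupt, once, at probe time. */
static cpumask_var_t *my_affinity_masks_alloc(struct device *dev, int nintrs)
{
	cpumask_var_t *masks;
	int i;

	masks = kcalloc(nintrs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (i = 0; i < nintrs; i++) {
		if (!zalloc_cpumask_var_node(&masks[i], GFP_KERNEL,
					     dev_to_node(dev))) {
			while (--i >= 0)
				free_cpumask_var(masks[i]);
			kfree(masks);
			return NULL;
		}
	}

	return masks;
}

/* Called whenever the IRQ affinity changes (e.g. irqbalance or a sysfs
 * write); remember the user's choice so later up/reconfigure paths
 * reuse it instead of overwriting it. */
static void my_irq_aff_notify(struct irq_affinity_notify *notify,
			      const cpumask_t *mask)
{
	struct my_intr *intr = container_of(notify, struct my_intr, aff_notify);

	cpumask_copy(*intr->affinity_mask, mask);
}

static void my_irq_aff_release(struct kref *ref)
{
	/* nothing to free: the cpumask outlives the notifier registration */
}

/* Enable path: register the notifier and apply the remembered mask as
 * a hint instead of recomputing a driver default. */
static void my_intr_enable(struct my_intr *intr)
{
	intr->aff_notify.notify = my_irq_aff_notify;
	intr->aff_notify.release = my_irq_aff_release;
	irq_set_affinity_notifier(intr->vector, &intr->aff_notify);
	irq_set_affinity_hint(intr->vector, *intr->affinity_mask);
}

/* Disable path: unregister both; the saved mask itself is kept. */
static void my_intr_disable(struct my_intr *intr)
{
	irq_set_affinity_notifier(intr->vector, NULL);
	irq_set_affinity_hint(intr->vector, NULL);
}

The actual ionic changes below follow the same shape, but hang the mask
array off struct ionic and the notifier off struct ionic_intr_info.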
drivers/net/ethernet/pensando/ionic/ionic.h
drivers/net/ethernet/pensando/ionic/ionic_dev.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c

index 2ccc2c2a06e313ae4d6e073e6a239e35d5f39b41..438172cfb1703d9a2e6420dfb2adf26574a03a6f 100644 (file)
@@ -54,6 +54,7 @@ struct ionic {
        unsigned int nrxqs_per_lif;
        unsigned int nintrs;
        DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
+       cpumask_var_t *affinity_masks;
        struct work_struct nb_work;
        struct notifier_block nb;
        struct rw_semaphore vf_op_lock; /* lock for VF operations */
index f30eee4a5a80e4bbd15a844da71b06660d4b6b05..7dbd3b8b0e36d73b13922563c485272d640b5667 100644 (file)
@@ -280,9 +280,9 @@ struct ionic_intr_info {
        u64 rearm_count;
        unsigned int index;
        unsigned int vector;
-       unsigned int cpu;
        u32 dim_coal_hw;
-       cpumask_t affinity_mask;
+       cpumask_var_t *affinity_mask;
+       struct irq_affinity_notify aff_notify;
 };
 
 struct ionic_cq {
index 1f02b32755fc7fa5357556bebf14f780c2d8f43b..46cb143b5941eca25959d8296270259c017cf0fa 100644 (file)
@@ -265,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index)
                clear_bit(index, ionic->intrs);
 }
 
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+                                const cpumask_t *mask)
+{
+       struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+       cpumask_copy(*intr->affinity_mask, mask);
+}
+
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
 static int ionic_qcq_enable(struct ionic_qcq *qcq)
 {
        struct ionic_queue *q = &qcq->q;
@@ -299,8 +311,10 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
 
        if (qcq->flags & IONIC_QCQ_F_INTR) {
                napi_enable(&qcq->napi);
+               irq_set_affinity_notifier(qcq->intr.vector,
+                                         &qcq->intr.aff_notify);
                irq_set_affinity_hint(qcq->intr.vector,
-                                     &qcq->intr.affinity_mask);
+                                     *qcq->intr.affinity_mask);
                ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
                                IONIC_INTR_MASK_CLEAR);
        }
@@ -334,6 +348,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
                ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
                                IONIC_INTR_MASK_SET);
                synchronize_irq(qcq->intr.vector);
+               irq_set_affinity_notifier(qcq->intr.vector, NULL);
                irq_set_affinity_hint(qcq->intr.vector, NULL);
                napi_disable(&qcq->napi);
        }
@@ -474,6 +489,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
 
 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
 {
+       cpumask_var_t *affinity_mask;
        int err;
 
        if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
@@ -505,10 +521,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
        }
 
        /* try to get the irq on the local numa node first */
-       qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
-                                            dev_to_node(lif->ionic->dev));
-       if (qcq->intr.cpu != -1)
-               cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+       affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
+       if (cpumask_empty(*affinity_mask)) {
+               unsigned int cpu;
+
+               cpu = cpumask_local_spread(qcq->intr.index,
+                                          dev_to_node(lif->ionic->dev));
+               if (cpu != -1)
+                       cpumask_set_cpu(cpu, *affinity_mask);
+       }
+
+       qcq->intr.affinity_mask = affinity_mask;
+       qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+       qcq->intr.aff_notify.release = ionic_irq_aff_release;
 
        netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
        return 0;
@@ -3120,6 +3145,44 @@ err_out:
        return err;
 }
 
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+       cpumask_var_t *affinity_masks;
+       int nintrs = ionic->nintrs;
+       int i;
+
+       affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+       if (!affinity_masks)
+               return -ENOMEM;
+
+       for (i = 0; i < nintrs; i++) {
+               if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+                                            dev_to_node(ionic->dev)))
+                       goto err_out;
+       }
+
+       ionic->affinity_masks = affinity_masks;
+
+       return 0;
+
+err_out:
+       for (--i; i >= 0; i--)
+               free_cpumask_var(affinity_masks[i]);
+       kfree(affinity_masks);
+
+       return -ENOMEM;
+}
+
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+       int i;
+
+       for (i = 0; i < ionic->nintrs; i++)
+               free_cpumask_var(ionic->affinity_masks[i]);
+       kfree(ionic->affinity_masks);
+       ionic->affinity_masks = NULL;
+}
+
 int ionic_lif_alloc(struct ionic *ionic)
 {
        struct device *dev = ionic->dev;
@@ -3211,11 +3274,15 @@ int ionic_lif_alloc(struct ionic *ionic)
 
        ionic_debugfs_add_lif(lif);
 
+       err = ionic_affinity_masks_alloc(ionic);
+       if (err)
+               goto err_out_free_lif_info;
+
        /* allocate control queues and txrx queue arrays */
        ionic_lif_queue_identify(lif);
        err = ionic_qcqs_alloc(lif);
        if (err)
-               goto err_out_free_lif_info;
+               goto err_out_free_affinity_masks;
 
        /* allocate rss indirection table */
        tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
@@ -3237,6 +3304,8 @@ int ionic_lif_alloc(struct ionic *ionic)
 
 err_out_free_qcqs:
        ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+       ionic_affinity_masks_free(lif->ionic);
 err_out_free_lif_info:
        dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
        lif->info = NULL;
@@ -3410,6 +3479,8 @@ void ionic_lif_free(struct ionic_lif *lif)
        if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
                ionic_lif_reset(lif);
 
+       ionic_affinity_masks_free(lif->ionic);
+
        /* free lif info */
        kfree(lif->identity);
        dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -3487,7 +3558,7 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
 
        if (qcq->flags & IONIC_QCQ_F_INTR) {
                irq_set_affinity_hint(qcq->intr.vector,
-                                     &qcq->intr.affinity_mask);
+                                     *qcq->intr.affinity_mask);
                ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
                                IONIC_INTR_MASK_CLEAR);
        }