clear_bit(index, ionic->intrs);
}
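+/* Called by the IRQ core when the affinity of this vector changes;
+ * copy the new mask into the driver's saved per-interrupt cpumask so
+ * it stays in sync with the user's setting.
+ */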
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+ cpumask_copy(*intr->affinity_mask, mask);
+}
+
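+/* Nothing to clean up when the notifier reference is dropped. */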
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
struct ionic_queue *q = &qcq->q;
if (qcq->flags & IONIC_QCQ_F_INTR) {
napi_enable(&qcq->napi);
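+ /* watch for user affinity changes, and apply the saved mask
+  * as this vector's affinity hint
+  */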
+ irq_set_affinity_notifier(qcq->intr.vector,
+ &qcq->intr.aff_notify);
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
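+ /* stop tracking affinity changes and drop the hint while the
+  * queue is down; the saved mask is kept for re-enable
+  */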
+ irq_set_affinity_notifier(qcq->intr.vector, NULL);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
+ cpumask_var_t *affinity_mask;
int err;
if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
}
/* try to get the irq on the local numa node first */
- qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
- dev_to_node(lif->ionic->dev));
- if (qcq->intr.cpu != -1)
- cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+ affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
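+ /* only pick a default CPU if no affinity has been saved for
+  * this interrupt yet, so a user-chosen mask is not overwritten
+  */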
+ if (cpumask_empty(*affinity_mask)) {
+ unsigned int cpu;
+
+ cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, *affinity_mask);
+ }
+
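+ /* point this queue's interrupt at the shared mask and set up the
+  * notifier callbacks used when the queue is enabled
+  */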
+ qcq->intr.affinity_mask = affinity_mask;
+ qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+ qcq->intr.aff_notify.release = ionic_irq_aff_release;
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
return 0;
return err;
}
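+/* Allocate one cpumask per interrupt at the ionic level; each qcq's
+ * intr then points at its slot, so the mask persists across queue
+ * teardown and setup.
+ */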
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+ cpumask_var_t *affinity_masks;
+ int nintrs = ionic->nintrs;
+ int i;
+
+ affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!affinity_masks)
+ return -ENOMEM;
+
+ for (i = 0; i < nintrs; i++) {
+ if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+ dev_to_node(ionic->dev)))
+ goto err_out;
+ }
+
+ ionic->affinity_masks = affinity_masks;
+
+ return 0;
+
+err_out:
+ for (--i; i >= 0; i--)
+ free_cpumask_var(affinity_masks[i]);
+ kfree(affinity_masks);
+
+ return -ENOMEM;
+}
+
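+/* Free the per-interrupt cpumasks once nothing references them. */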
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+ int i;
+
+ for (i = 0; i < ionic->nintrs; i++)
+ free_cpumask_var(ionic->affinity_masks[i]);
+ kfree(ionic->affinity_masks);
+ ionic->affinity_masks = NULL;
+}
+
int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
ionic_debugfs_add_lif(lif);
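+ /* the per-interrupt masks must exist before the qcqs are
+  * allocated, since each qcq interrupt points into this array
+  */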
+ err = ionic_affinity_masks_alloc(ionic);
+ if (err)
+ goto err_out_free_lif_info;
+
/* allocate control queues and txrx queue arrays */
ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out_free_lif_info;
+ goto err_out_free_affinity_masks;
/* allocate rss indirection table */
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
err_out_free_qcqs:
ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+ ionic_affinity_masks_free(lif->ionic);
err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_lif_reset(lif);
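+ /* release the per-interrupt affinity masks allocated in
+  * ionic_lif_alloc()
+  */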
+ ionic_affinity_masks_free(lif->ionic);
+
/* free lif info */
kfree(lif->identity);
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}