idpf: link NAPIs to queues
author     Alexander Lobakin <aleksander.lobakin@intel.com>
           Tue, 26 Aug 2025 15:54:58 +0000 (17:54 +0200)
committer  Tony Nguyen <anthony.l.nguyen@intel.com>
           Mon, 8 Sep 2025 18:05:17 +0000 (11:05 -0700)
Add the missing linking of NAPIs to netdev queues when enabling
interrupt vectors in order to support NAPI configuration and
interfaces requiring get_rx_queue()->napi to be set (like XSk
busy polling).
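
For context, a minimal sketch of what the link enables (illustrative
only, not part of this patch; the consumer-side helper use, the 'idx'
variable and the napi_id read are assumptions based on current net-core
code, not taken from the diff below):

	/* driver side: associate RX queue 'idx' with its NAPI instance */
	netif_queue_set_napi(netdev, idx, NETDEV_QUEUE_TYPE_RX,
			     &q_vector->napi);

	/* consumer side (e.g. busy polling): resolve the queue's NAPI */
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(netdev, idx);
	unsigned int napi_id = 0;

	if (rxq->napi)
		napi_id = rxq->napi->napi_id;	/* fed to busy polling */

Without the driver-side call, rxq->napi stays NULL and such consumers
cannot resolve a NAPI for the queue.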

Currently, idpf_vport_{open,stop}() are called from several flows with
inconsistent RTNL locking, so we need to synchronize them to avoid
runtime assertions. Notably:

* idpf_{open,stop}() -- regular NDOs, RTNL is always taken;
* idpf_initiate_soft_reset() -- usually called under RTNL;
* idpf_init_task() -- called from the init work, needs RTNL;
* idpf_vport_dealloc() -- called without RTNL taken, needs it.

Expand the common idpf_vport_{open,stop}() to take an additional bool
indicating whether the helper must take the RTNL lock itself.
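
For clarity, the resulting calling convention, condensed from the hunks
below:

	/* Callers already holding RTNL pass false; callers that do not
	 * hold it pass true so the helper takes the lock itself.
	 */
	idpf_vport_open(vport, false);	/* idpf_open(): NDO, RTNL held */
	idpf_vport_open(vport, true);	/* idpf_init_task(): RTNL not held */
	idpf_vport_stop(vport, false);	/* idpf_stop(): NDO, RTNL held */
	idpf_vport_stop(vport, true);	/* idpf_vport_dealloc(): RTNL not held */

idpf_initiate_soft_reset() keeps passing false for both, as it is
expected to already run under RTNL.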

Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com> # helper
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c

diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index e327950c93d8e54cf8f424c4e246fdfdd99b0e8a..f4b89d222610fb2b513036417d1231413a974b32 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -884,14 +884,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
 /**
  * idpf_vport_stop - Disable a vport
  * @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
  */
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
 {
        struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
 
        if (np->state <= __IDPF_VPORT_DOWN)
                return;
 
+       if (rtnl)
+               rtnl_lock();
+
        netif_carrier_off(vport->netdev);
        netif_tx_disable(vport->netdev);
 
@@ -913,6 +917,9 @@ static void idpf_vport_stop(struct idpf_vport *vport)
        idpf_vport_queues_rel(vport);
        idpf_vport_intr_rel(vport);
        np->state = __IDPF_VPORT_DOWN;
+
+       if (rtnl)
+               rtnl_unlock();
 }
 
 /**
@@ -936,7 +943,7 @@ static int idpf_stop(struct net_device *netdev)
        idpf_vport_ctrl_lock(netdev);
        vport = idpf_netdev_to_vport(netdev);
 
-       idpf_vport_stop(vport);
+       idpf_vport_stop(vport, false);
 
        idpf_vport_ctrl_unlock(netdev);
 
@@ -1029,7 +1036,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
        idpf_idc_deinit_vport_aux_device(vport->vdev_info);
 
        idpf_deinit_mac_addr(vport);
-       idpf_vport_stop(vport);
+       idpf_vport_stop(vport, true);
 
        if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
                idpf_decfg_netdev(vport);
@@ -1370,8 +1377,9 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
 /**
  * idpf_vport_open - Bring up a vport
  * @vport: vport to bring up
+ * @rtnl: whether to take RTNL lock
  */
-static int idpf_vport_open(struct idpf_vport *vport)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 {
        struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
        struct idpf_adapter *adapter = vport->adapter;
@@ -1381,6 +1389,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
        if (np->state != __IDPF_VPORT_DOWN)
                return -EBUSY;
 
+       if (rtnl)
+               rtnl_lock();
+
        /* we do not allow interface up just yet */
        netif_carrier_off(vport->netdev);
 
@@ -1388,7 +1399,7 @@ static int idpf_vport_open(struct idpf_vport *vport)
        if (err) {
                dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
                        vport->vport_id, err);
-               return err;
+               goto err_rtnl_unlock;
        }
 
        err = idpf_vport_queues_alloc(vport);
@@ -1475,6 +1486,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
                goto deinit_rss;
        }
 
+       if (rtnl)
+               rtnl_unlock();
+
        return 0;
 
 deinit_rss:
@@ -1492,6 +1506,10 @@ queues_rel:
 intr_rel:
        idpf_vport_intr_rel(vport);
 
+err_rtnl_unlock:
+       if (rtnl)
+               rtnl_unlock();
+
        return err;
 }
 
@@ -1572,7 +1590,7 @@ void idpf_init_task(struct work_struct *work)
        np = netdev_priv(vport->netdev);
        np->state = __IDPF_VPORT_DOWN;
        if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
-               idpf_vport_open(vport);
+               idpf_vport_open(vport, true);
 
        /* Spawn and return 'idpf_init_task' work queue until all the
         * default vports are created
@@ -1962,7 +1980,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
                idpf_send_delete_queues_msg(vport);
        } else {
                set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
-               idpf_vport_stop(vport);
+               idpf_vport_stop(vport, false);
        }
 
        idpf_deinit_rss(vport);
@@ -1992,7 +2010,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
                goto err_open;
 
        if (current_state == __IDPF_VPORT_UP)
-               err = idpf_vport_open(vport);
+               err = idpf_vport_open(vport, false);
 
        goto free_vport;
 
@@ -2002,7 +2020,7 @@ err_reset:
 
 err_open:
        if (current_state == __IDPF_VPORT_UP)
-               idpf_vport_open(vport);
+               idpf_vport_open(vport, false);
 
 free_vport:
        kfree(new_vport);
@@ -2240,7 +2258,7 @@ static int idpf_open(struct net_device *netdev)
        if (err)
                goto unlock;
 
-       err = idpf_vport_open(vport);
+       err = idpf_vport_open(vport, false);
 
 unlock:
        idpf_vport_ctrl_unlock(netdev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 53fb5cf496cc2eb8011769e4ebe1b74f10757c33..563de9a3291962d228beeb2a1bf6bba582b1654b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3424,6 +3424,20 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
        vport->q_vectors = NULL;
 }
 
+static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
+{
+       struct napi_struct *napi = link ? &q_vector->napi : NULL;
+       struct net_device *dev = q_vector->vport->netdev;
+
+       for (u32 i = 0; i < q_vector->num_rxq; i++)
+               netif_queue_set_napi(dev, q_vector->rx[i]->idx,
+                                    NETDEV_QUEUE_TYPE_RX, napi);
+
+       for (u32 i = 0; i < q_vector->num_txq; i++)
+               netif_queue_set_napi(dev, q_vector->tx[i]->idx,
+                                    NETDEV_QUEUE_TYPE_TX, napi);
+}
+
 /**
  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
  * @vport: main vport structure
@@ -3444,6 +3458,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
                vidx = vport->q_vector_idxs[vector];
                irq_num = adapter->msix_entries[vidx].vector;
 
+               idpf_q_vector_set_napi(q_vector, false);
                kfree(free_irq(irq_num, q_vector));
        }
 }
@@ -3631,6 +3646,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
                                   "Request_irq failed, error: %d\n", err);
                        goto free_q_irqs;
                }
+
+               idpf_q_vector_set_napi(q_vector, true);
        }
 
        return 0;