www.infradead.org Git - users/hch/misc.git/commitdiff
net: mana: Remove redundant netdev_lock_ops_to_full() calls
author: Saurabh Sengar <ssengar@linux.microsoft.com>
Tue, 9 Sep 2025 04:57:10 +0000 (21:57 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Thu, 11 Sep 2025 00:57:09 +0000 (17:57 -0700)
NET_SHAPER is always selected for the MANA driver. When NET_SHAPER is
enabled, netdev_lock_ops_to_full() effectively reduces to only an
assertion that the lock is held — and the lock is always held on these
paths when NET_SHAPER is enabled.

Remove the redundant netdev_lock_ops_to_full() calls.

Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
Link: https://patch.msgid.link/1757393830-20837-1-git-send-email-ssengar@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/microsoft/mana/mana_en.c

index f4fc86f20213d241e4b9eb751a20b611e023705b..0142fd98392c2ad2d92b13ae3c92ded09e9cbc90 100644 (file)
@@ -2145,10 +2145,8 @@ static void mana_destroy_txq(struct mana_port_context *apc)
                napi = &apc->tx_qp[i].tx_cq.napi;
                if (apc->tx_qp[i].txq.napi_initialized) {
                        napi_synchronize(napi);
-                       netdev_lock_ops_to_full(napi->dev);
                        napi_disable_locked(napi);
                        netif_napi_del_locked(napi);
-                       netdev_unlock_full_to_ops(napi->dev);
                        apc->tx_qp[i].txq.napi_initialized = false;
                }
                mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2301,10 +2299,8 @@ static int mana_create_txq(struct mana_port_context *apc,
                mana_create_txq_debugfs(apc, i);
 
                set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
-               netdev_lock_ops_to_full(net);
                netif_napi_add_locked(net, &cq->napi, mana_poll);
                napi_enable_locked(&cq->napi);
-               netdev_unlock_full_to_ops(net);
                txq->napi_initialized = true;
 
                mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2340,10 +2336,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
        if (napi_initialized) {
                napi_synchronize(napi);
 
-               netdev_lock_ops_to_full(napi->dev);
                napi_disable_locked(napi);
                netif_napi_del_locked(napi);
-               netdev_unlock_full_to_ops(napi->dev);
        }
        xdp_rxq_info_unreg(&rxq->xdp_rxq);
 
@@ -2604,18 +2598,14 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 
        gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
-       netdev_lock_ops_to_full(ndev);
        netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
-       netdev_unlock_full_to_ops(ndev);
 
        WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
                                 cq->napi.napi_id));
        WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                           rxq->page_pool));
 
-       netdev_lock_ops_to_full(ndev);
        napi_enable_locked(&cq->napi);
-       netdev_unlock_full_to_ops(ndev);
 
        mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
 out: