www.infradead.org Git - users/hch/misc.git/commitdiff
bng_en: Add initial support for CP and NQ rings
author: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Fri, 19 Sep 2025 17:47:34 +0000 (23:17 +0530)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 23 Sep 2025 00:51:27 +0000 (17:51 -0700)
Allocate CP and NQ related data structures and add support to
associate NQ and CQ rings. Also, add the association of NQ, NAPI,
and interrupts.

Signed-off-by: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
Link: https://patch.msgid.link/20250919174742.24969-4-bhargava.marreddy@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnge/bnge.h
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
drivers/net/ethernet/broadcom/bnge/bnge_resc.c

index 03e55b931f7b82e4fa0374d9ab5dfad11a9cedf0..c536c0cc66ef408a535a1bcfe451738bae69f794 100644 (file)
@@ -215,5 +215,6 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
 }
 
 bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
 
 #endif /* _BNGE_H_ */
index c25a793b8aeebaffc8a29bcdd084666f9d13891d..67da7001427abb8d96497cc213d22f214c46ccb6 100644 (file)
 #define BNGE_RING_TO_TC(bd, tx)                \
        ((tx) / (bd)->tx_nr_rings_per_tc)
 
+#define BNGE_TC_TO_RING_BASE(bd, tc)   \
+       ((tc) * (bd)->tx_nr_rings_per_tc)
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+       struct bnge_ring_mem_info *rmem = &nqr->ring_struct.ring_mem;
+
+       /* Drop the aliases held by the generic ring memory descriptor
+        * first, then release the NQ page-pointer and DMA-address arrays.
+        */
+       rmem->pg_arr = NULL;
+       rmem->dma_arr = NULL;
+       kfree(nqr->desc_ring);
+       nqr->desc_ring = NULL;
+       kfree(nqr->desc_mapping);
+       nqr->desc_mapping = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+       struct bnge_ring_mem_info *rmem = &cpr->ring_struct.ring_mem;
+
+       /* Drop the aliases held by the generic ring memory descriptor
+        * first, then release the CP page-pointer and DMA-address arrays.
+        */
+       rmem->pg_arr = NULL;
+       rmem->dma_arr = NULL;
+       kfree(cpr->desc_ring);
+       cpr->desc_ring = NULL;
+       kfree(cpr->desc_mapping);
+       cpr->desc_mapping = NULL;
+}
+
+/* Allocate the n-entry page-pointer and DMA-address arrays for one NQ.
+ * Returns 0 on success or -ENOMEM with nothing left allocated.
+ */
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+       nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+       if (!nqr->desc_ring)
+               return -ENOMEM;
+
+       nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+       if (nqr->desc_mapping)
+               return 0;
+
+       /* Unwind the first allocation when the second one fails. */
+       kfree(nqr->desc_ring);
+       nqr->desc_ring = NULL;
+       return -ENOMEM;
+}
+
+/* Allocate the n-entry page-pointer and DMA-address arrays for one CP
+ * ring.  Returns 0 on success or -ENOMEM with nothing left allocated.
+ */
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+       cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+       if (!cpr->desc_ring)
+               return -ENOMEM;
+
+       cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+       if (cpr->desc_mapping)
+               return 0;
+
+       /* Unwind the first allocation when the second one fails. */
+       kfree(cpr->desc_ring);
+       cpr->desc_ring = NULL;
+       return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int idx;
+
+       /* Release the per-NQ descriptor arrays of every NAPI instance. */
+       for (idx = 0; idx < bd->nq_nr_rings; idx++)
+               bnge_free_nq_desc_arr(&bn->bnapi[idx]->nq_ring);
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int idx, rc;
+
+       for (idx = 0; idx < bd->nq_nr_rings; idx++) {
+               rc = bnge_alloc_nq_desc_arr(&bn->bnapi[idx]->nq_ring,
+                                           bn->cp_nr_pages);
+               if (rc) {
+                       /* Unwind everything allocated so far; freeing
+                        * the untouched entries is a safe no-op.
+                        */
+                       bnge_free_nq_arrays(bn);
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+/* Undo bnge_alloc_nq_tree(): for each NQ, release its ring memory, then
+ * every child completion ring and the array that holds them.  Safe to
+ * call on a partially built tree (used on the allocation error path).
+ */
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i;
+
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               struct bnge_napi *bnapi = bn->bnapi[i];
+               struct bnge_nq_ring_info *nqr;
+               struct bnge_ring_struct *ring;
+               int j;
+
+               nqr = &bnapi->nq_ring;
+               ring = &nqr->ring_struct;
+
+               bnge_free_ring(bd, &ring->ring_mem);
+
+               /* NQs past the point of an allocation failure have no
+                * CP ring array yet.
+                */
+               if (!nqr->cp_ring_arr)
+                       continue;
+
+               for (j = 0; j < nqr->cp_ring_count; j++) {
+                       struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+                       /* Free the ring's DMA pages first, then the
+                        * software descriptor arrays that describe them.
+                        */
+                       ring = &cpr->ring_struct;
+                       bnge_free_ring(bd, &ring->ring_mem);
+                       bnge_free_cp_desc_arr(cpr);
+               }
+               kfree(nqr->cp_ring_arr);
+               nqr->cp_ring_arr = NULL;
+               nqr->cp_ring_count = 0;
+       }
+}
+
+/* Allocate one completion ring: the software descriptor arrays first,
+ * then the DMA ring memory they describe.  Returns 0 or a negative
+ * errno with nothing left allocated.
+ */
+static int alloc_one_cp_ring(struct bnge_net *bn,
+                            struct bnge_cp_ring_info *cpr)
+{
+       struct bnge_ring_mem_info *rmem;
+       struct bnge_ring_struct *ring;
+       struct bnge_dev *bd = bn->bd;
+       int rc;
+
+       rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+       if (rc)
+               return rc;       /* propagate, don't hard-code -ENOMEM */
+
+       /* Point the generic ring allocator at the freshly allocated
+        * page and DMA arrays before asking it to back them with pages.
+        */
+       ring = &cpr->ring_struct;
+       rmem = &ring->ring_mem;
+       rmem->nr_pages = bn->cp_nr_pages;
+       rmem->page_size = HW_CMPD_RING_SIZE;
+       rmem->pg_arr = (void **)cpr->desc_ring;
+       rmem->dma_arr = cpr->desc_mapping;
+       rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+       rc = bnge_alloc_ring(bd, rmem);
+       if (rc)
+               goto err_free_cp_desc_arr;
+       return 0;
+
+err_free_cp_desc_arr:
+       bnge_free_cp_desc_arr(cpr);
+       return rc;
+}
+
+/* Build the NQ tree: one notification queue per vector, each owning an
+ * array of child completion rings (one RX CQ and/or one TX CQ per
+ * traffic class), and wire those CQs into the rx_ring/tx_ring tables.
+ */
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i, j, ulp_msix, rc;
+       int tcs = 1;    /* single traffic class for now */
+
+       /* L2 NQ vectors are mapped after the MSI-X range reserved for
+        * the aux (RoCE) device.
+        */
+       ulp_msix = bnge_aux_get_msix(bd);
+       for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+               bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+               struct bnge_napi *bnapi = bn->bnapi[i];
+               struct bnge_nq_ring_info *nqr;
+               struct bnge_cp_ring_info *cpr;
+               struct bnge_ring_struct *ring;
+               int cp_count = 0, k;
+               int rx = 0, tx = 0;
+
+               nqr = &bnapi->nq_ring;
+               nqr->bnapi = bnapi;
+               ring = &nqr->ring_struct;
+
+               rc = bnge_alloc_ring(bd, &ring->ring_mem);
+               if (rc)
+                       goto err_free_nq_tree;
+
+               ring->map_idx = ulp_msix + i;
+
+               /* Count the CP rings this NQ serves: one for RX, plus
+                * one per traffic class when it also carries TX (shared
+                * channels pair RX and TX on the same NQ; otherwise the
+                * TX-only NQs follow the RX ones).
+                */
+               if (i < bd->rx_nr_rings) {
+                       cp_count++;
+                       rx = 1;
+               }
+
+               if ((sh && i < bd->tx_nr_rings) ||
+                   (!sh && i >= bd->rx_nr_rings)) {
+                       cp_count += tcs;
+                       tx = 1;
+               }
+
+               nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+                                          GFP_KERNEL);
+               if (!nqr->cp_ring_arr) {
+                       rc = -ENOMEM;
+                       goto err_free_nq_tree;
+               }
+
+               nqr->cp_ring_count = cp_count;
+
+               for (k = 0; k < cp_count; k++) {
+                       cpr = &nqr->cp_ring_arr[k];
+                       rc = alloc_one_cp_ring(bn, cpr);
+                       if (rc)
+                               goto err_free_nq_tree;
+
+                       cpr->bnapi = bnapi;
+                       cpr->cp_idx = k;
+                       /* Slot 0 is the RX completion ring when this NQ
+                        * has one; the remaining slots are TX, one per
+                        * traffic class.
+                        */
+                       if (!k && rx) {
+                               bn->rx_ring[i].rx_cpr = cpr;
+                               cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+                       } else {
+                               int n, tc = k - rx;
+
+                               n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+                               bn->tx_ring[n].tx_cpr = cpr;
+                               cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+                       }
+               }
+               /* j counts NQs that own TX rings; it is the per-TC TX
+                * ring index used above.
+                */
+               if (tx)
+                       j++;
+       }
+       return 0;
+
+err_free_nq_tree:
+       bnge_free_nq_tree(bn);
+       return rc;
+}
+
 static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
 {
        return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
@@ -216,6 +443,8 @@ static void bnge_free_core(struct bnge_net *bn)
 {
        bnge_free_tx_rings(bn);
        bnge_free_rx_rings(bn);
+       bnge_free_nq_tree(bn);
+       bnge_free_nq_arrays(bn);
        kfree(bn->tx_ring_map);
        bn->tx_ring_map = NULL;
        kfree(bn->tx_ring);
@@ -302,6 +531,10 @@ static int bnge_alloc_core(struct bnge_net *bn)
                txr->bnapi = bnapi2;
        }
 
+       rc = bnge_alloc_nq_arrays(bn);
+       if (rc)
+               goto err_free_core;
+
        bnge_init_ring_struct(bn);
 
        rc = bnge_alloc_rx_rings(bn);
@@ -309,6 +542,10 @@ static int bnge_alloc_core(struct bnge_net *bn)
                goto err_free_core;
 
        rc = bnge_alloc_tx_rings(bn);
+       if (rc)
+               goto err_free_core;
+
+       rc = bnge_alloc_nq_tree(bn);
        if (rc)
                goto err_free_core;
        return 0;
@@ -318,6 +555,166 @@ err_free_core:
        return rc;
 }
 
+/* Map NQ index @n to its IRQ table slot: the MSI-X map index recorded
+ * in the NQ's ring structure at allocation time.
+ */
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+       return bn->bnapi[n]->nq_ring.ring_struct.map_idx;
+}
+
+/* MSI-X handler stub; NAPI scheduling to be added in a future patch,
+ * for now the interrupt is simply acknowledged.
+ */
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+       return IRQ_HANDLED;
+}
+
+/* Name each MSI-X vector after its role ("rx", "tx" or "TxRx") and
+ * install the interrupt handler in the IRQ table.
+ */
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+       struct net_device *dev = bn->netdev;
+       struct bnge_dev *bd = bn->bd;
+       int name_len, i;
+
+       name_len = sizeof(bd->irq_tbl[0].name);
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               int map_idx = bnge_cp_num_to_irq_num(bn, i);
+               const char *attr = "tx";
+
+               if (bd->flags & BNGE_EN_SHARED_CHNL)
+                       attr = "TxRx";
+               else if (i < bd->rx_nr_rings)
+                       attr = "rx";
+
+               snprintf(bd->irq_tbl[map_idx].name, name_len, "%s-%s-%d",
+                        dev->name, attr, i);
+               bd->irq_tbl[map_idx].handler = bnge_msix;
+       }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+
+       bnge_setup_msix(bn);
+
+       /* Publish the ring counts to the stack's queue bookkeeping. */
+       return netif_set_real_num_queues(bn->netdev, bd->tx_nr_rings,
+                                        bd->rx_nr_rings);
+}
+
+/* Release every IRQ taken by bnge_request_irq(); safe to call on a
+ * partially requested set (used on its error path).
+ */
+static void bnge_free_irq(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       struct bnge_irq *irq;
+       int i;
+
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+               irq = &bd->irq_tbl[map_idx];
+               if (irq->requested) {
+                       /* Clear the affinity hint before the vector is
+                        * released, then free the cpumask backing it.
+                        */
+                       if (irq->have_cpumask) {
+                               irq_set_affinity_hint(irq->vector, NULL);
+                               free_cpumask_var(irq->cpu_mask);
+                               irq->have_cpumask = 0;
+                       }
+                       /* dev_id must match the one passed to request_irq(). */
+                       free_irq(irq->vector, bn->bnapi[i]);
+               }
+
+               irq->requested = 0;
+       }
+}
+
+/* Set up vector names/handlers, request one IRQ per NQ, attach each
+ * vector to its NAPI context, and spread affinity hints across the
+ * device's NUMA node.  Unwinds completely on failure.
+ */
+static int bnge_request_irq(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i, rc;
+
+       rc = bnge_setup_interrupts(bn);
+       if (rc) {
+               netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+               return rc;
+       }
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               int map_idx = bnge_cp_num_to_irq_num(bn, i);
+               struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+               /* The per-NQ bnapi is the dev_id; bnge_free_irq() must
+                * pass the same pointer when releasing the vector.
+                */
+               rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+                                bn->bnapi[i]);
+               if (rc)
+                       goto err_free_irq;
+
+               netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+               irq->requested = 1;
+
+               /* Affinity is best-effort: a failed cpumask allocation
+                * is tolerated (no hint is set for this vector).
+                */
+               if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+                       int numa_node = dev_to_node(&bd->pdev->dev);
+
+                       irq->have_cpumask = 1;
+                       cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                                       irq->cpu_mask);
+                       rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+                       if (rc) {
+                               netdev_warn(bn->netdev,
+                                           "Set affinity failed, IRQ = %d\n",
+                                           irq->vector);
+                               goto err_free_irq;
+                       }
+               }
+       }
+       return 0;
+
+err_free_irq:
+       bnge_free_irq(bn);
+       return rc;
+}
+
+/* NAPI poll stub: no rings are serviced yet (deferred to the next
+ * patch series), so always report zero work and complete immediately.
+ */
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+       int work_done = 0;
+
+       napi_complete_done(napi, work_done);
+
+       return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i;
+
+       /* Register one NAPI context per notification queue. */
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               struct bnge_napi *bnapi = bn->bnapi[i];
+
+               netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+                                            bnge_napi_poll, bnapi->index);
+       }
+}
+
+/* Tear down the NAPI contexts: detach the queue-to-NAPI pointers
+ * first, then delete each NAPI instance, and finally wait out the RCU
+ * grace period so no reader still holds a stale pointer.
+ */
+static void bnge_del_napi(struct bnge_net *bn)
+{
+       struct bnge_dev *bd = bn->bd;
+       int i;
+
+       for (i = 0; i < bd->rx_nr_rings; i++)
+               netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+       for (i = 0; i < bd->tx_nr_rings; i++)
+               netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               struct bnge_napi *bnapi = bn->bnapi[i];
+
+               __netif_napi_del_locked(&bnapi->napi);
+       }
+
+       /* Wait for RCU grace period after removing NAPI instances */
+       synchronize_net();
+}
+
 static int bnge_open_core(struct bnge_net *bn)
 {
        struct bnge_dev *bd = bn->bd;
@@ -337,8 +734,20 @@ static int bnge_open_core(struct bnge_net *bn)
                return rc;
        }
 
+       bnge_init_napi(bn);
+       rc = bnge_request_irq(bn);
+       if (rc) {
+               netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+               goto err_del_napi;
+       }
+
        set_bit(BNGE_STATE_OPEN, &bd->state);
        return 0;
+
+err_del_napi:
+       bnge_del_napi(bn);
+       bnge_free_core(bn);
+       return rc;
 }
 
 static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -365,6 +774,9 @@ static void bnge_close_core(struct bnge_net *bn)
        struct bnge_dev *bd = bn->bd;
 
        clear_bit(BNGE_STATE_OPEN, &bd->state);
+       bnge_free_irq(bn);
+       bnge_del_napi(bn);
+
        bnge_free_core(bn);
 }
 
@@ -587,6 +999,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
        bnge_init_l2_fltr_tbl(bn);
        bnge_init_mac_addr(bd);
 
+       netdev->request_ops_lock = true;
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
index 92bae665f59cc7360177a3047893556e69d3a9e3..bccddae09fa7037aa15aca5a5a52f90d0c2104a6 100644 (file)
@@ -133,6 +133,9 @@ enum {
 
 #define BNGE_NET_EN_TPA                (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
 
+#define BNGE_NQ_HDL_TYPE_RX    0x00
+#define BNGE_NQ_HDL_TYPE_TX    0x01
+
 struct bnge_net {
        struct bnge_dev         *bd;
        struct net_device       *netdev;
@@ -172,6 +175,8 @@ struct bnge_net {
 
        u16                             *tx_ring_map;
        enum dma_data_direction         rx_dir;
+
+       int                             total_irqs;
 };
 
 #define BNGE_DEFAULT_RX_RING_SIZE      511
@@ -223,6 +228,8 @@ struct bnge_cp_ring_info {
        dma_addr_t              *desc_mapping;
        struct tx_cmp           **desc_ring;
        struct bnge_ring_struct ring_struct;
+       u8                      cp_ring_type;
+       u8                      cp_idx;
 };
 
 struct bnge_nq_ring_info {
@@ -230,6 +237,9 @@ struct bnge_nq_ring_info {
        dma_addr_t              *desc_mapping;
        struct nqe_cn           **desc_ring;
        struct bnge_ring_struct ring_struct;
+
+       int                             cp_ring_count;
+       struct bnge_cp_ring_info        *cp_ring_arr;
 };
 
 struct bnge_rx_ring_info {
index c79a3607a1b745eb0411398ddac3e4e8b5e6d45a..5597af1b3b7cde267f48bf6cf98466da85c1ad76 100644 (file)
@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
        return min_t(int, roce_msix, num_online_cpus() + 1);
 }
 
-static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
 {
        if (bnge_is_roce_en(bd))
                return bd->aux_num_msix;