eth: fbnic: fix reporting of alloc_failed qstats
author Jakub Kicinski <kuba@kernel.org>
Tue, 7 Oct 2025 23:26:50 +0000 (16:26 -0700)
committer Paolo Abeni <pabeni@redhat.com>
Thu, 9 Oct 2025 09:10:02 +0000 (11:10 +0200)
Rx processing under normal circumstances uses 3 rings per queue -
2 buffer rings (headers, payloads) and a completion ring. All of
these rings have a struct fbnic_ring. Make sure we expose the
alloc_failed counter from the buffer rings; previously only the
alloc_failed count from the completion ring was reported, even
though all ring types may increment this counter (the buffer
rings do so in __fbnic_fill_bdq()).

This makes the pp_alloc_fail.py test pass; it expects the qstat
to keep incrementing as page pool allocation failures are injected.
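
For illustration, a minimal userspace sketch of the accounting the
fix performs: the per-queue qstat is the completion ring's
alloc_failed plus the counts from both buffer (BDQ) rings. This is
a simplified model, not driver code; the names below (queue_stats,
ring, q_triad, rx_alloc_fail) are stand-ins for the driver's
fbnic_queue_stats/fbnic_ring/fbnic_q_triad, and the real driver
reads each ring's counters under u64_stats_fetch_begin()/retry().

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-ins for the driver structures (illustration only). */
  struct queue_stats {
          uint64_t alloc_failed;
  };

  struct ring {
          struct queue_stats stats;
  };

  /* One Rx queue: two buffer rings plus a completion ring, mirroring
   * the sub0 (header), sub1 (payload), cmpl (completion) layout of
   * struct fbnic_q_triad.
   */
  struct q_triad {
          struct ring sub0;       /* header buffer ring */
          struct ring sub1;       /* payload buffer ring */
          struct ring cmpl;       /* Rx completion ring */
  };

  /* The per-queue qstat after the fix: completion-ring failures plus
   * both buffer rings' refill failures.
   */
  static uint64_t rx_alloc_fail(const struct q_triad *qt)
  {
          return qt->cmpl.stats.alloc_failed +
                 qt->sub0.stats.alloc_failed +
                 qt->sub1.stats.alloc_failed;
  }

  int main(void)
  {
          struct q_triad qt = {
                  .sub0.stats.alloc_failed = 3,
                  .sub1.stats.alloc_failed = 2,
                  .cmpl.stats.alloc_failed = 0,
          };

          /* Before the fix only cmpl was counted (0 here); with the fix
           * the reported alloc_fail is 5.
           */
          printf("rx alloc_fail = %llu\n",
                 (unsigned long long)rx_alloc_fail(&qt));
          return 0;
  }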

Reviewed-by: Simon Horman <horms@kernel.org>
Fixes: 67dc4eb5fc92 ("eth: fbnic: report software Rx queue stats")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://patch.msgid.link/20251007232653.2099376-7-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
drivers/net/ethernet/meta/fbnic/fbnic_txrx.h

diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index a37906b70c3aca13eb89eea7df3a57669d6989b9..95fac020eb93c7b05c06fdf194614cd2f4c159d7 100644
@@ -190,8 +190,8 @@ static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
        }
 
        for (j = 0; j < nv->rxt_count; j++, i++) {
-               fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
-               fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+               fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
+               fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
                fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
        }
 }
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index d12b4cad84a57c006750fdaa904dd9b5aba0c8fe..e95be0e7bd9e0d6f8916e28fab817977e70ff16e 100644
@@ -543,17 +543,21 @@ static const struct net_device_ops fbnic_netdev_ops = {
 static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
                                     struct netdev_queue_stats_rx *rx)
 {
+       u64 bytes, packets, alloc_fail, alloc_fail_bdq;
        struct fbnic_net *fbn = netdev_priv(dev);
        struct fbnic_ring *rxr = fbn->rx[idx];
        struct fbnic_dev *fbd = fbn->fbd;
        struct fbnic_queue_stats *stats;
-       u64 bytes, packets, alloc_fail;
        u64 csum_complete, csum_none;
+       struct fbnic_q_triad *qt;
        unsigned int start;
 
        if (!rxr)
                return;
 
+       /* fbn->rx points to completion queues */
+       qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
        stats = &rxr->stats;
        do {
                start = u64_stats_fetch_begin(&stats->syncp);
@@ -564,6 +568,20 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
                csum_none = stats->rx.csum_none;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
 
+       stats = &qt->sub0.stats;
+       do {
+               start = u64_stats_fetch_begin(&stats->syncp);
+               alloc_fail_bdq = stats->bdq.alloc_failed;
+       } while (u64_stats_fetch_retry(&stats->syncp, start));
+       alloc_fail += alloc_fail_bdq;
+
+       stats = &qt->sub1.stats;
+       do {
+               start = u64_stats_fetch_begin(&stats->syncp);
+               alloc_fail_bdq = stats->bdq.alloc_failed;
+       } while (u64_stats_fetch_retry(&stats->syncp, start));
+       alloc_fail += alloc_fail_bdq;
+
        rx->bytes = bytes;
        rx->packets = packets;
        rx->alloc_fail = alloc_fail;
@@ -641,7 +659,8 @@ static void fbnic_get_base_stats(struct net_device *dev,
 
        rx->bytes = fbn->rx_stats.bytes;
        rx->packets = fbn->rx_stats.packets;
-       rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+       rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+               fbn->bdq_stats.bdq.alloc_failed;
        rx->csum_complete = fbn->rx_stats.rx.csum_complete;
        rx->csum_none = fbn->rx_stats.rx.csum_none;
 }
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index e84e0527c3a994bcdc48e9f932d46580fea39237..b0a87c57910f261f23d24a8c122c85c153e024e4 100644
@@ -68,6 +68,7 @@ struct fbnic_net {
        /* Storage for stats after ring destruction */
        struct fbnic_queue_stats tx_stats;
        struct fbnic_queue_stats rx_stats;
+       struct fbnic_queue_stats bdq_stats;
        u64 link_down_events;
 
        /* Time stamping filter config */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 26328e8090c6322f19b5bb1bacdd6547156fe9cc..b1e8ce89870f73c76568fdc7f46fd879aa19762a 100644
@@ -904,7 +904,7 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq)
                netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
                if (!netmem) {
                        u64_stats_update_begin(&bdq->stats.syncp);
-                       bdq->stats.rx.alloc_failed++;
+                       bdq->stats.bdq.alloc_failed++;
                        u64_stats_update_end(&bdq->stats.syncp);
 
                        break;
@@ -1414,6 +1414,17 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
        BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
 }
 
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+                                      struct fbnic_ring *bdq)
+{
+       struct fbnic_queue_stats *stats = &bdq->stats;
+
+       /* Capture stats from queues before disassociating them */
+       fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+       /* Remember to add new stats here */
+       BUILD_BUG_ON(sizeof(fbn->rx_stats.bdq) / 8 != 1);
+}
+
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
                                      struct fbnic_ring *txr)
 {
@@ -1486,6 +1497,15 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
        fbn->rx[rxr->q_idx] = NULL;
 }
 
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+                                 struct fbnic_ring *bdq)
+{
+       if (!(bdq->flags & FBNIC_RING_F_STATS))
+               return;
+
+       fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
 static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
 {
        page_pool_destroy(qt->sub0.page_pool);
@@ -1505,8 +1525,8 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
        }
 
        for (j = 0; j < nv->rxt_count; j++, i++) {
-               fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
-               fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+               fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+               fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
                fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
        }
 
@@ -1705,11 +1725,13 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
        while (rxt_count) {
                /* Configure header queue */
                db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
-               fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+               fbnic_ring_init(&qt->sub0, db, 0,
+                               FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
 
                /* Configure payload queue */
                db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
-               fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+               fbnic_ring_init(&qt->sub1, db, 0,
+                               FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
 
                /* Configure Rx completion queue */
                db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
@@ -2828,8 +2850,8 @@ static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
        real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
        nv = fbn->napi[idx % fbn->num_napi];
 
-       fbnic_aggregate_ring_rx_counters(fbn, &real->sub0);
-       fbnic_aggregate_ring_rx_counters(fbn, &real->sub1);
+       fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+       fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
        fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
 
        memcpy(real, qmem, sizeof(*real));
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 4a41e21ed542c7753020e73c8b209a1d10c70460..ca37da5a0b1797c17459bd04e418e38eb876c5bc 100644
@@ -92,6 +92,9 @@ struct fbnic_queue_stats {
                        u64 csum_none;
                        u64 length_errors;
                } rx;
+               struct {
+                       u64 alloc_failed;
+               } bdq;
        };
        u64 dropped;
        struct u64_stats_sync syncp;
@@ -165,6 +168,8 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
 
 void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
                                      struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+                                      struct fbnic_ring *rxr);
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
                                      struct fbnic_ring *txr);
 void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,