www.infradead.org Git - users/hch/misc.git/commitdiff
net: cadence: macb: Implement BQL
author     Sean Anderson <sean.anderson@linux.dev>
           Thu, 20 Feb 2025 16:42:57 +0000 (11:42 -0500)
committer  Jakub Kicinski <kuba@kernel.org>
           Sat, 22 Feb 2025 00:39:08 +0000 (16:39 -0800)
Implement byte queue limits to allow queuing disciplines to account for
packets enqueued in the ring buffer but not yet transmitted. There is a
separate set of transmit functions for AT91 that I haven't touched, since
I don't have hardware to test on.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250220164257.96859-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
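
For context, the generic BQL hook pattern this patch wires into macb looks
roughly like the sketch below. The my_drv_* names are illustrative
placeholders, not macb functions; only the netdev_tx_*_queue() helpers and
the other calls used (all from include/linux/netdevice.h and
include/linux/skbuff.h) are real kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: account each frame as it is posted to the ring. */
static netdev_tx_t my_drv_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);

	/* ... map buffers, write descriptors, memory barrier ... */
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue_index),
			     skb->len);
	/* ... tell the hardware to start transmitting ... */
	return NETDEV_TX_OK;
}

/* Completion path: report what the hardware actually finished so the
 * dynamic queue limit can adapt. Error paths that reap descriptors
 * must report their totals the same way.
 */
static void my_drv_tx_complete(struct net_device *dev, u16 queue_index)
{
	unsigned int packets = 0, bytes = 0;

	/* ... walk completed descriptors, summing packets and skb->len ... */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, queue_index),
				  packets, bytes);
}

/* Teardown: clear BQL state so stale in-flight byte counts cannot
 * stall the queue after the ring is reinitialized.
 */
static void my_drv_stop(struct net_device *dev, u16 queue_index)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue_index));
}

The diff below applies exactly this pattern: netdev_tx_sent_queue() in
macb_start_xmit(), netdev_tx_completed_queue() in both macb_tx_complete()
and macb_tx_error_task(), and netdev_tx_reset_queue() in macb_close().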
drivers/net/ethernet/cadence/macb_main.c

index 2112a9701e05f2900f0f5920c16ab5a7d679addb..5345f3e1a7957f5dc1e15316503c42832bf97af7 100644
@@ -1079,15 +1079,18 @@ static void macb_tx_error_task(struct work_struct *work)
                                                      tx_error_task);
        bool                    halt_timeout = false;
        struct macb             *bp = queue->bp;
+       u32                     queue_index;
+       u32                     packets = 0;
+       u32                     bytes = 0;
        struct macb_tx_skb      *tx_skb;
        struct macb_dma_desc    *desc;
        struct sk_buff          *skb;
        unsigned int            tail;
        unsigned long           flags;
 
+       queue_index = queue - bp->queues;
        netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
-                   (unsigned int)(queue - bp->queues),
-                   queue->tx_tail, queue->tx_head);
+                   queue_index, queue->tx_tail, queue->tx_head);
 
        /* Prevent the queue NAPI TX poll from running, as it calls
         * macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1140,8 +1143,10 @@ static void macb_tx_error_task(struct work_struct *work)
                                            skb->data);
                                bp->dev->stats.tx_packets++;
                                queue->stats.tx_packets++;
+                               packets++;
                                bp->dev->stats.tx_bytes += skb->len;
                                queue->stats.tx_bytes += skb->len;
+                               bytes += skb->len;
                        }
                } else {
                        /* "Buffers exhausted mid-frame" errors may only happen
@@ -1158,6 +1163,9 @@ static void macb_tx_error_task(struct work_struct *work)
                macb_tx_unmap(bp, tx_skb, 0);
        }
 
+       netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+                                 packets, bytes);
+
        /* Set end of TX queue */
        desc = macb_tx_desc(queue, 0);
        macb_set_addr(bp, desc, 0);
@@ -1228,6 +1236,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
        unsigned int tail;
        unsigned int head;
        int packets = 0;
+       u32 bytes = 0;
 
        spin_lock(&queue->tx_ptr_lock);
        head = queue->tx_head;
@@ -1269,6 +1278,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
                                bp->dev->stats.tx_bytes += skb->len;
                                queue->stats.tx_bytes += skb->len;
                                packets++;
+                               bytes += skb->len;
                        }
 
                        /* Now we can safely release resources */
@@ -1283,6 +1293,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
                }
        }
 
+       netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+                                 packets, bytes);
+
        queue->tx_tail = tail;
        if (__netif_subqueue_stopped(bp->dev, queue_index) &&
            CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -2384,6 +2397,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Make newly initialized descriptor visible to hardware */
        wmb();
        skb_tx_timestamp(skb);
+       netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+                            skb->len);
 
        spin_lock_irq(&bp->lock);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -3019,6 +3034,7 @@ static int macb_close(struct net_device *dev)
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                napi_disable(&queue->napi_rx);
                napi_disable(&queue->napi_tx);
+               netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
        }
 
        phylink_stop(bp->phylink);
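
Note that the netdev_tx_reset_queue() call in macb_close() is what keeps
the BQL accounting consistent across an interface down/up cycle: bytes
counted by netdev_tx_sent_queue() but never reported via
netdev_tx_completed_queue() would otherwise be carried over as in-flight
and could stall the queue once it is reopened.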