www.infradead.org Git - users/griffoul/linux.git/commitdiff
net: macb: Fix tx_ptr_lock locking
author: Sean Anderson <sean.anderson@linux.dev>
Fri, 29 Aug 2025 14:35:21 +0000 (10:35 -0400)
committer: Jakub Kicinski <kuba@kernel.org>
Mon, 1 Sep 2025 20:11:10 +0000 (13:11 -0700)
macb_start_xmit and macb_tx_poll can be called with bottom-halves
disabled (e.g. from softirq) as well as with interrupts disabled (with
netpoll). Because of this, all other functions taking tx_ptr_lock must
use spin_lock_irqsave.

Fixes: 138badbc21a0 ("net: macb: use NAPI for TX completion path")
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250829143521.1686062-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/cadence/macb_main.c

index 16d28a8b3b56cc0e66007a4013faf32d7263fb92..c769b7dbd3baf5cafe64008e18dff939623528d4 100644 (file)
@@ -1223,12 +1223,13 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 {
        struct macb *bp = queue->bp;
        u16 queue_index = queue - bp->queues;
+       unsigned long flags;
        unsigned int tail;
        unsigned int head;
        int packets = 0;
        u32 bytes = 0;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
        head = queue->tx_head;
        for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
                struct macb_tx_skb      *tx_skb;
@@ -1291,7 +1292,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
            CIRC_CNT(queue->tx_head, queue->tx_tail,
                     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
                netif_wake_subqueue(bp->dev, queue_index);
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
        return packets;
 }
@@ -1707,8 +1708,9 @@ static void macb_tx_restart(struct macb_queue *queue)
 {
        struct macb *bp = queue->bp;
        unsigned int head_idx, tbqp;
+       unsigned long flags;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
        if (queue->tx_head == queue->tx_tail)
                goto out_tx_ptr_unlock;
@@ -1720,19 +1722,20 @@ static void macb_tx_restart(struct macb_queue *queue)
        if (tbqp == head_idx)
                goto out_tx_ptr_unlock;
 
-       spin_lock_irq(&bp->lock);
+       spin_lock(&bp->lock);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-       spin_unlock_irq(&bp->lock);
+       spin_unlock(&bp->lock);
 
 out_tx_ptr_unlock:
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 }
 
 static bool macb_tx_complete_pending(struct macb_queue *queue)
 {
        bool retval = false;
+       unsigned long flags;
 
-       spin_lock(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
        if (queue->tx_head != queue->tx_tail) {
                /* Make hw descriptor updates visible to CPU */
                rmb();
@@ -1740,7 +1743,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
                if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
                        retval = true;
        }
-       spin_unlock(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
        return retval;
 }
 
@@ -2308,6 +2311,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct macb_queue *queue = &bp->queues[queue_index];
        unsigned int desc_cnt, nr_frags, frag_size, f;
        unsigned int hdrlen;
+       unsigned long flags;
        bool is_lso;
        netdev_tx_t ret = NETDEV_TX_OK;
 
@@ -2368,7 +2372,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
        }
 
-       spin_lock_bh(&queue->tx_ptr_lock);
+       spin_lock_irqsave(&queue->tx_ptr_lock, flags);
 
        /* This is a hard error, log it. */
        if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2392,15 +2396,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
                             skb->len);
 
-       spin_lock_irq(&bp->lock);
+       spin_lock(&bp->lock);
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-       spin_unlock_irq(&bp->lock);
+       spin_unlock(&bp->lock);
 
        if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
                netif_stop_subqueue(dev, queue_index);
 
 unlock:
-       spin_unlock_bh(&queue->tx_ptr_lock);
+       spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
 
        return ret;
 }