www.infradead.org Git - users/hch/misc.git/commitdiff
net: gro_cells: Use nested-BH locking for gro_cell
author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Thu, 9 Oct 2025 09:43:38 +0000 (11:43 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 14 Oct 2025 00:33:32 +0000 (17:33 -0700)
The gro_cell data structure is per-CPU variable and relies on disabled
BH for its locking. Without per-CPU locking in local_bh_disable() on
PREEMPT_RT this data structure requires explicit locking.

Add a local_lock_t to the data structure and use
local_lock_nested_bh() for locking. This change adds only lockdep
coverage and does not alter the functional behaviour for !PREEMPT_RT.

Reported-by: syzbot+8715dd783e9b0bef43b1@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/68c6c3b1.050a0220.2ff435.0382.GAE@google.com/
Fixes: 3253cb49cbad ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20251009094338.j1jyKfjR@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/gro_cells.c

index ff8e5b64bf6b76451a69e3eae132b593c60ee204..b43911562f4d10aa3d05c60f343ff89c5d9ed58d 100644 (file)
@@ -8,11 +8,13 @@
 struct gro_cell {
        struct sk_buff_head     napi_skbs;
        struct napi_struct      napi;
+       local_lock_t            bh_lock;
 };
 
 int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
+       bool have_bh_lock = false;
        struct gro_cell *cell;
        int res;
 
@@ -25,6 +27,8 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
                goto unlock;
        }
 
+       local_lock_nested_bh(&gcells->cells->bh_lock);
+       have_bh_lock = true;
        cell = this_cpu_ptr(gcells->cells);
 
        if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
@@ -39,6 +43,9 @@ drop:
        if (skb_queue_len(&cell->napi_skbs) == 1)
                napi_schedule(&cell->napi);
 
+       if (have_bh_lock)
+               local_unlock_nested_bh(&gcells->cells->bh_lock);
+
        res = NET_RX_SUCCESS;
 
 unlock:
@@ -54,6 +61,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
        struct sk_buff *skb;
        int work_done = 0;
 
+       __local_lock_nested_bh(&cell->bh_lock);
        while (work_done < budget) {
                skb = __skb_dequeue(&cell->napi_skbs);
                if (!skb)
@@ -64,6 +72,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
 
        if (work_done < budget)
                napi_complete_done(napi, work_done);
+       __local_unlock_nested_bh(&cell->bh_lock);
        return work_done;
 }
 
@@ -79,6 +88,7 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
                struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
                __skb_queue_head_init(&cell->napi_skbs);
+               local_lock_init(&cell->bh_lock);
 
                set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);