netfilter: nft_counter: Disable BH in nft_counter_offload_stats().
author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>
		Tue, 20 Aug 2024 07:54:30 +0000 (09:54 +0200)
committer	Pablo Neira Ayuso <pablo@netfilter.org>
		Tue, 20 Aug 2024 10:26:22 +0000 (12:26 +0200)
The sequence counter nft_counter_seq is a per-CPU counter with no lock
associated with it. nft_counter_do_eval() uses the same counter and
disables BH, which suggests that it can be invoked from softirq context.
This in turn means that nft_counter_offload_stats(), which disables only
preemption, can be interrupted by nft_counter_do_eval(), leading to two
writers for one seqcount_t.
This can result in losing stats or in statistics being read while they
are being updated.
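
For reference, a minimal sketch of the datapath writer (paraphrasing
nft_counter_do_eval() in net/netfilter/nft_counter.c, not a verbatim copy
of the upstream code): it already wraps its seqcount write section in
local_bh_disable()/local_bh_enable(), which is why it can interrupt a
writer that only disabled preemption.

	static void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
					struct nft_regs *regs,
					const struct nft_pktinfo *pkt)
	{
		struct nft_counter *this_cpu;
		seqcount_t *myseq;

		/* Runs from the packet path (softirq context). With BH
		 * disabled, no other writer on this CPU can open a
		 * seqcount write section concurrently.
		 */
		local_bh_disable();
		this_cpu = this_cpu_ptr(priv->counter);
		myseq = this_cpu_ptr(&nft_counter_seq);

		write_seqcount_begin(myseq);
		this_cpu->bytes += pkt->skb->len;
		this_cpu->packets++;
		write_seqcount_end(myseq);
		local_bh_enable();
	}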

Disable BH during the stats update in nft_counter_offload_stats() to
ensure there is only one writer at a time.

Fixes: b72920f6e4a9d ("netfilter: nftables: counter hardware offload support")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
net/netfilter/nft_counter.c

index 291ed2026367ec4a5b2b893caa3d5abfcf2c5959..16f40b503d3798b45ebd69552d654e2071044d11 100644 (file)
@@ -265,7 +265,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
        struct nft_counter *this_cpu;
        seqcount_t *myseq;
 
-       preempt_disable();
+       local_bh_disable();
        this_cpu = this_cpu_ptr(priv->counter);
        myseq = this_cpu_ptr(&nft_counter_seq);
 
@@ -273,7 +273,7 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
        this_cpu->packets += stats->pkts;
        this_cpu->bytes += stats->bytes;
        write_seqcount_end(myseq);
-       preempt_enable();
+       local_bh_enable();
 }
 
 void nft_counter_init_seqcount(void)