net/tcp_sigpool: Use nested-BH locking for sigpool_scratch.
Author:     Sebastian Andrzej Siewior <bigeasy@linutronix.de>
AuthorDate: Thu, 20 Jun 2024 13:21:55 +0000 (15:21 +0200)
Committer:  Jakub Kicinski <kuba@kernel.org>
CommitDate: Mon, 24 Jun 2024 23:41:22 +0000 (16:41 -0700)
sigpool_scratch is a per-CPU variable and relies on disabled BH for its
locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT,
this data structure requires explicit locking.

Wrap the original sigpool_scratch pointer in a struct together with a
local_lock_t, and use local_lock_nested_bh() for locking. This change
adds only lockdep coverage and does not alter the functional behaviour
for !PREEMPT_RT.

Cc: David Ahern <dsahern@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-6-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
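
The shape of the change, as a minimal sketch (the names below are
hypothetical, not from this commit): a per-CPU pointer that previously
relied on BH-disabled sections alone gets a local_lock_t placed next to
it, so PREEMPT_RT takes a real per-CPU lock while !PREEMPT_RT only gains
lockdep coverage.

#include <linux/bottom_half.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU scratch, mirroring the struct this commit adds. */
struct pcpu_scratch {
	local_lock_t	bh_lock;	/* protects @data on PREEMPT_RT */
	void		*data;		/* per-CPU scratch buffer */
};

static DEFINE_PER_CPU(struct pcpu_scratch, pcpu_scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void pcpu_scratch_use(void)
{
	void *p;

	local_bh_disable();	/* outer BH protection, as before */
	/* No-op plus lockdep on !PREEMPT_RT; a real per-CPU lock on
	 * PREEMPT_RT, where local_bh_disable() alone does not provide
	 * per-CPU exclusion.
	 */
	local_lock_nested_bh(&pcpu_scratch.bh_lock);
	p = this_cpu_read(pcpu_scratch.data);
	/* ... exclusive use of p on this CPU ... */
	local_unlock_nested_bh(&pcpu_scratch.bh_lock);
	local_bh_enable();
}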
net/ipv4/tcp_sigpool.c

index 8512cb09ebc097d567dd907ad17c4ae38ddb5768..d8a4f192873a2a1036c2f017c85fb4436226e53c 100644
 #include <net/tcp.h>
 
 static size_t __scratch_size;
-static DEFINE_PER_CPU(void __rcu *, sigpool_scratch);
+struct sigpool_scratch {
+       local_lock_t bh_lock;
+       void __rcu *pad;
+};
+
+static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = {
+       .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 
 struct sigpool_entry {
        struct crypto_ahash     *hash;
@@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size)
                        break;
                }
 
-               old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+               old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
                                        scratch, lockdep_is_held(&cpool_mutex));
                if (!cpu_online(cpu) || !old_scratch) {
                        kfree(old_scratch);
@@ -93,7 +100,7 @@ static void sigpool_scratch_free(void)
        int cpu;
 
        for_each_possible_cpu(cpu)
-               kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu),
+               kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
                                          NULL, lockdep_is_held(&cpool_mutex)));
        __scratch_size = 0;
 }
@@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RCU_BH)
        /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is
         * valid (allocated) until tcp_sigpool_end().
         */
-       c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch));
+       local_lock_nested_bh(&sigpool_scratch.bh_lock);
+       c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad));
        return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_sigpool_start);
@@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH)
 {
        struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req);
 
+       local_unlock_nested_bh(&sigpool_scratch.bh_lock);
        rcu_read_unlock_bh();
        ahash_request_free(c->req);
        crypto_free_ahash(hash);
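
For callers, the contract is unchanged; only the internal locking grew.
A hedged usage sketch (pool_id is a placeholder for an id obtained
earlier from tcp_sigpool_alloc_ahash()):

	struct tcp_sigpool hp;
	int err;

	err = tcp_sigpool_start(pool_id, &hp);	/* BH off, nested-BH lock held */
	if (err)
		return err;
	/* hp.scratch is this CPU's buffer, exclusive until tcp_sigpool_end() */
	tcp_sigpool_end(&hp);			/* drops the lock, re-enables BH */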