www.infradead.org Git - users/hch/dma-mapping.git/commitdiff
rxrpc: Fix a race between socket set up and I/O thread creation
authorDavid Howells <dhowells@redhat.com>
Tue, 1 Oct 2024 13:26:58 +0000 (14:26 +0100)
committerJakub Kicinski <kuba@kernel.org>
Thu, 3 Oct 2024 23:23:20 +0000 (16:23 -0700)
In rxrpc_open_socket(), it sets up the socket and then sets up the I/O
thread that will handle it.  This is a problem, however, as there's a gap
between the two phases in which a packet may come into rxrpc_encap_rcv()
from the UDP socket, but we may oops when trying to wake the not-yet created
I/O thread.

As a quick fix, just make rxrpc_encap_rcv() discard the packet if there's
no I/O thread yet.

A better, but more intrusive fix would perhaps be to rearrange things such
that the socket creation is done by the I/O thread.

Fixes: a275da62e8c1 ("rxrpc: Create a per-local endpoint receive queue and I/O thread")
Signed-off-by: David Howells <dhowells@redhat.com>
cc: yuxuanzhe@outlook.com
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20241001132702.3122709-2-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/rxrpc/ar-internal.h
net/rxrpc/io_thread.c
net/rxrpc/local_object.c

index 80d682f89b23325237ad9527a3788d8eb1edd6a1..d0fd37bdcfe9c8653192b8c7d9f07a55e938fb90 100644 (file)
@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
 int rxrpc_io_thread(void *data);
 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
 {
-       wake_up_process(local->io_thread);
+       wake_up_process(READ_ONCE(local->io_thread));
 }
 
 static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
index 0300baa9afcd394278a202da512e5bc6bd2d4f71..07c74c77d80214b006c49b975ce73d3a46952708 100644 (file)
@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 {
        struct sk_buff_head *rx_queue;
        struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
+       struct task_struct *io_thread;
 
        if (unlikely(!local)) {
                kfree_skb(skb);
                return 0;
        }
+       io_thread = READ_ONCE(local->io_thread);
+       if (!io_thread) {
+               kfree_skb(skb);
+               return 0;
+       }
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
 
@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
 #endif
 
        skb_queue_tail(rx_queue, skb);
-       rxrpc_wake_up_io_thread(local);
+       wake_up_process(io_thread);
        return 0;
 }
 
@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data)
        __set_current_state(TASK_RUNNING);
        rxrpc_see_local(local, rxrpc_local_stop);
        rxrpc_destroy_local(local);
-       local->io_thread = NULL;
+       WRITE_ONCE(local->io_thread, NULL);
        rxrpc_see_local(local, rxrpc_local_stopped);
        return 0;
 }
index 504453c688d751fe11cbfbe18e1d20c0623f9a77..f9623ace22016f622ce2b4a5ccfac0e5112cf741 100644 (file)
@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        }
 
        wait_for_completion(&local->io_thread_ready);
-       local->io_thread = io_thread;
+       WRITE_ONCE(local->io_thread, io_thread);
        _leave(" = 0");
        return 0;