io_uring/io-wq: limit retrying worker initialisation
author: Pavel Begunkov <asml.silence@gmail.com>
Wed, 10 Jul 2024 17:58:17 +0000 (18:58 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 11 Jul 2024 07:51:44 +0000 (01:51 -0600)
If io-wq worker creation fails, we retry it by queueing up a task_work.
task_work is needed because worker creation should be done from the user process
context. The problem is that retries are not limited, and if queueing a
task_work is the reason for the failure, we might get into an infinite
loop.

It doesn't seem to happen now, but it would with a following patch that
executes task_work in the freezer's loop. For now, arbitrarily limit the
number of attempts to create a worker.

Cc: stable@vger.kernel.org
Fixes: 3146cba99aa28 ("io-wq: make worker creation resilient against signals")
Reported-by: Julian Orth <ju.orth@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8280436925db88448c7c85c6656edee1a43029ea.1720634146.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
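
For illustration, here is a minimal, self-contained C sketch of the bounded-retry
pattern the patch introduces. It is not the kernel code: try_create_worker() and
should_retry() are hypothetical stand-ins that only mirror the shape of the patched
io_should_retry_thread(), and worker creation is forced to fail with -EAGAIN to
model the pathological case where the retry path itself keeps failing.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define WORKER_INIT_LIMIT 3		/* arbitrary cap, mirroring the patch */

struct worker {
	int init_retries;		/* failed creation attempts so far */
};

/* Hypothetical stand-in for create_io_thread(): always fails with -EAGAIN
 * to model the case where the retry mechanism itself is the problem. */
static long try_create_worker(struct worker *w)
{
	(void)w;
	return -EAGAIN;
}

/* Mirrors the shape of io_should_retry_thread() after the patch: transient
 * errors may be retried, but only a bounded number of times. */
static bool should_retry(struct worker *w, long err)
{
	if (w->init_retries++ >= WORKER_INIT_LIMIT)
		return false;
	return err == -EAGAIN;		/* the real code also covers -ERESTART* */
}

int main(void)
{
	struct worker w = { .init_retries = 0 };
	long err;

	/* Without the WORKER_INIT_LIMIT check this loop would never end. */
	while ((err = try_create_worker(&w)) < 0 && should_retry(&w, err))
		printf("creation failed (%ld), retry attempt %d\n",
		       err, w.init_retries);

	printf("gave up after %d attempts\n", w.init_retries);
	return 0;
}

Without the WORKER_INIT_LIMIT check in should_retry(), the loop above would spin
forever, which is exactly the scenario the commit message describes.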
io_uring/io-wq.c

index 913c92249522e8c1eb38dc5dacfc93044a31db11..f1e7c670add85f64b9bc29191fe4ec1123271871 100644
@@ -23,6 +23,7 @@
 #include "io_uring.h"
 
 #define WORKER_IDLE_TIMEOUT    (5 * HZ)
+#define WORKER_INIT_LIMIT      3
 
 enum {
        IO_WORKER_F_UP          = 0,    /* up and active */
@@ -58,6 +59,7 @@ struct io_worker {
 
        unsigned long create_state;
        struct callback_head create_work;
+       int init_retries;
 
        union {
                struct rcu_head rcu;
@@ -745,7 +747,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
        return true;
 }
 
-static inline bool io_should_retry_thread(long err)
+static inline bool io_should_retry_thread(struct io_worker *worker, long err)
 {
        /*
         * Prevent perpetual task_work retry, if the task (or its group) is
@@ -753,6 +755,8 @@ static inline bool io_should_retry_thread(long err)
         */
        if (fatal_signal_pending(current))
                return false;
+       if (worker->init_retries++ >= WORKER_INIT_LIMIT)
+               return false;
 
        switch (err) {
        case -EAGAIN:
@@ -779,7 +783,7 @@ static void create_worker_cont(struct callback_head *cb)
                io_init_new_worker(wq, worker, tsk);
                io_worker_release(worker);
                return;
-       } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+       } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
                struct io_wq_acct *acct = io_wq_get_acct(worker);
 
                atomic_dec(&acct->nr_running);
@@ -846,7 +850,7 @@ fail:
        tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
        if (!IS_ERR(tsk)) {
                io_init_new_worker(wq, worker, tsk);
-       } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+       } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
                kfree(worker);
                goto fail;
        } else {