www.infradead.org Git - users/dwmw2/linux.git/commitdiff
io_uring: fix race between timeout flush and removal
authorJens Axboe <axboe@kernel.dk>
Fri, 8 Apr 2022 17:08:58 +0000 (11:08 -0600)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Apr 2022 19:01:08 +0000 (21:01 +0200)
commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.

io_flush_timeouts() assumes the timeout isn't in progress of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.

Leave it on the list and let the normal timeout cancelation take care
of it.

Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/io_uring.c

index 3580fa2dabc869e104d4043aa48a01ee183254b9..ab9290ab4cae077f9a8a37d4990b5398bb87e225 100644 (file)
@@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
+       struct io_kiocb *req, *tmp;
        u32 seq;
 
        if (list_empty(&ctx->timeout_list))
@@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
        seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-       do {
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                u32 events_needed, events_got;
-               struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-                                               struct io_kiocb, timeout.list);
 
                if (io_is_timeout_noseq(req))
                        break;
@@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
                if (events_got < events_needed)
                        break;
 
-               list_del_init(&req->timeout.list);
                io_kill_timeout(req, 0);
-       } while (!list_empty(&ctx->timeout_list));
+       }
 
        ctx->cq_last_tm_flush = seq;
 }
@@ -5639,6 +5637,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        else
                data->mode = HRTIMER_MODE_REL;
 
+       INIT_LIST_HEAD(&req->timeout.list);
        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
        return 0;
 }
@@ -6282,12 +6281,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        if (!list_empty(&req->link_list)) {
                prev = list_entry(req->link_list.prev, struct io_kiocb,
                                  link_list);
-               if (refcount_inc_not_zero(&prev->refs))
-                       list_del_init(&req->link_list);
-               else
+               list_del_init(&req->link_list);
+               if (!refcount_inc_not_zero(&prev->refs))
                        prev = NULL;
        }
 
+       list_del(&req->timeout.list);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        if (prev) {