From c4ea060e85eabe40f3572969daff4fc2f242b7b8 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Thu, 1 Apr 2021 15:43:58 +0100
Subject: [PATCH] io_uring: simplify overflow handling

Overflowed CQEs don't lock requests anymore, so we don't need to worry
about cancelling them; kill cq_overflow_flushed and simplify the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5799867aeba9e713c32f49aef78e5e1aef9fbc43.1617287883.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7c5c3d46c6b7..99f5252ff2dc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -335,7 +335,6 @@ struct io_ring_ctx {
 	struct {
 		unsigned int		flags;
 		unsigned int		compat: 1;
-		unsigned int		cq_overflow_flushed: 1;
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
@@ -1525,8 +1524,7 @@ static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	if (!ctx->cq_overflow_flushed &&
-	    !atomic_read(&req->task->io_uring->in_idle)) {
+	if (!atomic_read(&req->task->io_uring->in_idle)) {
 		struct io_overflow_cqe *ocqe;

 		ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -8491,6 +8489,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)

 	mutex_lock(&ctx->uring_lock);
 	io_sqe_files_unregister(ctx);
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
 	mutex_unlock(&ctx->uring_lock);
 	io_eventfd_unregister(ctx);
 	io_destroy_buffers(ctx);
@@ -8692,8 +8692,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)

 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
-	/* if force is set, the ring is going away. always drop after that */
-	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	xa_for_each(&ctx->personalities, index, creds)
-- 
2.50.1
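
For context, a minimal userspace sketch of the overflow path this patch
simplifies. This is an illustration only, not the kernel code: the struct
and function names here are simplified stand-ins, malloc() replaces
kmalloc(GFP_ATOMIC | __GFP_ACCOUNT), and a bare singly linked list stands
in for the locked ctx->cq_overflow_list. The property the commit message
relies on: once the completion payload is copied into a separately
allocated overflow entry, the request that produced it holds no overflow
state and can be freed immediately, so nothing needs to pin or cancel
requests on the overflow path and cq_overflow_flushed has no job left.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cqe {
	unsigned long long	user_data;
	int			res;
	unsigned int		flags;
};

/* Models struct io_overflow_cqe: the CQE payload lives in this
 * allocation, not in the request that generated it. */
struct overflow_cqe {
	struct overflow_cqe	*next;
	struct cqe		cqe;
};

struct ring {
	struct cqe		*cqes;		/* CQ ring of (mask + 1) entries */
	unsigned int		head, tail, mask;
	struct overflow_cqe	*overflow;	/* models ctx->cq_overflow_list */
	bool			in_idle;	/* models req->task->io_uring->in_idle */
};

/* Models __io_cqring_fill_event() after the patch: the only remaining
 * guard on the overflow path is the in_idle check. */
static bool fill_event(struct ring *r, unsigned long long user_data,
		       int res, unsigned int flags)
{
	/* Fast path: room in the CQ ring. */
	if (r->tail - r->head < r->mask + 1) {
		struct cqe *cqe = &r->cqes[r->tail++ & r->mask];

		cqe->user_data = user_data;
		cqe->res = res;
		cqe->flags = flags;
		return true;
	}
	/* Slow path: ring full, stash the completion in a separate
	 * allocation; the request keeps no overflow state.  (The kernel
	 * appends to preserve completion order; a LIFO list keeps this
	 * sketch short.) */
	if (!r->in_idle) {
		struct overflow_cqe *ocqe = malloc(sizeof(*ocqe));

		if (ocqe) {
			ocqe->cqe = (struct cqe){ user_data, res, flags };
			ocqe->next = r->overflow;
			r->overflow = ocqe;
			return true;
		}
	}
	return false;	/* dropped; the kernel accounts a lost completion */
}

/* Models the forced __io_cqring_overflow_flush() at teardown: drain
 * stashed entries into the ring while there is room, drop the rest. */
static void flush_overflow(struct ring *r)
{
	while (r->overflow) {
		struct overflow_cqe *ocqe = r->overflow;

		r->overflow = ocqe->next;
		if (r->tail - r->head < r->mask + 1)
			r->cqes[r->tail++ & r->mask] = ocqe->cqe;
		free(ocqe);
	}
}

int main(void)
{
	struct cqe cqes[4] = {0};
	struct ring r = { .cqes = cqes, .mask = 3 };

	/* Post more completions than the CQ ring can hold. */
	for (int i = 0; i < 6; i++)
		fill_event(&r, i, 0, 0);
	printf("in ring: %u, overflowed: %s\n",
	       r.tail - r.head, r.overflow ? "yes" : "no");

	flush_overflow(&r);	/* as io_ring_ctx_free() now does */
	return 0;
}

Because requests no longer carry overflow state, the only flush the
teardown path still needs is the one this patch adds to
io_ring_ctx_free(), which drains the accumulated overflow entries under
uring_lock before the rings themselves are freed.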