From 9c2ff3f9b5e0202d1cc1f6193b1e96df203ae4a4 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Tue, 6 May 2025 13:31:07 +0100
Subject: [PATCH] io_uring: remove io_preinit_req()

Apart from setting ->ctx, io_preinit_req() zeroes a bunch of fields of
a request, of which only ->file_node is mandatory. Remove the function
and zero the entire request on first allocation. With that, we also
need to initialise ->ctx every time, which might be a good thing for
performance, as we're now likely overwriting the entire cache line, so
the stores can be write-combined and avoid RMW.

Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ba5485dc913f1e275862ce88f5169d4ac4a33836.1746533807.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 21 ++-------------------
 io_uring/notif.c    |  1 +
 2 files changed, 3 insertions(+), 19 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 703251f6f4d8..3d20f3b63443 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -927,22 +927,6 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	io_req_complete_defer(req);
 }
 
-/*
- * Don't initialise the fields below on every allocation, but do that in
- * advance and keep them valid across allocations.
- */
-static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
-	req->ctx = ctx;
-	req->buf_node = NULL;
-	req->file_node = NULL;
-	req->link = NULL;
-	req->async_data = NULL;
-	/* not necessary, but safer to zero */
-	memset(&req->cqe, 0, sizeof(req->cqe));
-	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
-}
-
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -952,7 +936,7 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
-	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO;
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret;
 
@@ -973,7 +957,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	while (ret--) {
 		struct io_kiocb *req = reqs[ret];
 
-		io_preinit_req(req, ctx);
 		io_req_add_to_cache(req, ctx);
 	}
 	return true;
@@ -2049,7 +2032,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	int personality;
 	u8 opcode;
 
-	/* req is partially pre-initialised, see io_preinit_req() */
+	req->ctx = ctx;
 	req->opcode = opcode = READ_ONCE(sqe->opcode);
 	/* same numerical values with corresponding REQ_F_*, safe to copy */
 	sqe_flags = READ_ONCE(sqe->flags);
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 7bd92538dccb..9a6f6e92d742 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -112,6 +112,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	if (unlikely(!io_alloc_req(ctx, &notif)))
 		return NULL;
 
+	notif->ctx = ctx;
 	notif->opcode = IORING_OP_NOP;
 	notif->flags = 0;
 	notif->file = NULL;
-- 
2.50.1
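
[Editor's illustration, not part of the patch.] The functional core of the change is that __io_alloc_req_refill() already bulk-allocates requests from a slab cache, and adding __GFP_ZERO to the gfp mask makes the allocator hand back fully zeroed objects, so the per-field NULL assignments in io_preinit_req() become redundant; only ->ctx still has to be written, which now happens in io_init_req(). The snippet below is a minimal userspace sketch of that before/after trade-off: "struct request" and REQ_BATCH are hypothetical stand-ins for struct io_kiocb and IO_REQ_ALLOC_BATCH, and calloc() stands in for a zeroed bulk slab allocation. It illustrates the shape of the change only; it does not model the cache-line/write-combining effect mentioned in the commit message.

/* Illustrative userspace sketch only -- not kernel code and not part of
 * the patch. "struct request" and REQ_BATCH are hypothetical stand-ins
 * for struct io_kiocb and IO_REQ_ALLOC_BATCH.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REQ_BATCH 8

struct request {
	void *ctx;
	void *file_node;
	void *buf_node;
	void *link;
	void *async_data;
	unsigned long long cqe[2];
};

/* "Before": clear selected fields on every cache refill, like
 * io_preinit_req() did.
 */
static void preinit_req(struct request *req, void *ctx)
{
	req->ctx = ctx;
	req->file_node = NULL;
	req->buf_node = NULL;
	req->link = NULL;
	req->async_data = NULL;
	memset(req->cqe, 0, sizeof(req->cqe));
}

/* "After": get zeroed memory for the whole batch up front (the userspace
 * analogue of passing __GFP_ZERO to the bulk slab allocation); ->ctx is
 * then set only when a request is actually initialised for an SQE.
 */
static struct request *alloc_zeroed_batch(void)
{
	return calloc(REQ_BATCH, sizeof(struct request));
}

int main(void)
{
	int dummy_ctx;
	struct request *batch = alloc_zeroed_batch();

	if (!batch)
		return 1;

	/* Old scheme: touch a handful of fields per request at refill time. */
	preinit_req(&batch[0], &dummy_ctx);

	/* New scheme: memory is already zero; just set ->ctx at init time. */
	batch[1].ctx = &dummy_ctx;

	printf("req0 ctx=%p req1 ctx=%p req1 link=%p\n",
	       batch[0].ctx, batch[1].ctx, batch[1].link);
	free(batch);
	return 0;
}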