From 3c90b80df5b574c2c61626fd40fa3b23be21fa26 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 21 Sep 2024 01:59:48 -0600 Subject: [PATCH 01/16] io_uring/eventfd: check for the need to async notifier earlier It's not necessary to do this post grabbing a reference. With that, we can drop the out goto path as well. Link: https://lore.kernel.org/r/20240921080307.185186-3-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/eventfd.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 8b628ab6bbff..829873806f9f 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -69,10 +69,10 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) */ if (unlikely(!ev_fd)) return; + if (ev_fd->eventfd_async && !io_wq_current_is_worker()) + return; if (!refcount_inc_not_zero(&ev_fd->refs)) return; - if (ev_fd->eventfd_async && !io_wq_current_is_worker()) - goto out; if (likely(eventfd_signal_allowed())) { eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE); @@ -82,7 +82,6 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) return; } } -out: io_eventfd_put(ev_fd); } -- 2.50.1 From 60c5f15800f21883615689e2423217a9c8a1b502 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 21 Sep 2024 01:59:49 -0600 Subject: [PATCH 02/16] io_uring/eventfd: move actual signaling part into separate helper In preparation for using this from multiple spots, move the signaling into a helper. Link: https://lore.kernel.org/r/20240921080307.185186-4-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/eventfd.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 829873806f9f..58e76f4d1e00 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -47,6 +47,22 @@ static void io_eventfd_put(struct io_ev_fd *ev_fd) call_rcu(&ev_fd->rcu, io_eventfd_free); } +/* + * Returns true if the caller should put the ev_fd reference, false if not. + */ +static bool __io_eventfd_signal(struct io_ev_fd *ev_fd) +{ + if (eventfd_signal_allowed()) { + eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE); + return true; + } + if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) { + call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal); + return false; + } + return true; +} + void io_eventfd_signal(struct io_ring_ctx *ctx) { struct io_ev_fd *ev_fd = NULL; @@ -73,16 +89,8 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) return; if (!refcount_inc_not_zero(&ev_fd->refs)) return; - - if (likely(eventfd_signal_allowed())) { - eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE); - } else { - if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) { - call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal); - return; - } - } - io_eventfd_put(ev_fd); + if (__io_eventfd_signal(ev_fd)) + io_eventfd_put(ev_fd); } void io_eventfd_flush_signal(struct io_ring_ctx *ctx) -- 2.50.1 From 3ca5a356041438534ecbb74159df91736238c6b1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 21 Sep 2024 01:59:50 -0600 Subject: [PATCH 03/16] io_uring/eventfd: move trigger check into a helper It's a bit hard to read what guards the triggering, move it into a helper and add a comment explaining it too. This additionally moves the ev_fd == NULL check in there as well. 
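For reference, the eventfd_async flag this helper tests is chosen by userspace at registration time. A minimal sketch of that side, assuming liburing and an already-initialized ring (the helper names below are illustrative, not part of this series):

#include <errno.h>
#include <liburing.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

/* Create an eventfd and attach it to @ring for CQE notifications. */
static int attach_cq_eventfd(struct io_uring *ring, bool async_only)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        int ret;

        if (efd < 0)
                return -errno;

        /* async_only corresponds to ev_fd->eventfd_async on the kernel side */
        ret = async_only ? io_uring_register_eventfd_async(ring, efd) :
                           io_uring_register_eventfd(ring, efd);
        if (ret < 0) {
                close(efd);
                return ret;
        }
        return efd;
}

/* Each successful read() consumes the count bumped when new CQEs are posted. */
static void wait_cq_notification(int efd)
{
        uint64_t cnt;

        while (read(efd, &cnt, sizeof(cnt)) < 0 && errno == EINTR)
                ;
}

With the async variant, the trigger check being moved around in these patches suppresses the signal unless the completion runs from an io-wq worker; with the plain variant any newly posted CQE may bump the counter.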
Link: https://lore.kernel.org/r/20240921080307.185186-5-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/eventfd.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 58e76f4d1e00..0946d3da88d3 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -63,6 +63,17 @@ static bool __io_eventfd_signal(struct io_ev_fd *ev_fd) return true; } +/* + * Trigger if eventfd_async isn't set, or if it's set and the caller is + * an async worker. If ev_fd isn't valid, obviously return false. + */ +static bool io_eventfd_trigger(struct io_ev_fd *ev_fd) +{ + if (ev_fd) + return !ev_fd->eventfd_async || io_wq_current_is_worker(); + return false; +} + void io_eventfd_signal(struct io_ring_ctx *ctx) { struct io_ev_fd *ev_fd = NULL; @@ -83,9 +94,7 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) * completed between the NULL check of ctx->io_ev_fd at the start of * the function and rcu_read_lock. */ - if (unlikely(!ev_fd)) - return; - if (ev_fd->eventfd_async && !io_wq_current_is_worker()) + if (!io_eventfd_trigger(ev_fd)) return; if (!refcount_inc_not_zero(&ev_fd->refs)) return; -- 2.50.1 From 83a4f865e273b83426eafdd3aa51334cc21ac0fd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 21 Sep 2024 01:59:51 -0600 Subject: [PATCH 04/16] io_uring/eventfd: abstract out ev_fd grab + release helpers In preparation for needing the ev_fd grabbing (and releasing) from another path, abstract out two helpers for that. Link: https://lore.kernel.org/r/20240921080307.185186-6-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/eventfd.c | 41 ++++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index 0946d3da88d3..d1fdecd0c458 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -47,6 +47,13 @@ static void io_eventfd_put(struct io_ev_fd *ev_fd) call_rcu(&ev_fd->rcu, io_eventfd_free); } +static void io_eventfd_release(struct io_ev_fd *ev_fd, bool put_ref) +{ + if (put_ref) + io_eventfd_put(ev_fd); + rcu_read_unlock(); +} + /* * Returns true if the caller should put the ev_fd reference, false if not. */ @@ -74,14 +81,18 @@ static bool io_eventfd_trigger(struct io_ev_fd *ev_fd) return false; } -void io_eventfd_signal(struct io_ring_ctx *ctx) +/* + * On success, returns with an ev_fd reference grabbed and the RCU read + * lock held. + */ +static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx) { - struct io_ev_fd *ev_fd = NULL; + struct io_ev_fd *ev_fd; if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) - return; + return NULL; - guard(rcu)(); + rcu_read_lock(); /* * rcu_dereference ctx->io_ev_fd once and use it for both for checking @@ -90,16 +101,24 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) ev_fd = rcu_dereference(ctx->io_ev_fd); /* - * Check again if ev_fd exists incase an io_eventfd_unregister call + * Check again if ev_fd exists in case an io_eventfd_unregister call * completed between the NULL check of ctx->io_ev_fd at the start of * the function and rcu_read_lock. 
*/ - if (!io_eventfd_trigger(ev_fd)) - return; - if (!refcount_inc_not_zero(&ev_fd->refs)) - return; - if (__io_eventfd_signal(ev_fd)) - io_eventfd_put(ev_fd); + if (io_eventfd_trigger(ev_fd) && refcount_inc_not_zero(&ev_fd->refs)) + return ev_fd; + + rcu_read_unlock(); + return NULL; +} + +void io_eventfd_signal(struct io_ring_ctx *ctx) +{ + struct io_ev_fd *ev_fd; + + ev_fd = io_eventfd_grab(ctx); + if (ev_fd) + io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd)); } void io_eventfd_flush_signal(struct io_ring_ctx *ctx) -- 2.50.1 From f4bb2f65bb8154c1a2c2d7e01db0c98dffb5918f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 21 Sep 2024 01:59:52 -0600 Subject: [PATCH 05/16] io_uring/eventfd: move ctx->evfd_last_cq_tail into io_ev_fd Everything else about the io_uring eventfd support is nicely kept private to that code, except the cached_cq_tail tracking. With everything else in place, move io_eventfd_flush_signal() to using the ev_fd grab+release helpers, which then enables the direct use of io_ev_fd for this tracking too. Link: https://lore.kernel.org/r/20240921080307.185186-7-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/eventfd.c | 50 +++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c index d1fdecd0c458..fab936d31ba8 100644 --- a/io_uring/eventfd.c +++ b/io_uring/eventfd.c @@ -13,10 +13,12 @@ struct io_ev_fd { struct eventfd_ctx *cq_ev_fd; - unsigned int eventfd_async: 1; - struct rcu_head rcu; + unsigned int eventfd_async; + /* protected by ->completion_lock */ + unsigned last_cq_tail; refcount_t refs; atomic_t ops; + struct rcu_head rcu; }; enum { @@ -123,25 +125,31 @@ void io_eventfd_signal(struct io_ring_ctx *ctx) void io_eventfd_flush_signal(struct io_ring_ctx *ctx) { - bool skip; - - spin_lock(&ctx->completion_lock); - - /* - * Eventfd should only get triggered when at least one event has been - * posted. Some applications rely on the eventfd notification count - * only changing IFF a new CQE has been added to the CQ ring. There's - * no depedency on 1:1 relationship between how many times this - * function is called (and hence the eventfd count) and number of CQEs - * posted to the CQ ring. - */ - skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail; - ctx->evfd_last_cq_tail = ctx->cached_cq_tail; - spin_unlock(&ctx->completion_lock); - if (skip) - return; + struct io_ev_fd *ev_fd; - io_eventfd_signal(ctx); + ev_fd = io_eventfd_grab(ctx); + if (ev_fd) { + bool skip, put_ref = true; + + /* + * Eventfd should only get triggered when at least one event + * has been posted. Some applications rely on the eventfd + * notification count only changing IFF a new CQE has been + * added to the CQ ring. There's no dependency on 1:1 + * relationship between how many times this function is called + * (and hence the eventfd count) and number of CQEs posted to + * the CQ ring. 
+ */ + spin_lock(&ctx->completion_lock); + skip = ctx->cached_cq_tail == ev_fd->last_cq_tail; + ev_fd->last_cq_tail = ctx->cached_cq_tail; + spin_unlock(&ctx->completion_lock); + + if (!skip) + put_ref = __io_eventfd_signal(ev_fd); + + io_eventfd_release(ev_fd, put_ref); + } } int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, @@ -172,7 +180,7 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, } spin_lock(&ctx->completion_lock); - ctx->evfd_last_cq_tail = ctx->cached_cq_tail; + ev_fd->last_cq_tail = ctx->cached_cq_tail; spin_unlock(&ctx->completion_lock); ev_fd->eventfd_async = eventfd_async; -- 2.50.1 From 95d6c9229a04cc12d39034cd6be6446a55a85d6d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 24 Sep 2024 05:57:30 -0600 Subject: [PATCH 06/16] io_uring/msg_ring: refactor a few helper functions Mostly just to skip them taking an io_kiocb, rather just pass in the ctx and io_msg directly. In preparation for being able to issue a MSG_RING request without having an io_kiocb. No functional changes in this patch. Link: https://lore.kernel.org/r/20240924115932.116167-2-axboe@kernel.dk Signed-off-by: Jens Axboe --- io_uring/msg_ring.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 7fd9badcfaf8..b8c527f08cd5 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -116,14 +116,13 @@ static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx) return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); } -static int io_msg_data_remote(struct io_kiocb *req) +static int io_msg_data_remote(struct io_ring_ctx *target_ctx, + struct io_msg *msg) { - struct io_ring_ctx *target_ctx = req->file->private_data; - struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_kiocb *target; u32 flags = 0; - target = io_msg_get_kiocb(req->ctx); + target = io_msg_get_kiocb(target_ctx); if (unlikely(!target)) return -ENOMEM; @@ -134,10 +133,9 @@ static int io_msg_data_remote(struct io_kiocb *req) msg->user_data); } -static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) +static int __io_msg_ring_data(struct io_ring_ctx *target_ctx, + struct io_msg *msg, unsigned int issue_flags) { - struct io_ring_ctx *target_ctx = req->file->private_data; - struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); u32 flags = 0; int ret; @@ -149,7 +147,7 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) return -EBADFD; if (io_msg_need_remote(target_ctx)) - return io_msg_data_remote(req); + return io_msg_data_remote(target_ctx, msg); if (msg->flags & IORING_MSG_RING_FLAGS_PASS) flags = msg->cqe_flags; @@ -166,6 +164,14 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) return ret; } +static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *target_ctx = req->file->private_data; + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); + + return __io_msg_ring_data(target_ctx, msg, issue_flags); +} + static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); @@ -271,10 +277,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) return io_msg_install_complete(req, issue_flags); } -int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe) 
{ - struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); - if (unlikely(sqe->buf_index || sqe->personality)) return -EINVAL; @@ -291,6 +295,11 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } +int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe); +} + int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); -- 2.50.1 From a377132154ab8404dafcc52e8bc0c73050a954c2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 24 Sep 2024 05:57:31 -0600 Subject: [PATCH 07/16] io_uring/msg_ring: add support for sending a sync message Normally MSG_RING requires both a source and a destination ring. But some users don't always have a ring avilable to send a message from, yet they still need to notify a target ring. Add support for using io_uring_register(2) without having a source ring, using a file descriptor of -1 for that. Internally those are called blind registration opcodes. Implement IORING_REGISTER_SEND_MSG_RING as a blind opcode, which simply takes an sqe that the application can put on the stack and use the normal liburing helpers to initialize it. Then the app can call: io_uring_register(-1, IORING_REGISTER_SEND_MSG_RING, &sqe, 1); and get the same behavior in terms of the target, where a CQE is posted with the details given in the sqe. For now this takes a single sqe pointer argument, and hence arg must be set to that, and nr_args must be 1. Could easily be extended to take an array of sqes, but for now let's keep it simple. Link: https://lore.kernel.org/r/20240924115932.116167-3-axboe@kernel.dk Signed-off-by: Jens Axboe --- include/uapi/linux/io_uring.h | 3 +++ io_uring/msg_ring.c | 29 +++++++++++++++++++++++++++++ io_uring/msg_ring.h | 1 + io_uring/register.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 1fe79e750470..86cb385fe0b5 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -612,6 +612,9 @@ enum io_uring_register_op { /* clone registered buffers from source ring to current ring */ IORING_REGISTER_CLONE_BUFFERS = 30, + /* send MSG_RING without having a ring */ + IORING_REGISTER_SEND_MSG_RING = 31, + /* this goes last */ IORING_REGISTER_LAST, diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index b8c527f08cd5..edea1ffd501c 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -331,6 +331,35 @@ done: return IOU_OK; } +int io_uring_sync_msg_ring(struct io_uring_sqe *sqe) +{ + struct io_msg io_msg = { }; + struct fd f; + int ret; + + ret = __io_msg_ring_prep(&io_msg, sqe); + if (unlikely(ret)) + return ret; + + /* + * Only data sending supported, not IORING_MSG_SEND_FD as that one + * doesn't make sense without a source ring to send files from. 
+ */ + if (io_msg.cmd != IORING_MSG_DATA) + return -EINVAL; + + ret = -EBADF; + f = fdget(sqe->fd); + if (fd_file(f)) { + ret = -EBADFD; + if (io_is_uring_fops(fd_file(f))) + ret = __io_msg_ring_data(fd_file(f)->private_data, + &io_msg, IO_URING_F_UNLOCKED); + fdput(f); + } + return ret; +} + void io_msg_cache_free(const void *entry) { struct io_kiocb *req = (struct io_kiocb *) entry; diff --git a/io_uring/msg_ring.h b/io_uring/msg_ring.h index 3030f3942f0f..38e7f8f0c944 100644 --- a/io_uring/msg_ring.h +++ b/io_uring/msg_ring.h @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +int io_uring_sync_msg_ring(struct io_uring_sqe *sqe); int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags); void io_msg_ring_cleanup(struct io_kiocb *req); diff --git a/io_uring/register.c b/io_uring/register.c index eca26d4884d9..52b2f9b74af8 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -28,6 +28,7 @@ #include "kbuf.h" #include "napi.h" #include "eventfd.h" +#include "msg_ring.h" #define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \ IORING_REGISTER_LAST + IORING_OP_LAST) @@ -588,6 +589,32 @@ struct file *io_uring_register_get_file(unsigned int fd, bool registered) return ERR_PTR(-EOPNOTSUPP); } +/* + * "blind" registration opcodes are ones where there's no ring given, and + * hence the source fd must be -1. + */ +static int io_uring_register_blind(unsigned int opcode, void __user *arg, + unsigned int nr_args) +{ + switch (opcode) { + case IORING_REGISTER_SEND_MSG_RING: { + struct io_uring_sqe sqe; + + if (!arg || nr_args != 1) + return -EINVAL; + if (copy_from_user(&sqe, arg, sizeof(sqe))) + return -EFAULT; + /* no flags supported */ + if (sqe.flags) + return -EINVAL; + if (sqe.opcode == IORING_OP_MSG_RING) + return io_uring_sync_msg_ring(&sqe); + } + } + + return -EINVAL; +} + SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, void __user *, arg, unsigned int, nr_args) { @@ -602,6 +629,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, if (opcode >= IORING_REGISTER_LAST) return -EINVAL; + if (fd == -1) + return io_uring_register_blind(opcode, arg, nr_args); + file = io_uring_register_get_file(fd, use_registered_ring); if (IS_ERR(file)) return PTR_ERR(file); -- 2.50.1 From 829ab73e7bca455e1a8718325177cfb98b63d0df Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 13:52:54 -0600 Subject: [PATCH 08/16] io_uring/poll: remove 'ctx' argument from io_poll_req_delete() It's always req->ctx being used anyway, having this as a separate argument (that is then not even used) just makes it more confusing. 
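Returning briefly to the previous patch: a minimal sketch of driving the new blind opcode from a task that owns no ring of its own, assuming liburing for the SQE prep helper and the raw io_uring_register(2) syscall; the target descriptor and payload values are placeholders:

#include <liburing.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef IORING_REGISTER_SEND_MSG_RING
#define IORING_REGISTER_SEND_MSG_RING   31      /* matches the uapi addition above */
#endif

/* Post an IORING_MSG_DATA CQE into @target_ring_fd without a source ring. */
static int send_blind_msg(int target_ring_fd, __u64 user_data, __u32 res)
{
        struct io_uring_sqe sqe;

        memset(&sqe, 0, sizeof(sqe));
        io_uring_prep_msg_ring(&sqe, target_ring_fd, res, user_data, 0);

        /* fd == -1 routes io_uring_register(2) to the blind opcode handler */
        return (int)syscall(__NR_io_uring_register, -1,
                            IORING_REGISTER_SEND_MSG_RING, &sqe, 1);
}

On success the target ring sees a CQE carrying the user_data and res from the on-stack sqe, just as with a ring-to-ring MSG_RING.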
Signed-off-by: Jens Axboe --- io_uring/poll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_uring/poll.c b/io_uring/poll.c index 1f63b60e85e7..175c279e59ea 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -129,7 +129,7 @@ static void io_poll_req_insert(struct io_kiocb *req) spin_unlock(&hb->lock); } -static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx) +static void io_poll_req_delete(struct io_kiocb *req) { struct io_hash_table *table = &req->ctx->cancel_table; u32 index = hash_long(req->cqe.user_data, table->hash_bits); @@ -165,7 +165,7 @@ static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts) hash_del(&req->hash_node); req->flags &= ~REQ_F_HASH_LOCKED; } else { - io_poll_req_delete(req, ctx); + io_poll_req_delete(req); } } -- 2.50.1 From 085268829b07202cf7bf8ec1a8fb7fd9d8f6a41a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 14:22:36 -0600 Subject: [PATCH 09/16] io_uring/poll: get rid of unlocked cancel hash io_uring maintains two hash lists of inflight requests: 1) ctx->cancel_table_locked. This is used when the caller has the ctx->uring_lock held already. This is only an issue side parameter, as removal or task_work will always have it held. 2) ctx->cancel_table. This is used when the issuer does NOT have the ctx->uring_lock held, and relies on the table spinlocks for access. However, it's pretty trivial to simply grab the lock in the one spot where we care about it, for insertion. With that, we can kill the unlocked table (and get rid of the _locked postfix for the other one). Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 6 +- io_uring/fdinfo.c | 11 +-- io_uring/io_uring.c | 4 - io_uring/poll.c | 142 ++++++++------------------------- 4 files changed, 36 insertions(+), 127 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 4b9ba523978d..d8ca27da1341 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -291,7 +291,7 @@ struct io_ring_ctx { struct xarray io_bl_xa; - struct io_hash_table cancel_table_locked; + struct io_hash_table cancel_table; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_alloc_cache rw_cache; @@ -342,7 +342,6 @@ struct io_ring_ctx { struct list_head io_buffers_comp; struct list_head cq_overflow_list; - struct io_hash_table cancel_table; struct hlist_head waitid_list; @@ -459,7 +458,6 @@ enum { REQ_F_DOUBLE_POLL_BIT, REQ_F_APOLL_MULTISHOT_BIT, REQ_F_CLEAR_POLLIN_BIT, - REQ_F_HASH_LOCKED_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, @@ -534,8 +532,6 @@ enum { REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT), /* recvmsg special flag, clear EPOLLIN */ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT), - /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ - REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT), /* don't use lazy poll wake for this request */ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT), /* file is pollable */ diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index 6b1247664b35..a6bac533edbe 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -190,22 +190,13 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) } seq_puts(m, "PollList:\n"); - for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) { + for (i = 0; has_lock && i < (1U << ctx->cancel_table.hash_bits); i++) { struct io_hash_bucket *hb = 
&ctx->cancel_table.hbs[i]; - struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i]; struct io_kiocb *req; - spin_lock(&hb->lock); hlist_for_each_entry(req, &hb->list, hash_node) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, task_work_pending(req->task)); - spin_unlock(&hb->lock); - - if (!has_lock) - continue; - hlist_for_each_entry(req, &hbl->list, hash_node) - seq_printf(m, " op=%d, task_works=%d\n", req->opcode, - task_work_pending(req->task)); } if (has_lock) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index b2736e3491b8..f4e069cd03a5 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -294,8 +294,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) hash_bits = clamp(hash_bits, 1, 8); if (io_alloc_hash_table(&ctx->cancel_table, hash_bits)) goto err; - if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits)) - goto err; if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) goto err; @@ -361,7 +359,6 @@ err: io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free); io_futex_cache_free(ctx); kfree(ctx->cancel_table.hbs); - kfree(ctx->cancel_table_locked.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); return NULL; @@ -2774,7 +2771,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) io_wq_put_hash(ctx->hash_map); io_napi_free(ctx); kfree(ctx->cancel_table.hbs); - kfree(ctx->cancel_table_locked.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); } diff --git a/io_uring/poll.c b/io_uring/poll.c index 175c279e59ea..217d667e0622 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -122,28 +122,6 @@ static void io_poll_req_insert(struct io_kiocb *req) { struct io_hash_table *table = &req->ctx->cancel_table; u32 index = hash_long(req->cqe.user_data, table->hash_bits); - struct io_hash_bucket *hb = &table->hbs[index]; - - spin_lock(&hb->lock); - hlist_add_head(&req->hash_node, &hb->list); - spin_unlock(&hb->lock); -} - -static void io_poll_req_delete(struct io_kiocb *req) -{ - struct io_hash_table *table = &req->ctx->cancel_table; - u32 index = hash_long(req->cqe.user_data, table->hash_bits); - spinlock_t *lock = &table->hbs[index].lock; - - spin_lock(lock); - hash_del(&req->hash_node); - spin_unlock(lock); -} - -static void io_poll_req_insert_locked(struct io_kiocb *req) -{ - struct io_hash_table *table = &req->ctx->cancel_table_locked; - u32 index = hash_long(req->cqe.user_data, table->hash_bits); lockdep_assert_held(&req->ctx->uring_lock); @@ -154,19 +132,14 @@ static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts) { struct io_ring_ctx *ctx = req->ctx; - if (req->flags & REQ_F_HASH_LOCKED) { - /* - * ->cancel_table_locked is protected by ->uring_lock in - * contrast to per bucket spinlocks. Likely, tctx_task_work() - * already grabbed the mutex for us, but there is a chance it - * failed. - */ - io_tw_lock(ctx, ts); - hash_del(&req->hash_node); - req->flags &= ~REQ_F_HASH_LOCKED; - } else { - io_poll_req_delete(req); - } + /* + * ->cancel_table_locked is protected by ->uring_lock in + * contrast to per bucket spinlocks. Likely, tctx_task_work() + * already grabbed the mutex for us, but there is a chance it + * failed. 
+ */ + io_tw_lock(ctx, ts); + hash_del(&req->hash_node); } static void io_init_poll_iocb(struct io_poll *poll, __poll_t events) @@ -563,12 +536,13 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req, return pt->owning || io_poll_get_ownership(req); } -static void io_poll_add_hash(struct io_kiocb *req) +static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags) { - if (req->flags & REQ_F_HASH_LOCKED) - io_poll_req_insert_locked(req); - else - io_poll_req_insert(req); + struct io_ring_ctx *ctx = req->ctx; + + io_ring_submit_lock(ctx, issue_flags); + io_poll_req_insert(req); + io_ring_submit_unlock(ctx, issue_flags); } /* @@ -605,11 +579,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req, ipt->owning = issue_flags & IO_URING_F_UNLOCKED; atomic_set(&req->poll_refs, (int)ipt->owning); - /* io-wq doesn't hold uring_lock */ - if (issue_flags & IO_URING_F_UNLOCKED) - req->flags &= ~REQ_F_HASH_LOCKED; - - /* * Exclusive waits may only wake a limited amount of entries * rather than all of them, this may interfere with lazy @@ -638,7 +607,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req, if (mask && ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) { if (!io_poll_can_finish_inline(req, ipt)) { - io_poll_add_hash(req); + io_poll_add_hash(req, issue_flags); return 0; } io_poll_remove_entries(req); @@ -647,7 +616,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req, return 1; } - io_poll_add_hash(req); + io_poll_add_hash(req, issue_flags); if (mask && (poll->events & EPOLLET) && io_poll_can_finish_inline(req, ipt)) { @@ -720,12 +689,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) __poll_t mask = POLLPRI | POLLERR | EPOLLET; int ret; - /* - * apoll requests already grab the mutex to complete in the tw handler, - * so removal from the mutex-backed hash is free, use it by default. 
- */ - req->flags |= REQ_F_HASH_LOCKED; - if (!def->pollin && !def->pollout) return IO_APOLL_ABORTED; if (!io_file_can_poll(req)) @@ -761,18 +724,22 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) return IO_APOLL_OK; } -static __cold bool io_poll_remove_all_table(struct task_struct *tsk, - struct io_hash_table *table, - bool cancel_all) +/* + * Returns true if we found and killed one or more poll requests + */ +__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, + bool cancel_all) { - unsigned nr_buckets = 1U << table->hash_bits; + unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits; struct hlist_node *tmp; struct io_kiocb *req; bool found = false; int i; + lockdep_assert_held(&ctx->uring_lock); + for (i = 0; i < nr_buckets; i++) { - struct io_hash_bucket *hb = &table->hbs[i]; + struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; spin_lock(&hb->lock); hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { @@ -787,28 +754,13 @@ static __cold bool io_poll_remove_all_table(struct task_struct *tsk, return found; } -/* - * Returns true if we found and killed one or more poll requests - */ -__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, - bool cancel_all) - __must_hold(&ctx->uring_lock) -{ - bool ret; - - ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all); - ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all); - return ret; -} - static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, struct io_cancel_data *cd, - struct io_hash_table *table, struct io_hash_bucket **out_bucket) { struct io_kiocb *req; - u32 index = hash_long(cd->data, table->hash_bits); - struct io_hash_bucket *hb = &table->hbs[index]; + u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits); + struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index]; *out_bucket = NULL; @@ -831,17 +783,16 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, struct io_cancel_data *cd, - struct io_hash_table *table, struct io_hash_bucket **out_bucket) { - unsigned nr_buckets = 1U << table->hash_bits; + unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits; struct io_kiocb *req; int i; *out_bucket = NULL; for (i = 0; i < nr_buckets; i++) { - struct io_hash_bucket *hb = &table->hbs[i]; + struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; spin_lock(&hb->lock); hlist_for_each_entry(req, &hb->list, hash_node) { @@ -866,17 +817,16 @@ static int io_poll_disarm(struct io_kiocb *req) return 0; } -static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, - struct io_hash_table *table) +static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) { struct io_hash_bucket *bucket; struct io_kiocb *req; if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ANY)) - req = io_poll_file_find(ctx, cd, table, &bucket); + req = io_poll_file_find(ctx, cd, &bucket); else - req = io_poll_find(ctx, false, cd, table, &bucket); + req = io_poll_find(ctx, false, cd, &bucket); if (req) io_poll_cancel_req(req); @@ -890,12 +840,8 @@ int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, { int ret; - ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table); - if (ret != -ENOENT) - return ret; - io_ring_submit_lock(ctx, issue_flags); - ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked); + ret = __io_poll_cancel(ctx, cd); 
io_ring_submit_unlock(ctx, issue_flags); return ret; } @@ -972,13 +918,6 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) ipt.pt._qproc = io_poll_queue_proc; - /* - * If sqpoll or single issuer, there is no contention for ->uring_lock - * and we'll end up holding it in tw handlers anyway. - */ - if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER)) - req->flags |= REQ_F_HASH_LOCKED; - ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); if (ret > 0) { io_req_set_res(req, ipt.result_mask, 0); @@ -997,18 +936,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) int ret2, ret = 0; io_ring_submit_lock(ctx, issue_flags); - preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket); - ret2 = io_poll_disarm(preq); - if (bucket) - spin_unlock(&bucket->lock); - if (!ret2) - goto found; - if (ret2 != -ENOENT) { - ret = ret2; - goto out; - } - - preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket); + preq = io_poll_find(ctx, true, &cd, &bucket); ret2 = io_poll_disarm(preq); if (bucket) spin_unlock(&bucket->lock); @@ -1016,8 +944,6 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) ret = ret2; goto out; } - -found: if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) { ret = -EFAULT; goto out; -- 2.50.1 From 879ba46a38e67595b96c87428fbb718d63821da2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 14:35:52 -0600 Subject: [PATCH 10/16] io_uring/poll: get rid of io_poll_tw_hash_eject() It serves no purposes anymore, all it does is delete the hash list entry. task_work always has the ring locked. Signed-off-by: Jens Axboe --- io_uring/poll.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/io_uring/poll.c b/io_uring/poll.c index 217d667e0622..a0d1a09c5a20 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -128,20 +128,6 @@ static void io_poll_req_insert(struct io_kiocb *req) hlist_add_head(&req->hash_node, &table->hbs[index].list); } -static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts) -{ - struct io_ring_ctx *ctx = req->ctx; - - /* - * ->cancel_table_locked is protected by ->uring_lock in - * contrast to per bucket spinlocks. Likely, tctx_task_work() - * already grabbed the mutex for us, but there is a chance it - * failed. - */ - io_tw_lock(ctx, ts); - hash_del(&req->hash_node); -} - static void io_init_poll_iocb(struct io_poll *poll, __poll_t events) { poll->head = NULL; @@ -336,7 +322,8 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts) return; } io_poll_remove_entries(req); - io_poll_tw_hash_eject(req, ts); + /* task_work always has ->uring_lock held */ + hash_del(&req->hash_node); if (req->opcode == IORING_OP_POLL_ADD) { if (ret == IOU_POLL_DONE) { -- 2.50.1 From ba4366f57b117c2eab996642288e5c75646ccfc9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 14:29:06 -0600 Subject: [PATCH 11/16] io_uring/poll: get rid of per-hashtable bucket locks Any access to the table is protected by ctx->uring_lock now anyway, the per-bucket locking doesn't buy us anything. 
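The end state is a plain hlist hash keyed on user_data and serialized purely by the ring mutex. In stripped-down, in-kernel-style form (function names here are illustrative; the real helpers are io_poll_req_insert() and io_poll_find() in the hunks below):

/* Illustrative fragments only - they mirror the shape of the in-tree code. */
static void demo_poll_hash_insert(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
        struct io_hash_table *table = &ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&ctx->uring_lock);
        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static struct io_kiocb *demo_poll_hash_find(struct io_ring_ctx *ctx, u64 user_data)
{
        u32 index = hash_long(user_data, ctx->cancel_table.hash_bits);
        struct io_kiocb *req;

        lockdep_assert_held(&ctx->uring_lock);
        hlist_for_each_entry(req, &ctx->cancel_table.hbs[index].list, hash_node) {
                if (req->cqe.user_data == user_data)
                        return req;
        }
        return NULL;
}

Paths that previously ran without the mutex held (io-wq issue, for example) now take it via io_ring_submit_lock() around the table access instead of relying on bucket spinlocks.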
Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 1 - io_uring/cancel.c | 4 +--- io_uring/poll.c | 39 +++++++++------------------------- 3 files changed, 11 insertions(+), 33 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index d8ca27da1341..9c7e1d3f06e5 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -67,7 +67,6 @@ struct io_file_table { }; struct io_hash_bucket { - spinlock_t lock; struct hlist_head list; } ____cacheline_aligned_in_smp; diff --git a/io_uring/cancel.c b/io_uring/cancel.c index a6e58a20efdd..755dd5506a5f 100644 --- a/io_uring/cancel.c +++ b/io_uring/cancel.c @@ -236,10 +236,8 @@ void init_hash_table(struct io_hash_table *table, unsigned size) { unsigned int i; - for (i = 0; i < size; i++) { - spin_lock_init(&table->hbs[i].lock); + for (i = 0; i < size; i++) INIT_HLIST_HEAD(&table->hbs[i].list); - } } static int __io_sync_cancel(struct io_uring_task *tctx, diff --git a/io_uring/poll.c b/io_uring/poll.c index a0d1a09c5a20..2d6698fb7400 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -728,7 +728,6 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, for (i = 0; i < nr_buckets; i++) { struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; - spin_lock(&hb->lock); hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { if (io_match_task_safe(req, tsk, cancel_all)) { hlist_del_init(&req->hash_node); @@ -736,22 +735,17 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, found = true; } } - spin_unlock(&hb->lock); } return found; } static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, - struct io_cancel_data *cd, - struct io_hash_bucket **out_bucket) + struct io_cancel_data *cd) { struct io_kiocb *req; u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits); struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index]; - *out_bucket = NULL; - - spin_lock(&hb->lock); hlist_for_each_entry(req, &hb->list, hash_node) { if (cd->data != req->cqe.user_data) continue; @@ -761,34 +755,25 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only, if (io_cancel_match_sequence(req, cd->seq)) continue; } - *out_bucket = hb; return req; } - spin_unlock(&hb->lock); return NULL; } static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx, - struct io_cancel_data *cd, - struct io_hash_bucket **out_bucket) + struct io_cancel_data *cd) { unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits; struct io_kiocb *req; int i; - *out_bucket = NULL; - for (i = 0; i < nr_buckets; i++) { struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i]; - spin_lock(&hb->lock); hlist_for_each_entry(req, &hb->list, hash_node) { - if (io_cancel_req_match(req, cd)) { - *out_bucket = hb; + if (io_cancel_req_match(req, cd)) return req; - } } - spin_unlock(&hb->lock); } return NULL; } @@ -806,20 +791,19 @@ static int io_poll_disarm(struct io_kiocb *req) static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) { - struct io_hash_bucket *bucket; struct io_kiocb *req; if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ANY)) - req = io_poll_file_find(ctx, cd, &bucket); + req = io_poll_file_find(ctx, cd); else - req = io_poll_find(ctx, false, cd, &bucket); + req = io_poll_find(ctx, false, cd); - if (req) + if (req) { io_poll_cancel_req(req); - if (bucket) - spin_unlock(&bucket->lock); - return req ? 
0 : -ENOENT; + return 0; + } + return -ENOENT; } int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, @@ -918,15 +902,12 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update); struct io_ring_ctx *ctx = req->ctx; struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, }; - struct io_hash_bucket *bucket; struct io_kiocb *preq; int ret2, ret = 0; io_ring_submit_lock(ctx, issue_flags); - preq = io_poll_find(ctx, true, &cd, &bucket); + preq = io_poll_find(ctx, true, &cd); ret2 = io_poll_disarm(preq); - if (bucket) - spin_unlock(&bucket->lock); if (ret2) { ret = ret2; goto out; -- 2.50.1 From 8abf47a8d61c9e8314ae4cfa27e18c8df67c37bc Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 14:30:39 -0600 Subject: [PATCH 12/16] io_uring/cancel: get rid of init_hash_table() helper All it does is initialize the lists, just move the INIT_HLIST_HEAD() into the one caller. Signed-off-by: Jens Axboe --- io_uring/cancel.c | 8 -------- io_uring/cancel.h | 1 - io_uring/io_uring.c | 4 +++- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/io_uring/cancel.c b/io_uring/cancel.c index 755dd5506a5f..cc3475b22ae5 100644 --- a/io_uring/cancel.c +++ b/io_uring/cancel.c @@ -232,14 +232,6 @@ done: return IOU_OK; } -void init_hash_table(struct io_hash_table *table, unsigned size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - INIT_HLIST_HEAD(&table->hbs[i].list); -} - static int __io_sync_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd, int fd) { diff --git a/io_uring/cancel.h b/io_uring/cancel.h index b33995e00ba9..bbfea2cd00ea 100644 --- a/io_uring/cancel.h +++ b/io_uring/cancel.h @@ -20,7 +20,6 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags); int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd, unsigned int issue_flags); -void init_hash_table(struct io_hash_table *table, unsigned size); int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg); bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index f4e069cd03a5..6aac72b2958f 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -263,13 +263,15 @@ static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits) { unsigned hash_buckets = 1U << bits; size_t hash_size = hash_buckets * sizeof(table->hbs[0]); + int i; table->hbs = kmalloc(hash_size, GFP_KERNEL); if (!table->hbs) return -ENOMEM; table->hash_bits = bits; - init_hash_table(table, hash_buckets); + for (i = 0; i < hash_buckets; i++) + INIT_HLIST_HEAD(&table->hbs[i].list); return 0; } -- 2.50.1 From b6b3eb19dd86ecc3f188bd419f12cdfcfbeda5e7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 Sep 2024 17:11:32 -0600 Subject: [PATCH 13/16] io_uring: move cancel hash tables to kvmalloc/kvfree Convert to using kvmalloc/kfree() for the hash tables, and while at it, make it handle low memory situations better. 
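The low-memory handling amounts to shrinking the table instead of failing outright. The allocation loop, shown in isolation (illustrative, in-kernel style; the in-tree version is io_alloc_hash_table() in the hunk below):

/* Halve the bucket count until the allocation succeeds or we are down to 2 buckets. */
static void *alloc_hash_buckets(size_t bucket_size, unsigned int *bits)
{
        void *hbs;

        do {
                hbs = kvmalloc_array(1U << *bits, bucket_size,
                                     GFP_KERNEL_ACCOUNT);
                if (hbs || *bits == 1)
                        return hbs;
                (*bits)--;
        } while (1);
}

The caller records the possibly reduced bit count as the table's hash_bits, and teardown switches from kfree() to kvfree() to match.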
Signed-off-by: Jens Axboe --- io_uring/io_uring.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 6aac72b2958f..d7ad4ea5f40b 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -261,13 +261,19 @@ static __cold void io_fallback_req_func(struct work_struct *work) static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits) { - unsigned hash_buckets = 1U << bits; - size_t hash_size = hash_buckets * sizeof(table->hbs[0]); + unsigned int hash_buckets; int i; - table->hbs = kmalloc(hash_size, GFP_KERNEL); - if (!table->hbs) - return -ENOMEM; + do { + hash_buckets = 1U << bits; + table->hbs = kvmalloc_array(hash_buckets, sizeof(table->hbs[0]), + GFP_KERNEL_ACCOUNT); + if (table->hbs) + break; + if (bits == 1) + return -ENOMEM; + bits--; + } while (1); table->hash_bits = bits; for (i = 0; i < hash_buckets; i++) @@ -360,7 +366,7 @@ err: io_alloc_cache_free(&ctx->uring_cache, kfree); io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free); io_futex_cache_free(ctx); - kfree(ctx->cancel_table.hbs); + kvfree(ctx->cancel_table.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); return NULL; @@ -2772,7 +2778,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) if (ctx->hash_map) io_wq_put_hash(ctx->hash_map); io_napi_free(ctx); - kfree(ctx->cancel_table.hbs); + kvfree(ctx->cancel_table.hbs); xa_destroy(&ctx->io_bl_xa); kfree(ctx); } -- 2.50.1 From 1e6e7602cc9fdeaf7e2593755409e8d50545ed69 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 18 Oct 2024 17:07:31 +0100 Subject: [PATCH 14/16] io_uring: kill io_llist_xchg io_llist_xchg is only used to set the list to NULL, which can also be done with llist_del_all(). Use the latter and kill io_llist_xchg. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d6765112680d2e86a58b76166b7513391ff4e5d7.1729264960.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/io_uring.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index d7ad4ea5f40b..c0358a8d85d2 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1081,20 +1081,6 @@ struct llist_node *io_handle_tw_list(struct llist_node *node, return node; } -/** - * io_llist_xchg - swap all entries in a lock-less list - * @head: the head of lock-less list to delete all entries - * @new: new entry as the head of the list - * - * If list is empty, return NULL, otherwise, return the pointer to the first entry. - * The order of entries returned is from the newest to the oldest added one. - */ -static inline struct llist_node *io_llist_xchg(struct llist_head *head, - struct llist_node *new) -{ - return xchg(&head->first, new); -} - static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync) { struct llist_node *node = llist_del_all(&tctx->task_list); @@ -1316,7 +1302,7 @@ again: * llists are in reverse order, flip it back the right way before * running the pending items. 
*/ - node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL)); + node = llist_reverse_order(llist_del_all(&ctx->work_llist)); while (node) { struct llist_node *next = node->next; struct io_kiocb *req = container_of(node, struct io_kiocb, -- 2.50.1 From 9b296c625ac1d2ca9b129743c3f886bf7a0f471d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 18 Oct 2024 17:07:59 +0100 Subject: [PATCH 15/16] io_uring: static_key for !IORING_SETUP_NO_SQARRAY IORING_SETUP_NO_SQARRAY should be preferred and used by default by liburing, optimise flag checking in io_get_sqe() with a static key. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c164a48542fbb080115e2377ecf160c758562742.1729264988.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/io_uring.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index c0358a8d85d2..fa9d31034c62 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -149,6 +150,8 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, static void io_queue_sqe(struct io_kiocb *req); +static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray); + struct kmem_cache *req_cachep; static struct workqueue_struct *iou_wq __ro_after_init; @@ -2254,7 +2257,8 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) unsigned mask = ctx->sq_entries - 1; unsigned head = ctx->cached_sq_head++ & mask; - if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) { + if (static_branch_unlikely(&io_key_has_sqarray) && + (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) { head = READ_ONCE(ctx->sq_array[head]); if (unlikely(head >= ctx->sq_entries)) { /* drop invalid entries */ @@ -2758,6 +2762,9 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) } io_rings_free(ctx); + if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) + static_branch_dec(&io_key_has_sqarray); + percpu_ref_exit(&ctx->refs); free_uid(ctx->user); io_req_caches_free(ctx); @@ -3549,6 +3556,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, ctx->clockid = CLOCK_MONOTONIC; ctx->clock_offset = 0; + if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) + static_branch_inc(&io_key_has_sqarray); + if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && !(ctx->flags & IORING_SETUP_IOPOLL) && !(ctx->flags & IORING_SETUP_SQPOLL)) -- 2.50.1 From 2946f08ae9ed650b94e0ffebcdfdda8de76bd926 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 18 Oct 2024 17:14:00 +0100 Subject: [PATCH 16/16] io_uring: clean up cqe trace points We have too many helpers posting CQEs, instead of tracing completion events before filling in a CQE and thus having to pass all the data, set the CQE first, pass it to the tracing helper and let it extract everything it needs. 
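On the userspace side of the previous patch, a ring created with IORING_SETUP_NO_SQARRAY never increments the new static key, so the SQ-array branch in io_get_sqe() stays disabled as long as no ring with a legacy SQ array exists. A minimal sketch, assuming liburing and a kernel new enough to accept the flag:

#include <liburing.h>

/* Prefer IORING_SETUP_NO_SQARRAY; the entry count here is arbitrary. */
static int setup_ring(struct io_uring *ring)
{
        int ret = io_uring_queue_init(256, ring, IORING_SETUP_NO_SQARRAY);

        /* Older kernels reject unknown setup flags with -EINVAL */
        if (ret == -EINVAL)
                ret = io_uring_queue_init(256, ring, 0);
        return ret;
}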
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b83c1ca9ee5aed2df0f3bb743bf5ed699cce4c86.1729267437.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 5 +++++ include/trace/events/io_uring.h | 24 +++++++++--------------- io_uring/io_uring.c | 4 ++-- io_uring/io_uring.h | 7 +++---- 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 9c7e1d3f06e5..391087144666 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -662,4 +662,9 @@ struct io_overflow_cqe { struct io_uring_cqe cqe; }; +static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx) +{ + return ctx->flags & IORING_SETUP_CQE32; +} + #endif diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 412c9c210a32..fb81c533b310 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link, * io_uring_complete - called when completing an SQE * * @ctx: pointer to a ring context structure - * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @res: result of the request - * @cflags: completion flags - * @extra1: extra 64-bit data for CQE32 - * @extra2: extra 64-bit data for CQE32 - * + * @req: (optional) pointer to a submitted request + * @cqe: pointer to the filled in CQE being posted */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags, - u64 extra1, u64 extra2), +TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe), - TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2), + TP_ARGS(ctx, req, cqe), TP_STRUCT__entry ( __field( void *, ctx ) @@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete, TP_fast_assign( __entry->ctx = ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->res = res; - __entry->cflags = cflags; - __entry->extra1 = extra1; - __entry->extra2 = extra2; + __entry->user_data = cqe->user_data; + __entry->res = cqe->res; + __entry->cflags = cqe->flags; + __entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0; + __entry->extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0; ), TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x " diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index fa9d31034c62..58b401900b41 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -828,8 +828,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, * the ring. 
*/ if (likely(io_get_cqe(ctx, &cqe))) { - trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); - WRITE_ONCE(cqe->user_data, user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); @@ -838,6 +836,8 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, WRITE_ONCE(cqe->big_cqe[0], 0); WRITE_ONCE(cqe->big_cqe[1], 0); } + + trace_io_uring_complete(ctx, NULL, cqe); return true; } return false; diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 70b6675941ff..9cd9a127e9ed 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -189,16 +189,15 @@ static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, if (unlikely(!io_get_cqe(ctx, &cqe))) return false; - if (trace_io_uring_complete_enabled()) - trace_io_uring_complete(req->ctx, req, req->cqe.user_data, - req->cqe.res, req->cqe.flags, - req->big_cqe.extra1, req->big_cqe.extra2); memcpy(cqe, &req->cqe, sizeof(*cqe)); if (ctx->flags & IORING_SETUP_CQE32) { memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe)); memset(&req->big_cqe, 0, sizeof(req->big_cqe)); } + + if (trace_io_uring_complete_enabled()) + trace_io_uring_complete(req->ctx, req, cqe); return true; } -- 2.50.1
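The extra1/extra2 values the reworked trace point now pulls straight from the CQE are the same data a CQE32 ring exposes to userspace via big_cqe[]. A hedged reader-side sketch, assuming liburing and a ring created with IORING_SETUP_CQE32 (big_cqe[] is only valid on such rings):

#include <liburing.h>
#include <stdio.h>

/* Print one completion, including the big-CQE payload. */
static int dump_one_cqe(struct io_uring *ring)
{
        struct io_uring_cqe *cqe;
        int ret = io_uring_wait_cqe(ring, &cqe);

        if (ret < 0)
                return ret;
        printf("user_data=%llu res=%d flags=0x%x extra1=%llu extra2=%llu\n",
               (unsigned long long)cqe->user_data, cqe->res, cqe->flags,
               (unsigned long long)cqe->big_cqe[0],
               (unsigned long long)cqe->big_cqe[1]);
        io_uring_cqe_seen(ring, cqe);
        return 0;
}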