The current situation with buffer group id juggling is not ideal.
req->buf_index first stores the bgid, then it's overwritten by a buffer
id, and then it can get restored back on recycling, etc. It's not easy
to control, and it's not handled consistently across request types,
with receive requests saving and restoring the bgid by hand.
This is a prep patch that adds a buffer group id argument to
io_buffer_select(). The caller is now responsible for stashing a copy
of the bgid and passing it into the function.
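
For illustration, here is a minimal standalone sketch of the pattern
this moves to (the fake_req/fake_sr types and select_buffer() helper
are made-up stand-ins, not kernel code): the bgid is copied out of
req->buf_index once at prep time, so selection overwriting buf_index
with a buffer id can no longer lose the group id on a retry.

    #include <stdio.h>

    struct fake_req {
        unsigned buf_index;     /* bgid at prep, buffer id after selection */
    };

    struct fake_sr {
        unsigned buf_group;     /* prep-time copy of the bgid */
    };

    /* Selection takes the bgid explicitly instead of trusting
     * req->buf_index, which it overwrites with the picked buffer id.
     */
    static void select_buffer(struct fake_req *req, unsigned buf_group)
    {
        printf("selecting from group %u\n", buf_group);
        req->buf_index = 42;    /* now holds a buffer id, not the bgid */
    }

    int main(void)
    {
        struct fake_req req = { .buf_index = 7 };           /* sqe->buf_index */
        struct fake_sr sr = { .buf_group = req.buf_index }; /* prep stashes it */

        select_buffer(&req, sr.buf_group);
        select_buffer(&req, sr.buf_group); /* retry still sees group 7 */
        return 0;
    }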
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/a210d6427cc3f4f42271a6853274cd5a50e56820.1743437358.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 }
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-                             unsigned int issue_flags)
+                             unsigned buf_group, unsigned int issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
 
        io_ring_submit_lock(req->ctx, issue_flags);
 
-       bl = io_buffer_get_list(ctx, req->buf_index);
+       bl = io_buffer_get_list(ctx, buf_group);
        if (likely(bl)) {
                if (bl->flags & IOBL_BUF_RING)
                        ret = io_ring_buffer_select(req, len, bl, issue_flags);
 
 };
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
-                             unsigned int issue_flags);
+                             unsigned buf_group, unsigned int issue_flags);
 int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
                      unsigned int issue_flags);
 int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
 
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
+       if (req->flags & REQ_F_BUFFER_SELECT)
+               sr->buf_group = req->buf_index;
        if (sr->flags & IORING_RECVSEND_BUNDLE) {
                if (req->opcode == IORING_OP_SENDMSG)
                        return -EINVAL;
-               if (!(req->flags & REQ_F_BUFFER_SELECT))
-                       return -EINVAL;
                sr->msg_flags |= MSG_WAITALL;
-               sr->buf_group = req->buf_index;
                req->buf_list = NULL;
                req->flags |= REQ_F_MULTISHOT;
        }
                void __user *buf;
                size_t len = sr->len;
 
-               buf = io_buffer_select(req, &len, issue_flags);
+               buf = io_buffer_select(req, &len, sr->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
 
                void __user *buf;
 
                *len = sr->len;
-               buf = io_buffer_select(req, len, issue_flags);
+               buf = io_buffer_select(req, len, sr->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
 
                return io_import_vec(ddir, req, io, buf, sqe_len);
 
        if (io_do_buffer_select(req)) {
-               buf = io_buffer_select(req, &sqe_len, issue_flags);
+               buf = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                rw->addr = (unsigned long) buf;
                        int ddir)
 {
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+       struct io_async_rw *io;
        unsigned ioprio;
        u64 attr_type_mask;
        int ret;
 
        if (io_rw_alloc_async(req))
                return -ENOMEM;
+       io = req->async_data;
 
        rw->kiocb.ki_pos = READ_ONCE(sqe->off);
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);
+       io->buf_group = req->buf_index;
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
 
                struct iov_iter                 iter;
                struct iov_iter_state           iter_state;
                struct iovec                    fast_iov;
+               unsigned                        buf_group;
+
                /*
                 * wpq is for buffered io, while meta fields are used with
                 * direct io