struct io_sr_msg        sr_msg;
        };
 
-       const struct io_uring_sqe       *sqe;
        struct io_async_ctx             *io;
        struct file                     *ring_file;
        int                             ring_fd;
 {
        bool do_hashed = false;
 
-       if (req->sqe) {
-               switch (req->opcode) {
-               case IORING_OP_WRITEV:
-               case IORING_OP_WRITE_FIXED:
-                       /* only regular files should be hashed for writes */
-                       if (req->flags & REQ_F_ISREG)
-                               do_hashed = true;
-                       /* fall-through */
-               case IORING_OP_READV:
-               case IORING_OP_READ_FIXED:
-               case IORING_OP_SENDMSG:
-               case IORING_OP_RECVMSG:
-               case IORING_OP_ACCEPT:
-               case IORING_OP_POLL_ADD:
-               case IORING_OP_CONNECT:
-                       /*
-                        * We know REQ_F_ISREG is not set on some of these
-                        * opcodes, but this enables us to keep the check in
-                        * just one place.
-                        */
-                       if (!(req->flags & REQ_F_ISREG))
-                               req->work.flags |= IO_WQ_WORK_UNBOUND;
-                       break;
-               }
-               if (io_req_needs_user(req))
-                       req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+       switch (req->opcode) {
+       case IORING_OP_WRITEV:
+       case IORING_OP_WRITE_FIXED:
+               /* only regular files should be hashed for writes */
+               if (req->flags & REQ_F_ISREG)
+                       do_hashed = true;
+               /* fall-through */
+       case IORING_OP_READV:
+       case IORING_OP_READ_FIXED:
+       case IORING_OP_SENDMSG:
+       case IORING_OP_RECVMSG:
+       case IORING_OP_ACCEPT:
+       case IORING_OP_POLL_ADD:
+       case IORING_OP_CONNECT:
+               /*
+                * We know REQ_F_ISREG is not set on some of these
+                * opcodes, but this enables us to keep the check in
+                * just one place.
+                */
+               if (!(req->flags & REQ_F_ISREG))
+                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+               break;
        }
+       if (io_req_needs_user(req))
+               req->work.flags |= IO_WQ_WORK_NEEDS_USER;
 
        *link = io_prep_linked_timeout(req);
        return do_hashed;
 }
 
-static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     bool force_nonblock)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
        unsigned ioprio;
        int ret;
 
-       if (!sqe)
-               return 0;
        if (!req->file)
                return -EBADF;
 
                kiocb->ki_complete = io_complete_rw;
        }
 
-       req->rw.addr = READ_ONCE(req->sqe->addr);
-       req->rw.len = READ_ONCE(req->sqe->len);
+       req->rw.addr = READ_ONCE(sqe->addr);
+       req->rw.len = READ_ONCE(sqe->len);
        /* we own ->private, reuse it for the buffer index */
        req->rw.kiocb.private = (void *) (unsigned long)
-                                       READ_ONCE(req->sqe->buf_index);
-       req->sqe = NULL;
+                                       READ_ONCE(sqe->buf_index);
        return 0;
 }
 
        return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
-                       struct iov_iter *iter, bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       if (req->sqe) {
-               ret = io_prep_rw(req, force_nonblock);
-               if (ret)
-                       return ret;
+       ret = io_prep_rw(req, sqe, force_nonblock);
+       if (ret)
+               return ret;
 
-               if (unlikely(!(req->file->f_mode & FMODE_READ)))
-                       return -EBADF;
-       }
+       if (unlikely(!(req->file->f_mode & FMODE_READ)))
+               return -EBADF;
 
-       return io_import_iovec(READ, req, iovec, iter);
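+       /*
+        * If an async context has been allocated, the request may be deferred
+        * or punted to io-wq: import the iovec now, while the sqe data is
+        * still valid, and save it in req->io. Clear req->io around the
+        * import so io_import_iovec() does a fresh import rather than
+        * reusing previously saved state.
+        */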
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
        size_t iov_count;
        ssize_t io_size, ret;
 
-       ret = io_read_prep(req, &iovec, &iter, force_nonblock);
+       ret = io_import_iovec(READ, req, &iovec, &iter);
        if (ret < 0)
                return ret;
 
        return ret;
 }
 
-static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
-                        struct iov_iter *iter, bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                        bool force_nonblock)
 {
+       struct io_async_ctx *io;
+       struct iov_iter iter;
        ssize_t ret;
 
-       if (req->sqe) {
-               ret = io_prep_rw(req, force_nonblock);
-               if (ret)
-                       return ret;
+       ret = io_prep_rw(req, sqe, force_nonblock);
+       if (ret)
+               return ret;
 
-               if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
-                       return -EBADF;
-       }
+       if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
+               return -EBADF;
 
-       return io_import_iovec(WRITE, req, iovec, iter);
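+       /* same up-front import as io_read_prep(), for the deferred case */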
+       if (!req->io)
+               return 0;
+
+       io = req->io;
+       io->rw.iov = io->rw.fast_iov;
+       req->io = NULL;
+       ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+       req->io = io;
+       if (ret < 0)
+               return ret;
+
+       io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+       return 0;
 }
 
 static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
        size_t iov_count;
        ssize_t ret, io_size;
 
-       ret = io_write_prep(req, &iovec, &iter, force_nonblock);
+       ret = io_import_iovec(WRITE, req, &iovec, &iter);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int io_prep_fsync(struct io_kiocb *req)
+static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!req->sqe)
-               return 0;
        if (!req->file)
                return -EBADF;
 
 
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
-       req->sqe = NULL;
        return 0;
 }
 
                    bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_fsync(req);
-       if (ret)
-               return ret;
 
        /* fsync always requires a blocking context */
        if (force_nonblock) {
        return 0;
 }
 
-static int io_prep_sfr(struct io_kiocb *req)
+static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!sqe)
-               return 0;
        if (!req->file)
                return -EBADF;
 
        req->sync.off = READ_ONCE(sqe->off);
        req->sync.len = READ_ONCE(sqe->len);
        req->sync.flags = READ_ONCE(sqe->sync_range_flags);
-       req->sqe = NULL;
        return 0;
 }
 
                              bool force_nonblock)
 {
        struct io_wq_work *work, *old_work;
-       int ret;
-
-       ret = io_prep_sfr(req);
-       if (ret)
-               return ret;
 
        /* sync_file_range always requires a blocking context */
        if (force_nonblock) {
 }
 #endif
 
-static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_sr_msg *sr = &req->sr_msg;
-       int ret;
+       struct io_async_ctx *io = req->io;
 
-       if (!sqe)
-               return 0;
        sr->msg_flags = READ_ONCE(sqe->msg_flags);
        sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
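+       /*
+        * With an async context allocated (deferred or linked request), copy
+        * the user msghdr and iovec in now so async execution sees stable data.
+        */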
+       if (!io)
+               return 0;
+
        io->msg.iov = io->msg.fast_iov;
-       ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+       return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
                                        &io->msg.iov);
-       req->sqe = NULL;
-       return ret;
 #else
        return -EOPNOTSUPP;
 #endif
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_sendmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
                flags = req->sr_msg.msg_flags;
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
 #endif
 }
 
-static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_prep(struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
        struct io_sr_msg *sr = &req->sr_msg;
-       int ret;
+       struct io_async_ctx *io = req->io;
+
+       sr->msg_flags = READ_ONCE(sqe->msg_flags);
+       sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
 
-       if (!req->sqe)
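+       /* as in io_sendmsg_prep(): copy the user msghdr in for async reuse */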
+       if (!io)
                return 0;
 
-       sr->msg_flags = READ_ONCE(req->sqe->msg_flags);
-       sr->msg = u64_to_user_ptr(READ_ONCE(req->sqe->addr));
        io->msg.iov = io->msg.fast_iov;
-       ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+       return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
                                        &io->msg.uaddr, &io->msg.iov);
-       req->sqe = NULL;
-       return ret;
 #else
        return -EOPNOTSUPP;
 #endif
                                kmsg->iov = kmsg->fast_iov;
                        kmsg->msg.msg_iter.iov = kmsg->iov;
                } else {
+                       struct io_sr_msg *sr = &req->sr_msg;
+
                        kmsg = &io.msg;
                        kmsg->msg.msg_name = &addr;
-                       ret = io_recvmsg_prep(req, &io);
+
+                       io.msg.iov = io.msg.fast_iov;
+                       ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
+                                       sr->msg_flags, &io.msg.uaddr,
+                                       &io.msg.iov);
                        if (ret)
-                               goto out;
+                               return ret;
                }
 
                flags = req->sr_msg.msg_flags;
                        ret = -EINTR;
        }
 
-out:
        if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
                kfree(kmsg->iov);
        io_cqring_add_event(req, ret);
 #endif
 }
 
-static int io_accept_prep(struct io_kiocb *req)
+static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_accept *accept = &req->accept;
 
-       if (!req->sqe)
-               return 0;
-
        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index)
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
-       req->sqe = NULL;
        return 0;
 #else
        return -EOPNOTSUPP;
 #if defined(CONFIG_NET)
        int ret;
 
-       ret = io_accept_prep(req);
-       if (ret)
-               return ret;
-
        ret = __io_accept(req, nxt, force_nonblock);
        if (ret == -EAGAIN && force_nonblock) {
                req->work.func = io_accept_finish;
 #endif
 }
 
-static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_NET)
-       const struct io_uring_sqe *sqe = req->sqe;
-       int ret;
+       struct io_connect *conn = &req->connect;
+       struct io_async_ctx *io = req->io;
 
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
                return -EINVAL;
        if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
                return -EINVAL;
 
-       req->connect.addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
-       req->connect.addr_len =  READ_ONCE(sqe->addr2);
-       ret = move_addr_to_kernel(req->connect.addr, req->connect.addr_len,
+       conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       conn->addr_len = READ_ONCE(sqe->addr2);
+
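+       /* with an async context, copy the sockaddr in for later async use */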
+       if (!io)
+               return 0;
+
+       return move_addr_to_kernel(conn->addr, conn->addr_len,
                                        &io->connect.address);
-       req->sqe = NULL;
-       return ret;
 #else
        return -EOPNOTSUPP;
 #endif
        if (req->io) {
                io = req->io;
        } else {
-               ret = io_connect_prep(req, &__io);
+               ret = move_addr_to_kernel(req->connect.addr,
+                                               req->connect.addr_len,
+                                               &__io.connect.address);
                if (ret)
                        goto out;
                io = &__io;
        return -ENOENT;
 }
 
-static int io_poll_remove_prep(struct io_kiocb *req)
+static int io_poll_remove_prep(struct io_kiocb *req,
+                              const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
                return -EINVAL;
 
        req->poll.addr = READ_ONCE(sqe->addr);
-       req->sqe = NULL;
        return 0;
 }
 
        u64 addr;
        int ret;
 
-       ret = io_poll_remove_prep(req);
-       if (ret)
-               return ret;
-
        addr = req->poll.addr;
        spin_lock_irq(&ctx->completion_lock);
        ret = io_poll_cancel(ctx, addr);
        hlist_add_head(&req->hash_node, list);
 }
 
-static int io_poll_add_prep(struct io_kiocb *req)
+static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_poll_iocb *poll = &req->poll;
        u16 events;
 
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
 
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
-       req->sqe = NULL;
        return 0;
 }
 
        struct io_poll_table ipt;
        bool cancel = false;
        __poll_t mask;
-       int ret;
-
-       ret = io_poll_add_prep(req);
-       if (ret)
-               return ret;
 
        INIT_IO_WORK(&req->work, io_poll_complete_work);
        INIT_HLIST_NODE(&req->hash_node);
        return 0;
 }
 
-static int io_timeout_remove_prep(struct io_kiocb *req)
+static int io_timeout_remove_prep(struct io_kiocb *req,
+                                 const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
        if (req->timeout.flags)
                return -EINVAL;
 
-       req->sqe = NULL;
        return 0;
 }
 
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_timeout_remove_prep(req);
-       if (ret)
-               return ret;
-
        spin_lock_irq(&ctx->completion_lock);
        ret = io_timeout_cancel(ctx, req->timeout.addr);
 
        return 0;
 }
 
-static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                           bool is_timeout_link)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
        struct io_timeout_data *data;
        unsigned flags;
 
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
 
        req->timeout.count = READ_ONCE(sqe->off);
 
-       if (!io && io_alloc_async_ctx(req))
+       if (!req->io && io_alloc_async_ctx(req))
                return -ENOMEM;
 
        data = &req->io->timeout;
                data->mode = HRTIMER_MODE_REL;
 
        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-       req->sqe = NULL;
        return 0;
 }
 
        struct io_timeout_data *data;
        struct list_head *entry;
        unsigned span = 0;
-       int ret;
 
-       ret = io_timeout_prep(req, req->io, false);
-       if (ret)
-               return ret;
        data = &req->io->timeout;
 
        /*
        io_put_req_find_next(req, nxt);
 }
 
-static int io_async_cancel_prep(struct io_kiocb *req)
+static int io_async_cancel_prep(struct io_kiocb *req,
+                               const struct io_uring_sqe *sqe)
 {
-       const struct io_uring_sqe *sqe = req->sqe;
-
-       if (!sqe)
-               return 0;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
                return -EINVAL;
 
        req->cancel.addr = READ_ONCE(sqe->addr);
-       req->sqe = NULL;
        return 0;
 }
 
 static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       int ret;
-
-       ret = io_async_cancel_prep(req);
-       if (ret)
-               return ret;
 
        io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
        return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req)
+static int io_req_defer_prep(struct io_kiocb *req,
+                            const struct io_uring_sqe *sqe)
 {
-       struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
-       struct io_async_ctx *io = req->io;
-       struct iov_iter iter;
        ssize_t ret = 0;
 
        switch (req->opcode) {
                break;
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_read_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_read_prep(req, sqe, true);
                break;
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
-               /* ensure prep does right import */
-               req->io = NULL;
-               ret = io_write_prep(req, &iovec, &iter, true);
-               req->io = io;
-               if (ret < 0)
-                       break;
-               io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
-               ret = 0;
+               ret = io_write_prep(req, sqe, true);
                break;
        case IORING_OP_POLL_ADD:
-               ret = io_poll_add_prep(req);
+               ret = io_poll_add_prep(req, sqe);
                break;
        case IORING_OP_POLL_REMOVE:
-               ret = io_poll_remove_prep(req);
+               ret = io_poll_remove_prep(req, sqe);
                break;
        case IORING_OP_FSYNC:
-               ret = io_prep_fsync(req);
+               ret = io_prep_fsync(req, sqe);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
-               ret = io_prep_sfr(req);
+               ret = io_prep_sfr(req, sqe);
                break;
        case IORING_OP_SENDMSG:
-               ret = io_sendmsg_prep(req, io);
+               ret = io_sendmsg_prep(req, sqe);
                break;
        case IORING_OP_RECVMSG:
-               ret = io_recvmsg_prep(req, io);
+               ret = io_recvmsg_prep(req, sqe);
                break;
        case IORING_OP_CONNECT:
-               ret = io_connect_prep(req, io);
+               ret = io_connect_prep(req, sqe);
                break;
        case IORING_OP_TIMEOUT:
-               ret = io_timeout_prep(req, io, false);
+               ret = io_timeout_prep(req, sqe, false);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
-               ret = io_timeout_remove_prep(req);
+               ret = io_timeout_remove_prep(req, sqe);
                break;
        case IORING_OP_ASYNC_CANCEL:
-               ret = io_async_cancel_prep(req);
+               ret = io_async_cancel_prep(req, sqe);
                break;
        case IORING_OP_LINK_TIMEOUT:
-               ret = io_timeout_prep(req, io, true);
+               ret = io_timeout_prep(req, sqe, true);
                break;
        case IORING_OP_ACCEPT:
-               ret = io_accept_prep(req);
+               ret = io_accept_prep(req, sqe);
                break;
        default:
                printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
        return ret;
 }
 
-static int io_req_defer(struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
        if (!req_need_defer(req) && list_empty(&ctx->defer_list))
                return 0;
 
-       if (io_alloc_async_ctx(req))
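+       /* an async context may already exist if this req was prepped as a link */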
+       if (!req->io && io_alloc_async_ctx(req))
                return -EAGAIN;
 
-       ret = io_req_defer_prep(req);
+       ret = io_req_defer_prep(req, sqe);
        if (ret < 0)
                return ret;
 
        return -EIOCBQUEUED;
 }
 
-__attribute__((nonnull))
-static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
-                       bool force_nonblock)
+static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                       struct io_kiocb **nxt, bool force_nonblock)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
                ret = io_nop(req);
                break;
        case IORING_OP_READV:
-               ret = io_read(req, nxt, force_nonblock);
-               break;
-       case IORING_OP_WRITEV:
-               ret = io_write(req, nxt, force_nonblock);
-               break;
        case IORING_OP_READ_FIXED:
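+               /*
+                * A NULL sqe means the request was already prepped earlier
+                * (deferred, linked, or punted to io-wq); only prep when a
+                * submission-time sqe is passed in.
+                */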
+               if (sqe) {
+                       ret = io_read_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_read(req, nxt, force_nonblock);
                break;
+       case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
+               if (sqe) {
+                       ret = io_write_prep(req, sqe, force_nonblock);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_write(req, nxt, force_nonblock);
                break;
        case IORING_OP_FSYNC:
+               if (sqe) {
+                       ret = io_prep_fsync(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_fsync(req, nxt, force_nonblock);
                break;
        case IORING_OP_POLL_ADD:
+               if (sqe) {
+                       ret = io_poll_add_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_poll_add(req, nxt);
                break;
        case IORING_OP_POLL_REMOVE:
+               if (sqe) {
+                       ret = io_poll_remove_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_poll_remove(req);
                break;
        case IORING_OP_SYNC_FILE_RANGE:
+               if (sqe) {
+                       ret = io_prep_sfr(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sync_file_range(req, nxt, force_nonblock);
                break;
        case IORING_OP_SENDMSG:
+               if (sqe) {
+                       ret = io_sendmsg_prep(req, sqe);
+                       if (ret < 0)
+                               break;
+               }
                ret = io_sendmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_RECVMSG:
+               if (sqe) {
+                       ret = io_recvmsg_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_recvmsg(req, nxt, force_nonblock);
                break;
        case IORING_OP_TIMEOUT:
+               if (sqe) {
+                       ret = io_timeout_prep(req, sqe, false);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout(req);
                break;
        case IORING_OP_TIMEOUT_REMOVE:
+               if (sqe) {
+                       ret = io_timeout_remove_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_timeout_remove(req);
                break;
        case IORING_OP_ACCEPT:
+               if (sqe) {
+                       ret = io_accept_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_accept(req, nxt, force_nonblock);
                break;
        case IORING_OP_CONNECT:
+               if (sqe) {
+                       ret = io_connect_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_connect(req, nxt, force_nonblock);
                break;
        case IORING_OP_ASYNC_CANCEL:
+               if (sqe) {
+                       ret = io_async_cancel_prep(req, sqe);
+                       if (ret)
+                               break;
+               }
                ret = io_async_cancel(req, nxt);
                break;
        default:
                req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
                req->in_async = true;
                do {
-                       ret = io_issue_sqe(req, &nxt, false);
+                       ret = io_issue_sqe(req, NULL, &nxt, false);
                        /*
                         * We can get EAGAIN for polled IO even though we're
                         * forcing a sync submission from here, since we can't
        return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+                          const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned flags;
        int fd, ret;
 
-       flags = READ_ONCE(req->sqe->flags);
-       fd = READ_ONCE(req->sqe->fd);
+       flags = READ_ONCE(sqe->flags);
+       fd = READ_ONCE(sqe->fd);
 
        if (flags & IOSQE_IO_DRAIN)
                req->flags |= REQ_F_IO_DRAIN;
        return nxt;
 }
 
-static void __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_kiocb *linked_timeout;
        struct io_kiocb *nxt = NULL;
 again:
        linked_timeout = io_prep_linked_timeout(req);
 
-       ret = io_issue_sqe(req, &nxt, true);
+       ret = io_issue_sqe(req, sqe, &nxt, true);
 
        /*
         * We async punt it if the file wasn't marked NOWAIT, or if the file
        }
 }
 
-static void io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        int ret;
 
        }
        req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
 
-       ret = io_req_defer(req);
+       ret = io_req_defer(req, sqe);
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_cqring_add_event(req, ret);
                        io_double_put_req(req);
                }
        } else
-               __io_queue_sqe(req);
+               __io_queue_sqe(req, sqe);
 }
 
 static inline void io_queue_link_head(struct io_kiocb *req)
                io_cqring_add_event(req, -ECANCELED);
                io_double_put_req(req);
        } else
-               io_queue_sqe(req);
+               io_queue_sqe(req, NULL);
 }
 
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
                                IOSQE_IO_HARDLINK)
 
-static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
-                         struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
        /* enforce forwards compatibility on users */
-       if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
+       if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
                ret = -EINVAL;
                goto err_req;
        }
 
-       ret = io_req_set_file(state, req);
+       ret = io_req_set_file(state, req, sqe);
        if (unlikely(ret)) {
 err_req:
                io_cqring_add_event(req, ret);
        if (*link) {
                struct io_kiocb *prev = *link;
 
-               if (req->sqe->flags & IOSQE_IO_DRAIN)
+               if (sqe->flags & IOSQE_IO_DRAIN)
                        (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
 
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                if (io_alloc_async_ctx(req)) {
                        goto err_req;
                }
 
-               ret = io_req_defer_prep(req);
+               ret = io_req_defer_prep(req, sqe);
                if (ret) {
                        /* fail even hard links since we don't submit */
                        prev->flags |= REQ_F_FAIL_LINK;
                }
                trace_io_uring_link(ctx, req, prev);
                list_add_tail(&req->link_list, &prev->link_list);
-       } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+       } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                req->flags |= REQ_F_LINK;
-               if (req->sqe->flags & IOSQE_IO_HARDLINK)
+               if (sqe->flags & IOSQE_IO_HARDLINK)
                        req->flags |= REQ_F_HARDLINK;
 
                INIT_LIST_HEAD(&req->link_list);
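+               /*
+                * A link head isn't issued until the whole chain has been
+                * submitted, so prep it now while its sqe is still valid.
+                */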
+               ret = io_req_defer_prep(req, sqe);
+               if (ret)
+                       req->flags |= REQ_F_FAIL_LINK;
                *link = req;
        } else {
-               io_queue_sqe(req);
+               io_queue_sqe(req, sqe);
        }
 
        return true;
 }
 
 /*
- * Fetch an sqe, if one is available. Note that req->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
  * ensure that reads are stable, as we cannot rely on userspace always
  * being a good citizen. If members of the sqe are validated and then later
  * used, it's important that those reads are done through READ_ONCE() to
  * prevent a re-load down the line.
  */
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                         const struct io_uring_sqe **sqe_ptr)
 {
        struct io_rings *rings = ctx->rings;
        u32 *sq_array = ctx->sq_array;
                 * link list.
                 */
                req->sequence = ctx->cached_sq_head;
-               req->sqe = &ctx->sq_sqes[head];
-               req->opcode = READ_ONCE(req->sqe->opcode);
-               req->user_data = READ_ONCE(req->sqe->user_data);
+               *sqe_ptr = &ctx->sq_sqes[head];
+               req->opcode = READ_ONCE((*sqe_ptr)->opcode);
+               req->user_data = READ_ONCE((*sqe_ptr)->user_data);
                ctx->cached_sq_head++;
                return true;
        }
        }
 
        for (i = 0; i < nr; i++) {
+               const struct io_uring_sqe *sqe;
                struct io_kiocb *req;
                unsigned int sqe_flags;
 
                                submitted = -EAGAIN;
                        break;
                }
-               if (!io_get_sqring(ctx, req)) {
+               if (!io_get_sqring(ctx, req, &sqe)) {
                        __io_free_req(req);
                        break;
                }
                }
 
                submitted++;
-               sqe_flags = req->sqe->flags;
+               sqe_flags = sqe->flags;
 
                req->ring_file = ring_file;
                req->ring_fd = ring_fd;
                req->in_async = async;
                req->needs_fixed_file = async;
                trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
-               if (!io_submit_sqe(req, statep, &link))
+               if (!io_submit_sqe(req, sqe, statep, &link))
                        break;
                /*
                 * If previous wasn't linked and we have a linked command,