struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
 
-       if (req->flags & REQ_F_BUFFER_SELECTED)
-               return u64_to_user_ptr(kbuf->addr);
-
        io_ring_submit_lock(req->ctx, issue_flags);
 
        bl = io_buffer_get_list(ctx, req->buf_index);
        buf = io_buffer_select(req, &len, issue_flags);
        if (IS_ERR(buf))
                return PTR_ERR(buf);
+       req->rw.addr = (unsigned long) buf;
        iov[0].iov_base = buf;
-       iov[0].iov_len = (compat_size_t) len;
+       req->rw.len = iov[0].iov_len = (compat_size_t) len;
        return 0;
 }
 #endif
        buf = io_buffer_select(req, &len, issue_flags);
        if (IS_ERR(buf))
                return PTR_ERR(buf);
+       req->rw.addr = (unsigned long) buf;
        iov[0].iov_base = buf;
-       iov[0].iov_len = len;
+       req->rw.len = iov[0].iov_len = len;
        return 0;
 }
 
                                    unsigned int issue_flags)
 {
        if (req->flags & REQ_F_BUFFER_SELECTED) {
-               struct io_buffer *kbuf = req->kbuf;
-
-               iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
-               iov[0].iov_len = kbuf->len;
+               iov[0].iov_base = u64_to_user_ptr(req->rw.addr);
+               iov[0].iov_len = req->rw.len;
                return 0;
        }
        if (req->rw.len != 1)
        return __io_iov_buffer_select(req, iov, issue_flags);
 }
 
+/*
+ * True when a provided buffer still needs to be selected for this
+ * request: the SQE asked for buffer selection (REQ_F_BUFFER_SELECT)
+ * and no buffer has been picked yet (REQ_F_BUFFER_SELECTED is clear).
+ * NOTE(review): presumably this guards against re-selecting a buffer
+ * when a request is retried/re-issued — confirm against the callers.
+ */
+static inline bool io_do_buffer_select(struct io_kiocb *req)
+{
+       if (!(req->flags & REQ_F_BUFFER_SELECT))
+               return false;
+       return !(req->flags & REQ_F_BUFFER_SELECTED);
+}
+
 static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
                                       struct io_rw_state *s,
                                       unsigned int issue_flags)
        sqe_len = req->rw.len;
 
        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
-               if (req->flags & REQ_F_BUFFER_SELECT) {
+               if (io_do_buffer_select(req)) {
                        buf = io_buffer_select(req, &sqe_len, issue_flags);
                        if (IS_ERR(buf))
                                return ERR_CAST(buf);
+                       req->rw.addr = (unsigned long) buf;
                        req->rw.len = sqe_len;
                }
 
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg);
 
-       if (req->flags & REQ_F_BUFFER_SELECT) {
+       if (io_do_buffer_select(req)) {
                void __user *buf;
 
                buf = io_buffer_select(req, &sr->len, issue_flags);
        if (unlikely(!sock))
                return -ENOTSOCK;
 
-       if (req->flags & REQ_F_BUFFER_SELECT) {
+       if (io_do_buffer_select(req)) {
                void __user *buf;
 
                buf = io_buffer_select(req, &sr->len, issue_flags);