return __io_iov_buffer_select(req, iov, issue_flags);
 }
 
-static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
-                          struct iov_iter *iter, unsigned int issue_flags)
+static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
+                            struct io_rw_state *s, unsigned int issue_flags)
 {
+       struct iov_iter *iter = &s->iter;
        void __user *buf = u64_to_user_ptr(req->rw.addr);
        size_t sqe_len = req->rw.len;
        u8 opcode = req->opcode;
                        req->rw.len = sqe_len;
                }
 
-               ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+               ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
                *iovec = NULL;
                return ret;
        }
 
+       *iovec = s->fast_iov;
+
        if (req->flags & REQ_F_BUFFER_SELECT) {
                ret = io_iov_buffer_select(req, *iovec, issue_flags);
                if (!ret)
                              req->ctx->compat);
 }
 
+/*
+ * Import the iovec described by the request and, on success, snapshot the
+ * initial iov_iter state into s->iter_state so the iterator can be restored
+ * if the request has to be retried/reissued later.
+ *
+ * Returns the (non-negative) result of __io_import_iovec(), or a negative
+ * error code on failure (in which case no state is saved).
+ */
+static inline int io_import_iovec(int rw, struct io_kiocb *req,
+                                 struct iovec **iovec, struct io_rw_state *s,
+                                 unsigned int issue_flags)
+{
+       int ret;
+
+       ret = __io_import_iovec(rw, req, iovec, s, issue_flags);
+       if (unlikely(ret < 0))
+               return ret;
+       /* save state up-front so every import site gets a restorable iter */
+       iov_iter_save_state(&s->iter, &s->iter_state);
+       return ret;
+}
+
 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
 {
        return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
        struct io_async_rw *iorw = req->async_data;
-       struct iovec *iov = iorw->s.fast_iov;
+       struct iovec *iov;
        int ret;
 
        /* submission path, ->uring_lock should already be taken */
-       ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK);
+       ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK);
        if (unlikely(ret < 0))
                return ret;
 
+       /*
+        * io_import_iovec() now saves the iter state itself; iov is non-NULL
+        * only when a separate iovec was allocated and must be freed later.
+        */
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
-       iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        return 0;
 }
 
                iovec = NULL;
        } else {
                s = &__s;
-               iovec = s->fast_iov;
-               ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags);
-               if (ret < 0)
+               ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
+               if (unlikely(ret < 0))
                        return ret;
-
-               iov_iter_save_state(&s->iter, &s->iter_state);
        }
        req->result = iov_iter_count(&s->iter);
 
                iovec = NULL;
        } else {
                s = &__s;
-               iovec = s->fast_iov;
-               ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags);
-               if (ret < 0)
+               ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
+               if (unlikely(ret < 0))
                        return ret;
-               iov_iter_save_state(&s->iter, &s->iter_state);
        }
        req->result = iov_iter_count(&s->iter);