 	atomic_long_sub(nr_pages, &user->locked_vm);
 }
+void io_vec_free(struct iou_vec *iv);
+
+static inline void io_vec_reset_iovec(struct iou_vec *iv,
+				      struct iovec *iovec, unsigned nr)
+{
+	io_vec_free(iv);
+	iv->iovec = iovec;
+	iv->nr = nr;
+}
+
+static inline void io_alloc_cache_vec_kasan(struct iou_vec *iv)
+{
+	if (IS_ENABLED(CONFIG_KASAN))
+		io_vec_free(iv);
+}
+
 #endif
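Neither struct iou_vec itself nor the out-of-line io_vec_free() appears in the hunks quoted here; both are introduced elsewhere in the series. A minimal sketch of what the inline helpers above assume about them (the field names are fixed by the helpers; the rest is an assumption, and the in-tree definition also unions the iovec pointer with a struct bio_vec * for registered-buffer use, which nothing here relies on):

/* Sketch only: a heap-allocated iovec array plus its element count. */
struct iou_vec {
	struct iovec	*iovec;		/* kfree()-able array, or NULL */
	unsigned	nr;		/* number of entries in @iovec */
};

/* Sketch only: free the array and reset the vec so repeat frees are harmless. */
void io_vec_free(struct iou_vec *iv)
{
	if (!iv->iovec)
		return;
	kfree(iv->iovec);
	iv->iovec = NULL;
	iv->nr = 0;
}

With io_vec_free() behaving like this, io_vec_reset_iovec() is leak-safe (the old array is released before a new one is adopted) and io_alloc_cache_vec_kasan() leaves the vec empty, which the allocation-side hunk further down relies on.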
 	int ret, nr_segs;
 	struct iovec *iov;

-	if (io->free_iovec) {
-		nr_segs = io->free_iov_nr;
-		iov = io->free_iovec;
+	if (io->vec.iovec) {
+		nr_segs = io->vec.nr;
+		iov = io->vec.iovec;
 	} else {
 		nr_segs = 1;
 		iov = &io->fast_iov;
 		return ret;
 	if (iov) {
 		req->flags |= REQ_F_NEED_CLEANUP;
-		io->free_iov_nr = io->iter.nr_segs;
-		kfree(io->free_iovec);
-		io->free_iovec = iov;
+		io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
 	}
 	return 0;
 }
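Behaviour is unchanged by this conversion: per the inline helper added above, the single call expands to the same free-then-assign sequence the removed lines spelled out, and REQ_F_NEED_CLEANUP still guarantees the new allocation is released on the cleanup path. As a sketch of the mapping:

/* Sketch: what the new call does, next to the removed open-coded lines. */
io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
/* expands (via the inline helper) to:
 *	io_vec_free(&io->vec);		// was: kfree(io->free_iovec);
 *	io->vec.iovec = iov;		// was: io->free_iovec = iov;
 *	io->vec.nr = io->iter.nr_segs;	// was: io->free_iov_nr = io->iter.nr_segs;
 */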
 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
 		return;

-	io_alloc_cache_kasan(&rw->free_iovec, &rw->free_iov_nr);
+	io_alloc_cache_vec_kasan(&rw->vec);
 	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
 		req->async_data = NULL;
 		req->flags &= ~REQ_F_ASYNC_DATA;
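The recycling rule is unchanged; only the storage moved. With CONFIG_KASAN enabled, io_alloc_cache_vec_kasan() frees the separately allocated iovec before the io_async_rw object is parked in rw_cache, so KASAN can flag any later access through a stale pointer; without KASAN the array stays attached so a recycled request can reuse it. The old io_alloc_cache_kasan() did the same on the raw pointer/count pair.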
 	rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
 	if (!rw)
 		return -ENOMEM;
-	if (rw->free_iovec)
+	if (rw->vec.iovec)
 		req->flags |= REQ_F_NEED_CLEANUP;
 	rw->bytes_done = 0;
 	return 0;
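The check against rw->vec.iovec matters for recycled objects: when KASAN is off, an io_async_rw pulled from rw_cache may still own an iovec array from its previous use, so REQ_F_NEED_CLEANUP has to be re-armed here to ensure that array is eventually freed.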
 {
 	struct io_async_rw *rw = (struct io_async_rw *) entry;

-	if (rw->free_iovec)
-		kfree(rw->free_iovec);
+	io_vec_free(&rw->vec);
 	kfree(rw);
 }
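The NULL guard disappears because it was redundant to begin with: kfree(NULL) is a no-op, and io_vec_free() (as sketched earlier) both tolerates an empty vec and resets its fields behind it.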
 };

 struct io_async_rw {
+	struct iou_vec			vec;
 	size_t				bytes_done;
-	struct iovec			*free_iovec;
+
 	struct_group(clear,
 		struct iov_iter		iter;
 		struct iov_iter_state	iter_state;
 		struct iovec		fast_iov;
-		int			free_iov_nr;
 		/*
 		 * wpq is for buffered io, while meta fields are used with
 		 * direct io