kmem_cache_free(req_cachep, req);
}
-static void io_req_link_next(struct io_kiocb *req)
+static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
struct io_kiocb *nxt;
}
nxt->flags |= REQ_F_LINK_DONE;
- INIT_WORK(&nxt->work, io_sq_wq_submit_work);
- io_queue_async_work(req->ctx, nxt);
+ /*
+ * If we're in async work, we can continue processing the chain
+ * in this context instead of having to queue up new async work.
+ */
+ if (nxtptr && current_work()) {
+ *nxtptr = nxt;
+ } else {
+ INIT_WORK(&nxt->work, io_sq_wq_submit_work);
+ io_queue_async_work(req->ctx, nxt);
+ }
}
}
}
}
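The comment in the hunk above carries the core idea of the patch: a request that completes while running inside the async worker (the current_work() test) can hand its dependent link straight back to that worker instead of round-tripping through io_queue_async_work(). Below is a minimal user-space sketch of that hand-back pattern; the names (req_node, link_next, queue_async, and in_worker standing in for current_work() != NULL) are invented for illustration, this is not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	struct req_node {
		int id;
		struct req_node *link;	/* next request in the chain, if any */
	};

	/* stand-in for INIT_WORK() + io_queue_async_work(): punt to a worker pool */
	static void queue_async(struct req_node *req)
	{
		printf("queueing req %d as fresh async work\n", req->id);
	}

	/*
	 * Analogue of the io_req_link_next() change: if we are already in a
	 * worker and the caller gave us somewhere to put it, hand the next
	 * link back so the same thread keeps processing the chain; otherwise
	 * fall back to queueing new work.
	 */
	static void link_next(struct req_node *req, bool in_worker,
			      struct req_node **nxtptr)
	{
		struct req_node *nxt = req->link;

		if (!nxt)
			return;
		if (in_worker && nxtptr)
			*nxtptr = nxt;
		else
			queue_async(nxt);
	}

	int main(void)
	{
		struct req_node second = { .id = 2, .link = NULL };
		struct req_node first = { .id = 1, .link = &second };
		struct req_node *nxt = NULL;

		link_next(&first, true, &nxt);	/* worker context: nxt == &second */
		link_next(&first, false, NULL);	/* not a worker: req 2 is queued */
		return nxt == &second ? 0 : 1;
	}
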
-static void io_free_req(struct io_kiocb *req)
+static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt)
{
/*
* If LINK is set, we have dependent requests in this chain. If we
if (req->flags & REQ_F_FAIL_LINK)
io_fail_links(req);
else
- io_req_link_next(req);
+ io_req_link_next(req, nxt);
}
__io_free_req(req);
}
-static void io_put_req(struct io_kiocb *req)
+/*
+ * Drop reference to request, return next in chain (if there is one) if this
+ * was the last reference to this request.
+ */
+static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
+ struct io_kiocb *nxt = NULL;
+
if (refcount_dec_and_test(&req->refs))
- io_free_req(req);
+ io_free_req(req, &nxt);
+
+ return nxt;
+}
+
+static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
+{
+ struct io_kiocb *nxt;
+
+ nxt = io_put_req_find_next(req);
+ if (nxt) {
+ if (nxtptr) {
+ *nxtptr = nxt;
+ } else {
+ INIT_WORK(&nxt->work, io_sq_wq_submit_work);
+ io_queue_async_work(nxt->ctx, nxt);
+ }
+ }
}
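io_put_req_find_next() splits the old io_put_req() into two steps: drop a reference, and report the dependent link only if that was the last reference. A user-space sketch of just that find-next half, with invented names (xreq, xput_find_next) and C11 atomics standing in for refcount_t:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct xreq {
		atomic_int refs;	/* e.g. submission + completion references */
		struct xreq *link;	/* dependent request, if any */
	};

	/* drop one reference; surface the link only if that was the last one */
	static struct xreq *xput_find_next(struct xreq *req)
	{
		struct xreq *nxt = NULL;

		/* fetch_sub() returns the old count: 1 means we held the last ref */
		if (atomic_fetch_sub(&req->refs, 1) == 1) {
			nxt = req->link;
			free(req);
		}
		return nxt;
	}

	int main(void)
	{
		struct xreq *second = calloc(1, sizeof(*second));
		struct xreq *first = calloc(1, sizeof(*first));

		atomic_init(&second->refs, 1);
		atomic_init(&first->refs, 2);
		first->link = second;

		/* first put: not the last reference, nothing surfaces */
		struct xreq *nxt = xput_find_next(first);
		/* second put: last reference drops, the dependent link comes back */
		nxt = xput_find_next(first);

		free(nxt);
		return 0;
	}

The new io_put_req() then layers the hand-back-or-queue decision on top of this, which is why most call sites in the rest of the patch simply gain a NULL argument and keep their old behaviour.
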
static unsigned io_cqring_events(struct io_rings *rings)
if (to_free == ARRAY_SIZE(reqs))
io_free_req_many(ctx, reqs, &to_free);
} else {
- io_free_req(req);
+ io_free_req(req, NULL);
}
}
}
file_end_write(req->file);
}
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
if ((req->flags & REQ_F_LINK) && res != req->result)
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req->ctx, req->user_data, res);
- io_put_req(req);
+}
+
+static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+ io_complete_rw_common(kiocb, res);
+ io_put_req(req, NULL);
+}
+
+static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
+{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+ io_complete_rw_common(kiocb, res);
+ return io_put_req_find_next(req);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
}
}
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
+ bool in_async)
+{
+ if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
+ *nxt = __io_complete_rw(kiocb, ret);
+ else
+ io_rw_done(kiocb, ret);
+}
+
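kiocb_done() is the switch point for reads and writes. A hedged user-space sketch of its gating condition follows, with invented names (xkiocb, may_chain_inline, complete_plain, complete_polled); the detail it models is the function-pointer comparison that keeps polled completions (io_complete_rw_iopoll) off the inline chaining path.

	#include <stdbool.h>
	#include <stddef.h>

	/* two completion handlers, mirroring io_complete_rw vs io_complete_rw_iopoll */
	static void complete_plain(long res)  { (void)res; }
	static void complete_polled(long res) { (void)res; }

	struct xkiocb {
		void (*complete)(long res);
	};

	/*
	 * Gate for the inline-completion fast path: only when running async,
	 * the result is a success, the caller can take a next request, and
	 * the handler is the plain (non-polled) completion.
	 */
	static bool may_chain_inline(const struct xkiocb *k, long ret,
				     bool in_async, void **nxtptr)
	{
		return in_async && ret >= 0 && nxtptr && k->complete == complete_plain;
	}

	int main(void)
	{
		struct xkiocb plain = { .complete = complete_plain };
		struct xkiocb polled = { .complete = complete_polled };
		void *nxt = NULL;

		bool a = may_chain_inline(&plain, 0, true, &nxt);	/* true */
		bool b = may_chain_inline(&polled, 0, true, &nxt);	/* false */
		return (a && !b) ? 0 : 1;
	}
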
static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
const struct io_uring_sqe *sqe,
struct iov_iter *iter)
}
static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
+ kiocb_done(kiocb, ret2, nxt, s->needs_lock);
} else {
/*
* If ->needs_lock is true, we're already in async
}
static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw;
else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN) {
- io_rw_done(kiocb, ret2);
+ kiocb_done(kiocb, ret2, nxt, s->needs_lock);
} else {
/*
* If ->needs_lock is true, we're already in async
return -EINVAL;
io_cqring_add_event(ctx, user_data, err);
- io_put_req(req);
+ io_put_req(req, NULL);
return 0;
}
}
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_put_req(req, nxt);
return 0;
}
static int io_sync_file_range(struct io_kiocb *req,
const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt,
bool force_nonblock)
{
loff_t sqe_off;
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_put_req(req, nxt);
return 0;
}
#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock,
+ struct io_kiocb **nxt, bool force_nonblock,
long (*fn)(struct socket *, struct user_msghdr __user *,
unsigned int))
{
}
io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_put_req(req, nxt);
return 0;
}
#endif
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_sendmsg_sock);
#else
return -EOPNOTSUPP;
#endif
}
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
- return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
+ return io_send_recvmsg(req, sqe, nxt, force_nonblock,
+ __sys_recvmsg_sock);
#else
return -EOPNOTSUPP;
#endif
spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req->ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_put_req(req, NULL);
return 0;
}
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req(req, NULL);
}
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req(req, NULL);
} else {
io_queue_async_work(ctx, req);
}
if (mask) {
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req(req, NULL);
}
return ipt.error;
}
io_cqring_ev_posted(ctx);
- io_put_req(req);
+ io_put_req(req, NULL);
return HRTIMER_NORESTART;
}
}
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct sqe_submit *s, bool force_nonblock)
+ const struct sqe_submit *s, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
case IORING_OP_READV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, s, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index))
return -EINVAL;
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, s, nxt, force_nonblock);
break;
case IORING_OP_READ_FIXED:
- ret = io_read(req, s, force_nonblock);
+ ret = io_read(req, s, nxt, force_nonblock);
break;
case IORING_OP_WRITE_FIXED:
- ret = io_write(req, s, force_nonblock);
+ ret = io_write(req, s, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, s->sqe, force_nonblock);
+ ret = io_fsync(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
ret = io_poll_add(req, s->sqe);
ret = io_poll_remove(req, s->sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, s->sqe, force_nonblock);
+ ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, s->sqe, force_nonblock);
+ ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, s->sqe, force_nonblock);
+ ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
ret = io_timeout(req, s->sqe);
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
unsigned int flags = req->flags;
+ struct io_kiocb *nxt = NULL;
/* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
s->has_user = cur_mm != NULL;
s->needs_lock = true;
do {
- ret = __io_submit_sqe(ctx, req, s, false);
+ ret = __io_submit_sqe(ctx, req, s, &nxt, false);
/*
* We can get EAGAIN for polled IO even though
* we're forcing a sync submission from here,
}
/* drop submission reference */
- io_put_req(req);
+ io_put_req(req, NULL);
if (ret) {
io_cqring_add_event(ctx, sqe->user_data, ret);
- io_put_req(req);
+ io_put_req(req, NULL);
}
/* async context always use a copy of the sqe */
kfree(sqe);
+ /* if a dependent link is ready, do that as the next one */
+ if (!ret && nxt) {
+ req = nxt;
+ continue;
+ }
+
/* req from defer and link list needn't decrease async cnt */
if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
goto out;
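In io_sq_wq_submit_work() the hand-back finally pays off: instead of finishing the work item after one request, the loop picks up nxt and continues in the same worker. A compressed user-space sketch of that control flow, with invented names (wreq, process_one, worker):

	#include <stddef.h>

	struct wreq {
		struct wreq *link;
	};

	/*
	 * Stand-in for one pass of __io_submit_sqe() plus the reference drops:
	 * returns 0 on success and may hand back a ready dependent via *nxtptr.
	 */
	static int process_one(struct wreq *req, struct wreq **nxtptr)
	{
		*nxtptr = req->link;
		return 0;
	}

	/* the shape of the loop above: keep going inline while links succeed */
	static void worker(struct wreq *req)
	{
		while (req) {
			struct wreq *nxt = NULL;
			int ret = process_one(req, &nxt);

			/* if a dependent link is ready, do that as the next one */
			if (ret || !nxt)
				break;
			req = nxt;
		}
	}

	int main(void)
	{
		struct wreq tail = { .link = NULL };
		struct wreq head = { .link = &tail };

		worker(&head);
		return 0;
	}

Only a successful submission continues the chain inline, matching the !ret check in the hunk above.
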
{
int ret;
- ret = __io_submit_sqe(ctx, req, s, true);
+ ret = __io_submit_sqe(ctx, req, s, NULL, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
}
/* drop submission reference */
- io_put_req(req);
+ io_put_req(req, NULL);
/* and drop final reference, if we failed */
if (ret) {
io_cqring_add_event(ctx, req->user_data, ret);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req);
+ io_put_req(req, NULL);
}
return ret;
ret = io_req_defer(ctx, req, s->sqe);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
+ io_free_req(req, NULL);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
}
return 0;
ret = io_req_defer(ctx, req, s->sqe);
if (ret) {
if (ret != -EIOCBQUEUED) {
- io_free_req(req);
+ io_free_req(req, NULL);
__io_free_req(shadow);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0;
ret = io_req_set_file(ctx, s, state, req);
if (unlikely(ret)) {
err_req:
- io_free_req(req);
+ io_free_req(req, NULL);
err:
io_cqring_add_event(ctx, s->sqe->user_data, ret);
return;