return def->prep(req, sqe);
}
-static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
+static __cold int io_submit_fail_link(struct io_submit_link *link,
struct io_kiocb *req, int ret)
{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_submit_link *link = &ctx->submit_state.link;
struct io_kiocb *head = link->head;
- trace_io_uring_req_failed(sqe, req, ret);
-
/*
* Avoid breaking links in the middle as it renders links with SQPOLL
* unusable. Instead of failing eagerly, continue assembling the link if
* applicable and mark the head with REQ_F_FAIL. The link flushing code
* should find the flag and handle the rest.
*/
- req_fail_link_node(req, ret);
if (head && !(head->flags & REQ_F_FAIL))
req_fail_link_node(head, -ECANCELED);
@@ ... @@
 else
link->head = req;
link->last = req;
+
return 0;
}
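
+/*
+ * Submission-time failure path: emit the failure tracepoint, mark the
+ * request itself as failed, then hand it to the link handling above.
+ */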
+static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
+ struct io_kiocb *req, int ret)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_submit_link *link = &ctx->submit_state.link;
+
+ trace_io_uring_req_failed(sqe, req, ret);
+
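+ /* mark this request failed (REQ_F_FAIL) with @ret as its result */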
+ req_fail_link_node(req, ret);
+
+ /* cover both linked and non-linked requests */
+ return io_submit_fail_link(link, req, ret);
+}
+
/*
 * Return NULL if nothing to be queued, otherwise return the request for queueing.
 */
static struct io_kiocb *io_link_sqe(struct io_submit_link *link,