static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
-static void io_cqring_fill_event(struct io_kiocb *req, long res);
+static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
- io_cqring_fill_event(req, status);
+ io_cqring_fill_event(req, status, 0);
io_put_req_deferred(req, 1);
}
}
atomic_inc(&req->refs);
}
-static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
- unsigned int cflags)
+static bool io_cqring_fill_event(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_cqe *cqe;
return false;
}
-static void io_cqring_fill_event(struct io_kiocb *req, long res)
-{
- __io_cqring_fill_event(req, res, 0);
-}
-
static void io_req_complete_post(struct io_kiocb *req, long res,
unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
- __io_cqring_fill_event(req, res, cflags);
+ io_cqring_fill_event(req, res, cflags);
/*
* If we're the last reference to this request, add to our locked
* free_list cache.
link->timeout.head = NULL;
ret = hrtimer_try_to_cancel(&io->timer);
if (ret != -1) {
- io_cqring_fill_event(link, -ECANCELED);
+ io_cqring_fill_event(link, -ECANCELED, 0);
io_put_req_deferred(link, 1);
return true;
}
link->link = NULL;
trace_io_uring_fail_link(req, link);
- io_cqring_fill_event(link, -ECANCELED);
+ io_cqring_fill_event(link, -ECANCELED, 0);
io_put_req_deferred(link, 2);
link = nxt;
}
spin_lock_irq(&ctx->completion_lock);
for (i = 0; i < nr; i++) {
req = cs->reqs[i];
- __io_cqring_fill_event(req, req->result, req->compl.cflags);
+ io_cqring_fill_event(req, req->result, req->compl.cflags);
}
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_rw_kbuf(req);
- __io_cqring_fill_event(req, req->result, cflags);
+ io_cqring_fill_event(req, req->result, cflags);
(*nr_events)++;
if (req_ref_put_and_test(req))
io_req_free_batch(&rb, req, &ctx->submit_state);
}
if (req->poll.events & EPOLLONESHOT)
flags = 0;
- if (!__io_cqring_fill_event(req, error, flags)) {
+ if (!io_cqring_fill_event(req, error, flags)) {
io_poll_remove_waitqs(req);
req->poll.done = true;
flags = 0;
do_complete = io_poll_remove_waitqs(req);
if (do_complete) {
- io_cqring_fill_event(req, -ECANCELED);
+ io_cqring_fill_event(req, -ECANCELED, 0);
io_commit_cqring(req->ctx);
req_set_fail_links(req);
io_put_req_deferred(req, 1);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- io_cqring_fill_event(req, -ETIME);
+ io_cqring_fill_event(req, -ETIME, 0);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
return PTR_ERR(req);
req_set_fail_links(req);
- io_cqring_fill_event(req, -ECANCELED);
+ io_cqring_fill_event(req, -ECANCELED, 0);
io_put_req_deferred(req, 1);
return 0;
}
ret = io_timeout_update(ctx, tr->addr, &tr->ts,
io_translate_timeout_mode(tr->flags));
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
done:
if (!ret)
ret = success_ret;
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
spin_lock_irq(&ctx->completion_lock);
done:
- io_cqring_fill_event(req, ret);
+ io_cqring_fill_event(req, ret, 0);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
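
For reference, a minimal sketch (not part of the patch) of the call-site shape after this unification, assuming only what the hunks above show: callers of the removed void wrapper now pass an explicit cflags of 0, and the bool return reports whether the CQE was actually posted. The surrounding function and its name are hypothetical; io_cqring_fill_event, io_commit_cqring, io_cqring_ev_posted and req_set_fail_links are the helpers used throughout the patch.

/* example_post_cqe() is illustrative only, not part of the patch. */
static void example_post_cqe(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	/* An explicit 0 reproduces what the removed void wrapper hard-coded. */
	if (!io_cqring_fill_event(req, res, 0)) {
		/*
		 * The CQE could not be posted; the poll hunk above reacts
		 * to this by stopping multishot notifications
		 * (io_poll_remove_waitqs + poll.done).
		 */
		req_set_fail_links(req);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
}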