static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
+	struct io_kiocb *req, *tmp;
	u32 seq;

	if (list_empty(&ctx->timeout_list))
		return;

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

-	do {
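+	/* _safe iteration: io_kill_timeout() unlinks entries as we walk */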
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

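		/*
		 * seq can wrap around, so compare distances from the last
		 * flush point rather than raw sequence numbers.
		 */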
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

-		list_del_init(&req->timeout.list);
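+		/* io_kill_timeout() now unlinks the request itself */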
		io_kill_timeout(req, 0);
-	} while (!list_empty(&ctx->timeout_list));
+	}

	ctx->cq_last_tm_flush = seq;
}
	else
		data->mode = HRTIMER_MODE_REL;

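+	/*
+	 * Initialize the entry so io_link_timeout_fn() can list_del() it
+	 * unconditionally, even if the request never joins ->timeout_list.
+	 */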
+	INIT_LIST_HEAD(&req->timeout.list);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}
	if (!list_empty(&req->link_list)) {
		prev = list_entry(req->link_list.prev, struct io_kiocb,
				  link_list);
-		if (refcount_inc_not_zero(&prev->refs))
-			list_del_init(&req->link_list);
-		else
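+		/* unlink first; keep prev only if a reference can still be taken */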
+		list_del_init(&req->link_list);
+		if (!refcount_inc_not_zero(&prev->refs))
			prev = NULL;
	}

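+	/* safe even if never queued, thanks to the INIT_LIST_HEAD above */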
+	list_del(&req->timeout.list);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {