return node;
}

-static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
{
- struct llist_node *node = llist_del_all(&tctx->task_list);
struct io_ring_ctx *last_ctx = NULL;
struct io_kiocb *req;
}
}

+static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+
+ __io_fallback_tw(node, sync);
+}
+
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
unsigned int max_entries,
unsigned int *count)
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
- struct llist_node *node;
+ struct llist_node *node = llist_del_all(&ctx->work_llist);
- node = llist_del_all(&ctx->work_llist);
- while (node) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
-
- node = node->next;
- io_req_normal_work_add(req);
- }
+ __io_fallback_tw(node, false);
}

static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
* Not from an SQE, as those cannot be submitted, but via
* updating tagged resources.
*/
- if (ctx->submitter_task->flags & PF_EXITING)
+ if (percpu_ref_is_dying(&ctx->refs))
lockdep_assert(current_work());
else
lockdep_assert(current == ctx->submitter_task);
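
The shape of the refactor is worth spelling out: the old io_fallback_tw() both detached the pending list with llist_del_all() and walked it, while the new __io_fallback_tw() only walks a list that has already been detached. That is what lets io_move_task_work_from_local() drop its open-coded loop and reuse the helper for a different list (ctx->work_llist). Below is a minimal userspace sketch of the same split, using hypothetical names (struct item, push/del_all, drain_all/__drain) as stand-ins rather than the real io_uring internals:

/*
 * Illustrative only: hypothetical userspace stand-ins mirroring the
 * io_fallback_tw() / __io_fallback_tw() split above, not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static _Atomic(struct item *) pending;	/* plays the role of tctx->task_list */
static _Atomic(struct item *) local;	/* plays the role of ctx->work_llist */

/* Lock-free push, roughly what llist_add() does. */
static void push(_Atomic(struct item *) *head, struct item *it)
{
	struct item *old = atomic_load(head);

	do {
		it->next = old;
	} while (!atomic_compare_exchange_weak(head, &old, it));
}

/* Atomically detach the whole list, roughly llist_del_all(). */
static struct item *del_all(_Atomic(struct item *) *head)
{
	return atomic_exchange(head, NULL);
}

/* Walk an already-detached list: the __io_fallback_tw() part. */
static void __drain(struct item *node, bool sync)
{
	while (node) {
		struct item *req = node;

		node = node->next;
		printf("handling item %d (sync=%d)\n", req->id, (int)sync);
		free(req);
	}
}

/* Thin wrapper keeps the old calling convention: the io_fallback_tw() part. */
static void drain_all(bool sync)
{
	__drain(del_all(&pending), sync);
}

/* A second caller reuses the walk for a different list, as
 * io_move_task_work_from_local() now does with __io_fallback_tw(). */
static void move_local_work(void)
{
	__drain(del_all(&local), false);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct item *it = malloc(sizeof(*it));

		it->id = i;
		push(i & 1 ? &local : &pending, it);
	}
	drain_all(true);
	move_local_work();
	return 0;
}

Keeping the detach inside the thin wrapper preserves the existing calling convention for current callers, while the loop itself becomes reusable for any list that was detached elsewhere.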