        struct io_wq_work_node          comp_list;
        atomic_t                        refs;
        atomic_t                        poll_refs;
-       struct io_kiocb                 *link;
        struct io_task_work             io_task_work;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
        struct hlist_node               hash_node;
        /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
        struct io_buffer                *kbuf;
+       struct io_kiocb                 *link;
        /* custom credentials, valid IFF REQ_F_CREDS is set */
        const struct cred               *creds;
        struct io_wq_work               work;
 };
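
The struct hunk above moves the link pointer out of the hot completion/task-work
area of io_kiocb, and the hunks below prefetch the next request's node while the
current one is handled. The following is a minimal user-space sketch of that
walk-and-prefetch-one-ahead pattern; all names here (struct node, handle_one,
walk) are illustrative stand-ins, not kernel code.

#include <stddef.h>

struct node {
        struct node *next;
        int payload;
};

static void handle_one(struct node *n)
{
        n->payload++;           /* stand-in for per-request work */
}

static void walk(struct node *head)
{
        struct node *n = head;

        while (n) {
                struct node *next = n->next;

                /* hint the CPU to start pulling the next node into cache
                 * while the current node is still being processed */
                if (next)
                        __builtin_prefetch(next);

                handle_one(n);
                n = next;
        }
}

int main(void)
{
        struct node c = { NULL, 2 }, b = { &c, 1 }, a = { &b, 0 };

        walk(&a);
        return 0;
}

A prefetch is only a hint, which is why the patch below issues it unconditionally
via prefetch(container_of(next, ...)); the sketch adds a NULL check purely to keep
the toy example obviously well-defined.
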
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        if (unlikely(!*uring_locked && *ctx))
                                ctx_commit_and_unlock(*ctx);
                struct io_kiocb *req = container_of(node, struct io_kiocb,
                                                    io_task_work.node);
 
+               prefetch(container_of(next, struct io_kiocb, io_task_work.node));
+
                if (req->ctx != *ctx) {
                        ctx_flush_and_put(*ctx, locked);
                        *ctx = req->ctx;