 #define IO_REQ_CACHE_SIZE              32
 #define IO_REQ_ALLOC_BATCH             8
 
-struct io_comp_state {
-       struct io_kiocb         *reqs[IO_COMPL_BATCH];
-       unsigned int            nr;
-       /* inline/task_work completion list, under ->uring_lock */
-       struct list_head        free_list;
-};
-
 struct io_submit_link {
        struct io_kiocb         *head;
        struct io_kiocb         *last;
 };
 
 struct io_submit_state {
        struct blk_plug         plug;
        struct io_submit_link   link;
 
        /*
         * io_kiocb alloc cache
         */
        void                    *reqs[IO_REQ_CACHE_SIZE];
        unsigned int            free_reqs;
 
        bool                    plug_started;
 
        /*
         * Batch completion logic
         */
-       struct io_comp_state    comp;
+       struct io_kiocb         *compl_reqs[IO_COMPL_BATCH];
+       unsigned int            compl_nr;
+       /* inline/task_work completion list, under ->uring_lock */
+       struct list_head        free_list;
 
        /*
         * File reference cache
        INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
        init_llist_head(&ctx->rsrc_put_llist);
        INIT_LIST_HEAD(&ctx->tctx_list);
-       INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
+       INIT_LIST_HEAD(&ctx->submit_state.free_list);
        INIT_LIST_HEAD(&ctx->locked_free_list);
        INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        return ctx;
 }
 
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-                                       struct io_comp_state *cs)
+                                       struct io_submit_state *state)
 {
        spin_lock_irq(&ctx->completion_lock);
-       list_splice_init(&ctx->locked_free_list, &cs->free_list);
+       list_splice_init(&ctx->locked_free_list, &state->free_list);
        ctx->locked_free_nr = 0;
        spin_unlock_irq(&ctx->completion_lock);
 }
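
For reference, the pattern above (a completion-side list filled under ctx->completion_lock, then spliced wholesale into the submission-side free_list) can be sketched in plain userspace C. This is only an illustration of the locking scheme, not kernel code: struct req_caches, cache_locked_free() and flush_locked_cache() are made-up names, and a pthread spinlock stands in for the kernel spinlock.

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

struct req_caches {
        pthread_spinlock_t lock;        /* plays the role of ctx->completion_lock */
        struct node *locked_free;       /* filled from the completion/IRQ side */
        unsigned int locked_free_nr;
        struct node *free_list;         /* submission side, touched without the lock */
};

/* completion-side free: push onto the locked list under the spinlock */
static void cache_locked_free(struct req_caches *c, struct node *n)
{
        pthread_spin_lock(&c->lock);
        n->next = c->locked_free;
        c->locked_free = n;
        c->locked_free_nr++;
        pthread_spin_unlock(&c->lock);
}

/* detach the whole locked list in constant time under the lock, then
 * re-link the nodes onto the private submission-side list lock-free */
static void flush_locked_cache(struct req_caches *c)
{
        struct node *head;

        pthread_spin_lock(&c->lock);
        head = c->locked_free;
        c->locked_free = NULL;
        c->locked_free_nr = 0;
        pthread_spin_unlock(&c->lock);

        while (head) {
                struct node *next = head->next;

                head->next = c->free_list;
                c->free_list = head;
                head = next;
        }
}

int main(void)
{
        struct req_caches c = { .locked_free = NULL, .free_list = NULL };
        struct node nodes[4];

        pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < 4; i++) {
                nodes[i].id = i;
                cache_locked_free(&c, &nodes[i]);
        }
        flush_locked_cache(&c);
        for (struct node *n = c.free_list; n; n = n->next)
                printf("recycled req %d\n", n->id);
        pthread_spin_destroy(&c.lock);
        return 0;
}

The detach under the lock is constant-time; all per-request work happens afterwards with no lock held, which is what io_flush_cached_locked_reqs() buys the kernel via list_splice_init().
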
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
        struct io_submit_state *state = &ctx->submit_state;
-       struct io_comp_state *cs = &state->comp;
        int nr;
 
        /*
         * If we have more than a batch's worth of requests in our IRQ
         * side cache, grab the completion lock and splice them over to
         * the submission side free list.
         */
        if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
-               io_flush_cached_locked_reqs(ctx, cs);
+               io_flush_cached_locked_reqs(ctx, state);
 
        nr = state->free_reqs;
-       while (!list_empty(&cs->free_list)) {
-               struct io_kiocb *req = list_first_entry(&cs->free_list,
+       while (!list_empty(&state->free_list)) {
+               struct io_kiocb *req = list_first_entry(&state->free_list,
                                        struct io_kiocb, inflight_entry);
 
                list_del(&req->inflight_entry);
 {
        if (!ctx)
                return;
-       if (ctx->submit_state.comp.nr) {
+       if (ctx->submit_state.compl_nr) {
                mutex_lock(&ctx->uring_lock);
                io_submit_flush_completions(ctx);
                mutex_unlock(&ctx->uring_lock);
        if (state->free_reqs != ARRAY_SIZE(state->reqs))
                state->reqs[state->free_reqs++] = req;
        else
-               list_add(&req->inflight_entry, &state->comp.free_list);
+               list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void io_submit_flush_completions(struct io_ring_ctx *ctx)
        __must_hold(&ctx->uring_lock)
 {
-       struct io_comp_state *cs = &ctx->submit_state.comp;
-       int i, nr = cs->nr;
+       struct io_submit_state *state = &ctx->submit_state;
+       int i, nr = state->compl_nr;
        struct req_batch rb;
 
        spin_lock_irq(&ctx->completion_lock);
        for (i = 0; i < nr; i++) {
-               struct io_kiocb *req = cs->reqs[i];
+               struct io_kiocb *req = state->compl_reqs[i];
 
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        req->compl.cflags);
 
        io_init_req_batch(&rb);
        for (i = 0; i < nr; i++) {
-               struct io_kiocb *req = cs->reqs[i];
+               struct io_kiocb *req = state->compl_reqs[i];
 
                /* submission and completion refs */
                if (req_ref_sub_and_test(req, 2))
        }
 
        io_req_free_batch_finish(ctx, &rb);
-       cs->nr = 0;
+       state->compl_nr = 0;
 }
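
The core of this change is that the completion batch now lives directly in io_submit_state as compl_reqs[] plus compl_nr, flushed in one pass once the array fills. A minimal userspace sketch of that pattern, with hypothetical names (request, submit_state, complete_request, flush_completions) standing in for the kernel structures:

#include <stdio.h>

#define COMPL_BATCH 32

struct request {
        int result;
};

struct submit_state {
        /* completed requests parked inline, no separate io_comp_state */
        struct request *compl_reqs[COMPL_BATCH];
        unsigned int compl_nr;
};

/* post every batched completion in one pass, then reset the counter */
static void flush_completions(struct submit_state *state)
{
        for (unsigned int i = 0; i < state->compl_nr; i++)
                printf("CQE: result=%d\n", state->compl_reqs[i]->result);
        state->compl_nr = 0;
}

/* stash a finished request; flush once the inline array is full */
static void complete_request(struct submit_state *state, struct request *req)
{
        state->compl_reqs[state->compl_nr++] = req;
        if (state->compl_nr == COMPL_BATCH)
                flush_completions(state);
}

int main(void)
{
        struct submit_state state = { .compl_nr = 0 };
        struct request reqs[40];

        for (int i = 0; i < 40; i++) {
                reqs[i].result = i;
                complete_request(&state, &reqs[i]);
        }
        /* flush the tail that did not fill a whole batch */
        if (state.compl_nr)
                flush_completions(&state);
        return 0;
}

The kernel change mirrored here is purely mechanical: folding struct io_comp_state away shortens the access paths (state->compl_nr instead of state->comp.nr) without changing behaviour.
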
 
 /*
                /* drop submission reference */
                if (req->flags & REQ_F_COMPLETE_INLINE) {
                        struct io_ring_ctx *ctx = req->ctx;
-                       struct io_comp_state *cs = &ctx->submit_state.comp;
+                       struct io_submit_state *state = &ctx->submit_state;
 
-                       cs->reqs[cs->nr++] = req;
-                       if (cs->nr == ARRAY_SIZE(cs->reqs))
+                       state->compl_reqs[state->compl_nr++] = req;
+                       if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
                                io_submit_flush_completions(ctx);
                } else {
                        io_put_req(req);
 {
        if (state->link.head)
                io_queue_sqe(state->link.head);
-       if (state->comp.nr)
+       if (state->compl_nr)
                io_submit_flush_completions(ctx);
        if (state->plug_started)
                blk_finish_plug(&state->plug);
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-       struct io_submit_state *submit_state = &ctx->submit_state;
-       struct io_comp_state *cs = &ctx->submit_state.comp;
+       struct io_submit_state *state = &ctx->submit_state;
 
        mutex_lock(&ctx->uring_lock);
 
-       if (submit_state->free_reqs) {
-               kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
-                                    submit_state->reqs);
-               submit_state->free_reqs = 0;
+       if (state->free_reqs) {
+               kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+               state->free_reqs = 0;
        }
 
-       io_flush_cached_locked_reqs(ctx, cs);
-       io_req_cache_free(&cs->free_list);
+       io_flush_cached_locked_reqs(ctx, state);
+       io_req_cache_free(&state->free_list);
        mutex_unlock(&ctx->uring_lock);
 }
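
io_req_caches_free() drains both halves of the request cache: the fixed reqs[] array via kmem_cache_free_bulk() and the overflow free_list. A rough userspace analogue, assuming made-up names (req_cache, cache_put, cache_drain) and plain malloc/free in place of the slab cache:

#include <stdio.h>
#include <stdlib.h>

#define REQ_CACHE_SIZE 32

struct req {
        struct req *next;
};

struct req_cache {
        struct req *reqs[REQ_CACHE_SIZE];       /* models state->reqs[] */
        unsigned int free_reqs;                 /* models state->free_reqs */
        struct req *free_list;                  /* models state->free_list */
};

/* recycle a request: array slot if one is free, otherwise the overflow list */
static void cache_put(struct req_cache *c, struct req *r)
{
        if (c->free_reqs != REQ_CACHE_SIZE) {
                c->reqs[c->free_reqs++] = r;
        } else {
                r->next = c->free_list;
                c->free_list = r;
        }
}

/* hand everything back to the allocator, like io_req_caches_free() */
static void cache_drain(struct req_cache *c)
{
        while (c->free_reqs)
                free(c->reqs[--c->free_reqs]);
        while (c->free_list) {
                struct req *next = c->free_list->next;

                free(c->free_list);
                c->free_list = next;
        }
}

int main(void)
{
        struct req_cache cache = { .free_reqs = 0, .free_list = NULL };

        /* 40 recycled requests: 32 land in the array, 8 overflow to the list */
        for (int i = 0; i < 40; i++)
                cache_put(&cache, malloc(sizeof(struct req)));
        printf("array slots used: %u\n", cache.free_reqs);
        cache_drain(&cache);
        return 0;
}
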