        REQ_F_SINGLE_POLL_BIT,
        REQ_F_DOUBLE_POLL_BIT,
        REQ_F_PARTIAL_IO_BIT,
-       REQ_F_CQE32_INIT_BIT,
        REQ_F_APOLL_MULTISHOT_BIT,
        REQ_F_CLEAR_POLLIN_BIT,
        REQ_F_HASH_LOCKED_BIT,
        REQ_F_PARTIAL_IO        = BIT(REQ_F_PARTIAL_IO_BIT),
        /* fast poll multishot mode */
        REQ_F_APOLL_MULTISHOT   = BIT(REQ_F_APOLL_MULTISHOT_BIT),
-       /* ->extra1 and ->extra2 are initialised */
-       REQ_F_CQE32_INIT        = BIT(REQ_F_CQE32_INIT_BIT),
        /* recvmsg special flag, clear EPOLLIN */
        REQ_F_CLEAR_POLLIN      = BIT(REQ_F_CLEAR_POLLIN_BIT),
        /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
        struct io_task_work             io_task_work;
        unsigned                        nr_tw;
        /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
-       union {
-               struct hlist_node       hash_node;
-               struct {
-                       u64             extra1;
-                       u64             extra2;
-               };
-       };
+       struct hlist_node               hash_node;
        /* internal polling, see IORING_FEAT_FAST_POLL */
        struct async_poll               *apoll;
        /* opcode allocated if it needs to store data for async defer */
        /* custom credentials, valid IFF REQ_F_CREDS is set */
        const struct cred               *creds;
        struct io_wq_work               work;
+
+       struct {
+               u64                     extra1;
+               u64                     extra2;
+       } big_cqe;
 };
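
Side note on the layout change above: the CQE32 extras no longer share a union with hash_node, so they cannot be clobbered by cancellation hashing and no longer need a validity flag. A minimal standalone model of the idea (hypothetical types, not the kernel's struct io_kiocb):

#include <assert.h>
#include <stdint.h>

/* Old layout: extras overlapped hash_node, so a flag had to track validity. */
struct old_req_model {
        union {
                void            *hash_node;
                struct { uint64_t extra1, extra2; };
        };
};

/* New layout: a dedicated trailing struct, simply kept zeroed when unused. */
struct new_req_model {
        void            *hash_node;
        struct {
                uint64_t        extra1;
                uint64_t        extra2;
        } big_cqe;
};

int main(void)
{
        struct new_req_model req = { 0 };

        /* the invariant the rest of the patch relies on */
        assert(req.big_cqe.extra1 == 0 && req.big_cqe.extra2 == 0);
        return 0;
}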
 
 struct io_overflow_cqe {
 
 
 void io_req_cqe_overflow(struct io_kiocb *req)
 {
-       if (!(req->flags & REQ_F_CQE32_INIT)) {
-               req->extra1 = 0;
-               req->extra2 = 0;
-       }
        io_cqring_event_overflow(req->ctx, req->cqe.user_data,
                                req->cqe.res, req->cqe.flags,
-                               req->extra1, req->extra2);
+                               req->big_cqe.extra1, req->big_cqe.extra2);
+       memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 }
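
With REQ_F_CQE32_INIT gone, the overflow path forwards big_cqe unconditionally and clears it afterwards; for requests that never set the extras, the fields are already zero from request init. A hedged sketch of why passing zeros is harmless (hypothetical record_overflow(), loosely modelled on the overflow sink, not the kernel function):

#include <stdbool.h>
#include <stdint.h>

struct overflow_cqe_model {
        uint64_t        user_data;
        int32_t         res;
        uint32_t        flags;
        uint64_t        big_cqe[2];     /* only meaningful on CQE32 rings */
};

/* Extras are simply ignored unless the ring was set up with 32-byte CQEs. */
static void record_overflow(struct overflow_cqe_model *ocqe, bool is_cqe32,
                            uint64_t user_data, int32_t res, uint32_t cflags,
                            uint64_t extra1, uint64_t extra2)
{
        ocqe->user_data = user_data;
        ocqe->res = res;
        ocqe->flags = cflags;
        ocqe->big_cqe[0] = is_cqe32 ? extra1 : 0;
        ocqe->big_cqe[1] = is_cqe32 ? extra2 : 0;
}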
 
 /*
        req->async_data = NULL;
        /* not necessary, but safer to zero */
        memset(&req->cqe, 0, sizeof(req->cqe));
+       memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 }
 
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 
        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
                                        req->cqe.res, req->cqe.flags,
-                                       (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
-                                       (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
+                                       req->big_cqe.extra1, req->big_cqe.extra2);
 
        memcpy(cqe, &req->cqe, sizeof(*cqe));
-
        if (ctx->flags & IORING_SETUP_CQE32) {
-               u64 extra1 = 0, extra2 = 0;
-
-               if (req->flags & REQ_F_CQE32_INIT) {
-                       extra1 = req->extra1;
-                       extra2 = req->extra2;
-               }
-
-               WRITE_ONCE(cqe->big_cqe[0], extra1);
-               WRITE_ONCE(cqe->big_cqe[1], extra2);
+               memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
+               memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }
        return true;
 }
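
The memcpy/memset pair above is what guarantees that opcodes which never fill the extras still post zeroed big_cqe values. A userspace sketch with liburing (assumed available; error handling trimmed) that would observe this on a CQE32 ring:

#include <assert.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(4, &ring, IORING_SETUP_CQE32))
                return 1;       /* no CQE32 support on this kernel */

        io_uring_prep_nop(io_uring_get_sqe(&ring));
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);

        /* NOP never calls io_req_set_cqe32_extra(), so both extras read 0 */
        assert(cqe->big_cqe[0] == 0 && cqe->big_cqe[1] == 0);

        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}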
 
 static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
                                          u64 extra1, u64 extra2)
 {
-       req->extra1 = extra1;
-       req->extra2 = extra2;
-       req->flags |= REQ_F_CQE32_INIT;
+       req->big_cqe.extra1 = extra1;
+       req->big_cqe.extra2 = extra2;
 }
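
For context, a hypothetical caller sketch (example_complete_cqe32() is illustrative only; io_req_set_res() is assumed from the surrounding completion code) might stash its two extra values like this before completing:

static void example_complete_cqe32(struct io_kiocb *req, int res,
                                   u64 second_result, u64 aux)
{
        /* extras only make sense when the ring posts 32-byte CQEs */
        if (req->ctx->flags & IORING_SETUP_CQE32)
                io_req_set_cqe32_extra(req, second_result, aux);
        io_req_set_res(req, res, 0);
        /* hand the request back to the normal completion path from here */
}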
 
 /*