}
 }
 
+/*
+ * Fail request initialization before the opcode's ->prep() has run:
+ * zero the opcode-specific data area of @req and hand @err back, so
+ * callers can write "return io_init_fail_req(req, -EINVAL);" on every
+ * early-exit path. __cold keeps this off the hot submission path.
+ */
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+       /* ensure per-opcode data is cleared if we fail before prep */
+       memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+       return err;
+}
+
 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                       const struct io_uring_sqe *sqe)
        __must_hold(&ctx->uring_lock)
 
        if (unlikely(opcode >= IORING_OP_LAST)) {
                req->opcode = 0;
-               return -EINVAL;
+               return io_init_fail_req(req, -EINVAL);
        }
        def = &io_issue_defs[opcode];
        if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
                /* enforce forwards compatibility on users */
                if (sqe_flags & ~SQE_VALID_FLAGS)
-                       return -EINVAL;
+                       return io_init_fail_req(req, -EINVAL);
                if (sqe_flags & IOSQE_BUFFER_SELECT) {
                        if (!def->buffer_select)
-                               return -EOPNOTSUPP;
+                               return io_init_fail_req(req, -EOPNOTSUPP);
                        req->buf_index = READ_ONCE(sqe->buf_group);
                }
                if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
                        ctx->drain_disabled = true;
                if (sqe_flags & IOSQE_IO_DRAIN) {
                        if (ctx->drain_disabled)
-                               return -EOPNOTSUPP;
+                               return io_init_fail_req(req, -EOPNOTSUPP);
                        io_init_req_drain(req);
                }
        }
        if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
                if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
-                       return -EACCES;
+                       return io_init_fail_req(req, -EACCES);
                /* knock it to the slow queue path, will be drained there */
                if (ctx->drain_active)
                        req->flags |= REQ_F_FORCE_ASYNC;
        }
 
        if (!def->ioprio && sqe->ioprio)
-               return -EINVAL;
+               return io_init_fail_req(req, -EINVAL);
        if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
-               return -EINVAL;
+               return io_init_fail_req(req, -EINVAL);
 
        if (def->needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
                req->creds = xa_load(&ctx->personalities, personality);
                if (!req->creds)
-                       return -EINVAL;
+                       return io_init_fail_req(req, -EINVAL);
                get_cred(req->creds);
                ret = security_uring_override_creds(req->creds);
                if (ret) {
                        put_cred(req->creds);
-                       return ret;
+                       return io_init_fail_req(req, ret);
                }
                req->flags |= REQ_F_CREDS;
        }