* Written by the application, shouldn't be modified by the
         * kernel.
         */
-       u32                     cq_flags;
+       u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
        struct hlist_node               hash_node;
        struct async_poll               *apoll;
        struct io_wq_work               work;
-       const struct cred               *creds;
+       const struct cred               *creds;
 
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;
 {
        struct io_submit_state *state = &ctx->submit_state;
 
-       BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+       BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
 
        if (!state->free_reqs) {
                gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
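
BUILD_BUG_ON() breaks the build when its condition is true, so the reordered
assertion now reads naturally as "fail if the allocation batch no longer fits
in state->reqs". A minimal compile-time sketch of the same check, using
illustrative stand-ins for the kernel's ARRAY_SIZE() and BUILD_BUG_ON()
macros and a made-up batch size:

/* Illustrative stand-ins; the kernel defines these in build_bug.h etc. */
#define ARRAY_SIZE(a)      (sizeof(a) / sizeof((a)[0]))
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define IO_REQ_ALLOC_BATCH 8          /* hypothetical batch size */

static void check_batch_fits(void)
{
	void *reqs[32];               /* stands in for state->reqs */

	/* Compiles only while the batch fits in the request cache. */
	BUILD_BUG_ON(ARRAY_SIZE(reqs) < IO_REQ_ALLOC_BATCH);
}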
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && req->flags & REQ_F_REISSUE) {
+       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        req_ref_get(req);
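
Worth noting: bitwise & binds tighter than logical &&, so the added
parentheses are purely a readability fix; both spellings parse identically.
A self-contained illustration (the REQ_F_REISSUE value below is made up):

#include <stdbool.h>

#define REQ_F_REISSUE (1u << 12)      /* hypothetical bit value */

static bool should_reissue(bool check_reissue, unsigned int flags)
{
	/* '&' has higher precedence than '&&'; the parentheses only
	 * make that explicit for the reader. */
	return check_reissue && (flags & REQ_F_REISSUE);
}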
 static int __io_splice_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       struct io_splice* sp = &req->splice;
+       struct io_splice *sp = &req->splice;
        unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 
 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_splice* sp = &req->splice;
+       struct io_splice *sp = &req->splice;
 
        sp->off_in = READ_ONCE(sqe->splice_off_in);
        sp->off_out = READ_ONCE(sqe->off);
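
Both splice hunks fix the same checkpatch pointer-placement error
("foo* bar" should be "foo *bar"). The surrounding READ_ONCE() calls are
there because the SQE sits in memory shared with userspace, so each field
must be loaded exactly once. A rough sketch of that pattern, with a
simplified stand-in for the kernel's READ_ONCE():

/* Simplified; the kernel's READ_ONCE() handles more cases than this. */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct shared_sqe {
	unsigned long long off;       /* may be rewritten by userspace */
};

static unsigned long long snapshot_off(const struct shared_sqe *sqe)
{
	/* A single volatile load: a concurrent writer cannot make us
	 * observe two different values within this function. */
	return READ_ONCE(sqe->off);
}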
        ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
        if (IS_ERR(ctx->cq_ev_fd)) {
                int ret = PTR_ERR(ctx->cq_ev_fd);
+
                ctx->cq_ev_fd = NULL;
                return ret;
        }
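
The inserted blank line addresses checkpatch's "Missing a blank line after
declarations" warning; kernel style separates local declarations from the
first statement of the block.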
                io_cqring_overflow_flush(ctx, false);
 
                ret = -EOWNERDEAD;
-               if (unlikely(ctx->sq_data->thread == NULL)) {
+               if (unlikely(ctx->sq_data->thread == NULL))
                        goto out;
-               }
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
                if (flags & IORING_ENTER_SQ_WAIT) {
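
Finally, the brace removal follows Documentation/process/coding-style.rst:
do not unnecessarily use braces where a single statement will do.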