O_RDWR | O_CLOEXEC, NULL);
 }
 
-static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
-                                 struct io_uring_params __user *params)
+int io_uring_fill_params(unsigned entries, struct io_uring_params *p)
 {
-       struct io_ring_ctx *ctx;
-       struct io_uring_task *tctx;
-       struct file *file;
-       int ret;
-
        if (!entries)
                return -EINVAL;
        if (entries > IORING_MAX_ENTRIES) {
                if (!(p->flags & IORING_SETUP_CLAMP))
                        return -EINVAL;
                entries = IORING_MAX_ENTRIES;
        }
 
        /*
         * Use twice as many entries for the CQ ring. It's possible for the
         * application to drive a bigger CQ than just the SQ ring. The CQ
         * ring should be large enough for any application to not have to
         * resize it.
         */
        p->sq_entries = roundup_pow_of_two(entries);
        if (p->flags & IORING_SETUP_CQSIZE) {
                /*
                 * If IORING_SETUP_CQSIZE is set, we do the same roundup
                 * to a power-of-two, if it isn't already. We do NOT impose
                 * any cq vs sq ring sizing.
                 */
                if (!p->cq_entries)
                        return -EINVAL;
                if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
                        if (!(p->flags & IORING_SETUP_CLAMP))
                                return -EINVAL;
                        p->cq_entries = IORING_MAX_CQ_ENTRIES;
                }
                p->cq_entries = roundup_pow_of_two(p->cq_entries);
                if (p->cq_entries < p->sq_entries)
                        return -EINVAL;
        } else {
                p->cq_entries = 2 * p->sq_entries;
        }
 
+       p->sq_off.head = offsetof(struct io_rings, sq.head);
+       p->sq_off.tail = offsetof(struct io_rings, sq.tail);
+       p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
+       p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
+       p->sq_off.flags = offsetof(struct io_rings, sq_flags);
+       p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
+       p->sq_off.resv1 = 0;
+       if (!(p->flags & IORING_SETUP_NO_MMAP))
+               p->sq_off.user_addr = 0;
+
+       p->cq_off.head = offsetof(struct io_rings, cq.head);
+       p->cq_off.tail = offsetof(struct io_rings, cq.tail);
+       p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
+       p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
+       p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
+       p->cq_off.cqes = offsetof(struct io_rings, cqes);
+       p->cq_off.flags = offsetof(struct io_rings, cq_flags);
+       p->cq_off.resv1 = 0;
+       if (!(p->flags & IORING_SETUP_NO_MMAP))
+               p->cq_off.user_addr = 0;
+
+       return 0;
+}
+
+static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
+                                 struct io_uring_params __user *params)
+{
+       struct io_ring_ctx *ctx;
+       struct io_uring_task *tctx;
+       struct file *file;
+       int ret;
+
+       ret = io_uring_fill_params(entries, p);
+       if (unlikely(ret))
+               return ret;
+
        ctx = io_ring_ctx_alloc(p);
        if (!ctx)
                return -ENOMEM;
        if (ret)
                goto err;
 
+       if (!(p->flags & IORING_SETUP_NO_SQARRAY))
+               p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+
        ret = io_sq_offload_create(ctx, p);
        if (ret)
                goto err;
        if (ret)
                goto err;
 
-       p->sq_off.head = offsetof(struct io_rings, sq.head);
-       p->sq_off.tail = offsetof(struct io_rings, sq.tail);
-       p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
-       p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
-       p->sq_off.flags = offsetof(struct io_rings, sq_flags);
-       p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
-       if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
-               p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
-       p->sq_off.resv1 = 0;
-       if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-               p->sq_off.user_addr = 0;
-
-       p->cq_off.head = offsetof(struct io_rings, cq.head);
-       p->cq_off.tail = offsetof(struct io_rings, cq.tail);
-       p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
-       p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
-       p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
-       p->cq_off.cqes = offsetof(struct io_rings, cqes);
-       p->cq_off.flags = offsetof(struct io_rings, cq_flags);
-       p->cq_off.resv1 = 0;
-       if (!(ctx->flags & IORING_SETUP_NO_MMAP))
-               p->cq_off.user_addr = 0;
-
        p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
                        IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |