static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
                                 struct io_uring_zcrx_ifq_reg *reg,
-                                struct io_uring_region_desc *rd)
+                                struct io_uring_region_desc *rd,
+                                u32 id)
 {
+       u64 mmap_offset;
        size_t off, size;
        void *ptr;
        int ret;
        if (size > rd->size)
                return -EINVAL;
 
-       ret = io_create_region(ifq->ctx, &ifq->region, rd, IORING_MAP_OFF_ZCRX_REGION);
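+       /*
+        * Each ifq gets its own region: fold the zcrx id into the mmap
+        * offset so userspace can address the right ring.
+        */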
+       mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
+       mmap_offset += id << IORING_OFF_PBUF_SHIFT;
+
+       ret = io_create_region(ifq->ctx, &ifq->region, rd, mmap_offset);
        if (ret < 0)
                return ret;
 
 
 static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
 {
-       if (WARN_ON_ONCE(ifq->ctx->ifq))
-               return;
-
        io_free_region(ifq->ctx, &ifq->region);
        ifq->rq_ring = NULL;
        ifq->rqes = NULL;
 struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
                                            unsigned int id)
 {
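+       /* id is the zcrx id the mmap path recovered from the mmap offset */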
+       struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
+
        lockdep_assert_held(&ctx->mmap_lock);
 
-       if (id != 0 || !ctx->ifq)
-               return NULL;
-       return &ctx->ifq->region;
+       return ifq ? &ifq->region : NULL;
 }
 
 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
        struct io_uring_region_desc rd;
        struct io_zcrx_ifq *ifq;
        int ret;
+       u32 id;
 
        /*
         * 1. Interface queue allocation.
        if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
              ctx->flags & IORING_SETUP_CQE32))
                return -EINVAL;
-       if (ctx->ifq)
-               return -EBUSY;
        if (copy_from_user(&reg, arg, sizeof(reg)))
                return -EFAULT;
        if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
        if (!ifq)
                return -ENOMEM;
 
-       ret = io_allocate_rbuf_ring(ifq, &reg, &rd);
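+       /*
+        * Reserve an id first: it is needed to compute the per-ifq mmap
+        * offset before the ifq itself is published.
+        */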
+       scoped_guard(mutex, &ctx->mmap_lock) {
+               /* preallocate id */
+               ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+               if (ret)
+                       goto ifq_free;
+       }
+
+       ret = io_allocate_rbuf_ring(ifq, &reg, &rd, id);
        if (ret)
                goto err;
 
        reg.offsets.rqes = sizeof(struct io_uring);
        reg.offsets.head = offsetof(struct io_uring, head);
        reg.offsets.tail = offsetof(struct io_uring, tail);
+       reg.zcrx_id = id;
+
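+       /*
+        * xa_store() on the reserved slot returns NULL on success and an
+        * xa_err() pointer if it fails to allocate memory.
+        */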
+       scoped_guard(mutex, &ctx->mmap_lock) {
+               /* publish ifq */
+               ret = -ENOMEM;
+               if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
+                       goto err;
+       }
 
        if (copy_to_user(arg, &reg, sizeof(reg)) ||
            copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
                ret = -EFAULT;
                goto err;
        }
-       scoped_guard(mutex, &ctx->mmap_lock)
-               ctx->ifq = ifq;
        return 0;
 err:
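+       /* drop the reserved/published id before freeing the ifq */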
+       scoped_guard(mutex, &ctx->mmap_lock)
+               xa_erase(&ctx->zcrx_ctxs, id);
+ifq_free:
        io_zcrx_ifq_free(ifq);
        return ret;
 }
 
 void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
 {
-       struct io_zcrx_ifq *ifq = ctx->ifq;
+       struct io_zcrx_ifq *ifq;
+       unsigned long id = 0; /* xa_find() starts scanning at *id */
 
        lockdep_assert_held(&ctx->uring_lock);
 
-       if (!ifq)
-               return;
+       while (1) {
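+               /*
+                * Remove each ifq under mmap_lock so a concurrent mmap
+                * can't find it, then free it outside the lock.
+                */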
+               scoped_guard(mutex, &ctx->mmap_lock) {
+                       ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
+                       if (ifq)
+                               xa_erase(&ctx->zcrx_ctxs, id);
+               }
+               if (!ifq)
+                       break;
+               io_zcrx_ifq_free(ifq);
+       }
 
-       scoped_guard(mutex, &ctx->mmap_lock)
-               ctx->ifq = NULL;
-       io_zcrx_ifq_free(ifq);
+       xa_destroy(&ctx->zcrx_ctxs);
 }
 
 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
 
 void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
 {
+       struct io_zcrx_ifq *ifq;
+       unsigned long index;
+
        lockdep_assert_held(&ctx->uring_lock);
 
-       if (!ctx->ifq)
-               return;
-       io_zcrx_scrub(ctx->ifq);
-       io_close_queue(ctx->ifq);
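+       /* scrub and close every registered ifq; they are freed at unregister */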
+       xa_for_each(&ctx->zcrx_ctxs, index, ifq) {
+               io_zcrx_scrub(ifq);
+               io_close_queue(ifq);
+       }
 }
 
 static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)