        void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
        struct list_head        rq_list;
 
-       void                    *rq_xprtdata;   /* Per-xprt private data */
        void                    *rq_buffer;     /* Call XDR encode buffer */
        size_t                  rq_callsize;
        void                    *rq_rbuffer;    /* Reply XDR decode buffer */
 
        spin_unlock(&buf->rb_reqslock);
 
        rpcrdma_destroy_req(req);
-
-       kfree(rqst);
 }
 
-static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
-                                struct rpc_rqst *rqst)
+static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
+                                unsigned int count)
 {
-       struct rpcrdma_regbuf *rb;
-       struct rpcrdma_req *req;
-       size_t size;
+       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+       struct rpc_rqst *rqst;
+       unsigned int i;
+
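+       /* Prepare twice as many rpc_rqsts as the caller asked for;
+        * see the comment in xprt_rdma_bc_setup().
+        */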
+       for (i = 0; i < (count << 1); i++) {
+               struct rpcrdma_regbuf *rb;
+               struct rpcrdma_req *req;
+               size_t size;
+
+               req = rpcrdma_create_req(r_xprt);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
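+               /* Each rpcrdma_req embeds its rpc_rqst, so no
+                * separate allocation is needed.
+                */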
+               rqst = &req->rl_slot;
+
+               rqst->rq_xprt = xprt;
+               INIT_LIST_HEAD(&rqst->rq_list);
+               INIT_LIST_HEAD(&rqst->rq_bc_list);
+               __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
+               spin_lock_bh(&xprt->bc_pa_lock);
+               list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
+               spin_unlock_bh(&xprt->bc_pa_lock);
 
-       req = rpcrdma_create_req(r_xprt);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
-
-       size = r_xprt->rx_data.inline_rsize;
-       rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
-       if (IS_ERR(rb))
-               goto out_fail;
-       req->rl_sendbuf = rb;
-       xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
-                    min_t(size_t, size, PAGE_SIZE));
-       rpcrdma_set_xprtdata(rqst, req);
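+               /* Size the send buffer to the connection's inline
+                * threshold; the XDR buffer itself is capped at one
+                * page, which is sufficient for small backchannel
+                * messages.
+                */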
+               size = r_xprt->rx_data.inline_rsize;
+               rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
+               if (IS_ERR(rb))
+                       goto out_fail;
+               req->rl_sendbuf = rb;
+               xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
+                            min_t(size_t, size, PAGE_SIZE));
+       }
        return 0;
 
 out_fail:
 int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 {
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-       struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
-       struct rpc_rqst *rqst;
-       unsigned int i;
        int rc;
 
        /* The backchannel reply path returns each rpc_rqst to the
         * bc_pa_list _after_ the reply is sent, so twice as many
         * rpc_rqsts are prepared to ensure one is always available
         * when the next backward direction call arrives.
         */
        if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
                goto out_err;
 
-       for (i = 0; i < (reqs << 1); i++) {
-               rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
-               if (!rqst)
-                       goto out_free;
-
-               dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);
-
-               rqst->rq_xprt = &r_xprt->rx_xprt;
-               INIT_LIST_HEAD(&rqst->rq_list);
-               INIT_LIST_HEAD(&rqst->rq_bc_list);
-               __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
-
-               if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
-                       goto out_free;
-
-               spin_lock_bh(&xprt->bc_pa_lock);
-               list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
-               spin_unlock_bh(&xprt->bc_pa_lock);
-       }
+       rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
+       if (rc)
+               goto out_free;
 
        rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
        if (rc)
                goto out_free;
 
-       buffer->rb_bc_srv_max_requests = reqs;
+       r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
        request_module("svcrdma");
        trace_xprtrdma_cb_setup(r_xprt, reqs);
        return 0;
 
                return ERR_PTR(-EBADF);
        }
 
-       xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
-                       xprt_rdma_slot_table_entries,
-                       xprt_rdma_slot_table_entries);
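+       /* No rpc_rqst slot table is reserved here: ->alloc_slot and
+        * ->free_slot now supply slots from the transport's pool of
+        * rpcrdma_reqs.
+        */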
+       xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0, 0);
        if (xprt == NULL) {
                dprintk("RPC:       %s: couldn't allocate rpcrdma_xprt\n",
                        __func__);
                xprt_set_bound(xprt);
        xprt_rdma_format_addresses(xprt, sap);
 
-       cdata.max_requests = xprt->max_reqs;
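+       /* xprt->max_reqs is no longer set, so take the maximum
+        * number of concurrent requests directly from the slot
+        * table entries setting.
+        */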
+       cdata.max_requests = xprt_rdma_slot_table_entries;
 
        cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
        cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */
 static void
 xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-       struct rpc_rqst *rqst;
+       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+       struct rpcrdma_req *req;
 
-       spin_lock(&xprt->reserve_lock);
-       if (list_empty(&xprt->free))
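+       /* Acquire an rpcrdma_req from the transport's buffer pool;
+        * its embedded rl_slot serves as the rpc_rqst, replacing the
+        * reserve_lock-protected free list.
+        */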
+       req = rpcrdma_buffer_get(&r_xprt->rx_buf);
+       if (!req)
                goto out_sleep;
-       rqst = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
-       list_del(&rqst->rq_list);
-       spin_unlock(&xprt->reserve_lock);
-
-       task->tk_rqstp = rqst;
+       task->tk_rqstp = &req->rl_slot;
        task->tk_status = 0;
        return;
 
 out_sleep:
        rpc_sleep_on(&xprt->backlog, task, NULL);
-       spin_unlock(&xprt->reserve_lock);
        task->tk_status = -EAGAIN;
 }
 
 xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
 {
        memset(rqst, 0, sizeof(*rqst));
-
-       spin_lock(&xprt->reserve_lock);
-       list_add(&rqst->rq_list, &xprt->free);
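+       /* Return the owning rpcrdma_req to the buffer pool. */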
+       rpcrdma_buffer_put(rpcr_to_rdmar(rqst));
        rpc_wake_up_next(&xprt->backlog);
-       spin_unlock(&xprt->reserve_lock);
 }
 
 static int
 xprt_rdma_allocate(struct rpc_task *task)
 {
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
-       struct rpcrdma_req *req;
+       struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        gfp_t flags;
 
-       req = rpcrdma_buffer_get(&r_xprt->rx_buf);
-       if (req == NULL)
-               goto out_get;
-
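+       /* The rpcrdma_req was already attached when ->alloc_slot
+        * handed out rl_slot, so only the send and receive buffers
+        * need to be provisioned here.
+        */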
        flags = RPCRDMA_DEF_GFP;
        if (RPC_IS_SWAPPER(task))
                flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
        if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
                goto out_fail;
 
-       rpcrdma_set_xprtdata(rqst, req);
        rqst->rq_buffer = req->rl_sendbuf->rg_base;
        rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
        trace_xprtrdma_allocate(task, req);
        return 0;
 
 out_fail:
-       rpcrdma_buffer_put(req);
-out_get:
        trace_xprtrdma_allocate(task, NULL);
        return -ENOMEM;
 }
        if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
                rpcrdma_release_rqst(r_xprt, req);
        trace_xprtrdma_rpc_done(task, req);
-       rpcrdma_buffer_put(req);
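+       /* The req itself now goes back to the buffer pool in
+        * ->free_slot, not here.
+        */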
 }
 
 /**
 
 struct rpcrdma_buffer;
 struct rpcrdma_req {
        struct list_head        rl_list;
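+       /* rl_slot is the rpc_rqst handed to the RPC layer;
+        * rpcr_to_rdmar() recovers the enclosing rpcrdma_req
+        * from it.
+        */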
+       struct rpc_rqst         rl_slot;
        struct rpcrdma_buffer   *rl_buffer;
        struct rpcrdma_rep      *rl_reply;
        struct xdr_stream       rl_stream;
        RPCRDMA_REQ_F_TX_RESOURCES,
 };
 
-static inline void
-rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
-{
-       rqst->rq_xprtdata = req;
-}
-
 static inline struct rpcrdma_req *
 rpcr_to_rdmar(const struct rpc_rqst *rqst)
 {
-       return rqst->rq_xprtdata;
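+       /* rl_slot is embedded in rpcrdma_req, so container_of()
+        * replaces the old rq_xprtdata private pointer.
+        */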
+       return container_of(rqst, struct rpcrdma_req, rl_slot);
 }
 
 static inline void