                return PTR_ERR(req);
        req->rl_backchannel = true;
 
-       size = RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+       size = r_xprt->rx_data.inline_wsize;
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
        req->rl_rdmabuf = rb;
 
-       size += RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+       size += r_xprt->rx_data.inline_rsize;
        rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
        if (IS_ERR(rb))
                goto out_fail;
 
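For context, the substitution in each hunk is mechanical: the removed macros (deleted from xprt_rdma.h at the end of this patch) reached the same connection data through the generic rpc_xprt, while these callers already hold the rpcrdma_xprt. A sketch of the two access paths, assuming rpcx_to_rdmad() resolves rq_xprt back to the transport's rx_data:

	/* Old path, as the removed macro expanded: */
	size = rpcx_to_rdmad(rqst->rq_xprt).inline_wsize;

	/* New path, reading the field the caller can reach directly: */
	size = r_xprt->rx_data.inline_wsize;
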
                goto out_unmap;
        hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
 
-       if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+       if (hdrlen + rpclen > r_xprt->rx_data.inline_wsize)
                goto out_overflow;
 
        dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
 
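The guard above is the send-side bound: the RPC-over-RDMA header plus the marshaled RPC call must fit inside the inline write threshold. A worked instance with hypothetical sizes (none of these values appear in this patch):

	hdrlen = 104;		/* hypothetical RPC-over-RDMA header length */
	rpclen = 1000;		/* hypothetical marshaled RPC call length */
	/* With a hypothetical 1024-byte inline_wsize, 1104 > 1024, so
	 * the branch is taken and the request cannot be sent inline. */
	if (hdrlen + rpclen > r_xprt->rx_data.inline_wsize)
		goto out_overflow;
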
        return req->rl_sendbuf->rg_base;
 
 out_rdmabuf:
-       min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+       min_size = r_xprt->rx_data.inline_wsize;
        rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
        if (IS_ERR(rb))
                goto out_fail;
         * reply will be large, but slush is provided here to allow
         * flexibility when marshaling.
         */
-       min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
-       min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+       min_size = r_xprt->rx_data.inline_rsize;
+       min_size += r_xprt->rx_data.inline_wsize;
        if (size < min_size)
                size = min_size;
 
 
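Both allocation paths in the hunk above derive their lower bounds from rx_data: out_rdmabuf needs only inline_wsize, while the send buffer must cover both thresholds as a worst case for marshaling. A worked instance of the clamp, with hypothetical 1024-byte thresholds:

	unsigned int inline_rsize = 1024;	/* hypothetical */
	unsigned int inline_wsize = 1024;	/* hypothetical */
	size_t size = 512;			/* caller's requested size */
	size_t min_size = inline_rsize + inline_wsize;	/* 2048 */
	if (size < min_size)
		size = min_size;		/* 512 is rounded up to 2048 */
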
        unsigned int    padding;        /* non-rdma write header padding */
 };
 
-#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
-       (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
-
-#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
-       (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
-
-#define RPCRDMA_INLINE_PAD_VALUE(rq)\
-       rpcx_to_rdmad(rq->rq_xprt).padding
-
 /*
  * Statistics for RPCRDMA
  */
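
For reference, the fields the call sites now read directly are members of rpcrdma_create_data_internal, the struct this header hunk is taken from. A sketch of the relevant members, with the remaining fields elided:

	struct rpcrdma_create_data_internal {
		/* ... other connection parameters ... */
		unsigned int	inline_rsize;	/* max inline read data */
		unsigned int	inline_wsize;	/* max inline write data */
		unsigned int	padding;	/* non-rdma write header padding */
	};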