* Twice as many rpc_rqsts are prepared to ensure there is
* always an rpc_rqst available as soon as a reply is sent.
*/
+ if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
+ goto out_err;
+
for (i = 0; i < (reqs << 1); i++) {
rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
if (!rqst) {
out_free:
xprt_rdma_bc_destroy(xprt, reqs);
+out_err:
pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
return -ENOMEM;
}
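For reference, a minimal user-space sketch (illustrative only, not kernel code) of the sizing rule enforced by the new guard above, assuming RPCRDMA_BACKWARD_WRS is 8 as defined later in this patch: reqs is capped at RPCRDMA_BACKWARD_WRS >> 1 because the loop prepares reqs << 1 rpc_rqsts, so the preparation never outgrows the pre-allocated Work Requests.

#include <assert.h>
#include <stdio.h>

#define RPCRDMA_BACKWARD_WRS	(8)	/* value defined later in this patch */

int main(void)
{
	unsigned int reqs = 4;		/* hypothetical backchannel slot count */

	/* Mirrors the new guard in xprt_rdma_bc_setup() */
	assert(reqs <= RPCRDMA_BACKWARD_WRS >> 1);

	/* Twice as many rpc_rqsts are prepared so one is always
	 * available as soon as a reply is sent.
	 */
	printf("rpc_rqsts prepared: %u (of %u reserved WRs)\n",
	       reqs << 1, RPCRDMA_BACKWARD_WRS);
	return 0;
}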
- max_qp_wr = devattr->max_qp_wr - 1;
+ max_qp_wr = devattr->max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
/* check provider's send/recv wr limits */
- if (cdata->max_requests > devattr->max_qp_wr)
- cdata->max_requests = devattr->max_qp_wr;
+ if (cdata->max_requests > max_qp_wr)
+ cdata->max_requests = max_qp_wr;
ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
ep->rep_attr.qp_context = ep;
ep->rep_attr.srq = NULL;
- ep->rep_attr.cap.max_send_wr = cdata->max_requests + 1;
+ ep->rep_attr.cap.max_send_wr = cdata->max_requests +
+ RPCRDMA_BACKWARD_WRS + 1;
rc = ia->ri_ops->ro_open(ia, ep, cdata);
if (rc)
return rc;
- ep->rep_attr.cap.max_recv_wr = cdata->max_requests + 1;
+ ep->rep_attr.cap.max_recv_wr = cdata->max_requests +
+ RPCRDMA_BACKWARD_WRS + 1;
ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
ep->rep_attr.cap.max_recv_sge = 1;
ep->rep_attr.cap.max_inline_data = 0;
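A minimal user-space sketch of the Work Request budget computed in the hunk above, using hypothetical provider and mount values; the point is that once RPCRDMA_BACKWARD_WRS (plus one extra WR) is reserved out of the device's max_qp_wr, the resulting send and receive queue depths cannot exceed what the provider allows.

#include <stdio.h>

#define RPCRDMA_BACKWARD_WRS	(8)	/* value defined later in this patch */

int main(void)
{
	unsigned int dev_max_qp_wr = 16384;	/* hypothetical devattr->max_qp_wr */
	unsigned int max_requests = 32768;	/* hypothetical cdata->max_requests */
	unsigned int max_qp_wr, max_send_wr, max_recv_wr;

	/* Reserve the backward WRs (and one extra WR) before clamping
	 * the forward channel's request count.
	 */
	max_qp_wr = dev_max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
	if (max_requests > max_qp_wr)
		max_requests = max_qp_wr;

	/* Each queue then fits within the provider's limit */
	max_send_wr = max_requests + RPCRDMA_BACKWARD_WRS + 1;
	max_recv_wr = max_requests + RPCRDMA_BACKWARD_WRS + 1;
	printf("send WRs %u, recv WRs %u, provider limit %u\n",
	       max_send_wr, max_recv_wr, dev_max_qp_wr);
	return 0;
}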
*/
#define RPCRDMA_LAST_COMPLETION (1ULL)
+/* Pre-allocate extra Work Requests for handling backward receives
+ * and sends. This is a fixed value because the Work Queues are
+ * allocated when the forward channel is set up.
+ */
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+#define RPCRDMA_BACKWARD_WRS (8)
+#else
+#define RPCRDMA_BACKWARD_WRS (0)
+#endif
+
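The constant is compiled out when the backchannel is not configured; a small sketch (again illustrative, not kernel code) of how the conditional define behaves:

#include <stdio.h>

/* Uncomment to model a kernel built with CONFIG_SUNRPC_BACKCHANNEL=y */
/* #define CONFIG_SUNRPC_BACKCHANNEL 1 */

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS	(8)
#else
#define RPCRDMA_BACKWARD_WRS	(0)
#endif

int main(void)
{
	/* With the option off, every "+ RPCRDMA_BACKWARD_WRS" in the hunks
	 * above adds nothing, so the forward channel's WR budget is unchanged.
	 */
	printf("reserved backward WRs: %d\n", RPCRDMA_BACKWARD_WRS);
	return 0;
}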
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
*
* The below structure appears at the front of a large region of kmalloc'd