#define SDP_RX_SIZE 0x40
#define SDP_FMR_SIZE (MIN(0x1000, PAGE_SIZE) / sizeof(u64))
-#define SDP_FMR_POOL_SIZE 1024
-#define SDP_FMR_DIRTY_SIZE ( SDP_FMR_POOL_SIZE / 4 )
#define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))
u32 zcopy_cross_send;
u32 zcopy_tx_aborted;
u32 zcopy_tx_error;
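+ /* counts failed FMR allocations in the zero-copy paths */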
+ u32 fmr_alloc_error;
};
static inline void sdpstats_hist(u32 *h, u32 val, u32 maxidx, int is_log)
"Enable data path debug tracing if > 0.");
#endif
+SDP_MODPARAM_SINT(sdp_fmr_pool_size, 20, "Number of FMRs to allocate for the pool");
+SDP_MODPARAM_SINT(sdp_fmr_dirty_wm, 5, "Number of dirty FMRs that triggers a pool flush");
+
SDP_MODPARAM_SINT(recv_poll, 10, "How many msec to poll recv.");
SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME,
"Default idle time in seconds before keepalive probe sent.");
}
memset(&fmr_param, 0, sizeof fmr_param);
- fmr_param.pool_size = SDP_FMR_POOL_SIZE;
- fmr_param.dirty_watermark = SDP_FMR_DIRTY_SIZE;
+ fmr_param.pool_size = sdp_fmr_pool_size;
+ fmr_param.dirty_watermark = sdp_fmr_dirty_wm;
fmr_param.cache = 1;
fmr_param.max_pages_per_fmr = SDP_FMR_SIZE;
fmr_param.page_shift = PAGE_SHIFT;
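
These parameters presumably feed straight into pool creation; a minimal
sketch using the in-kernel ib_create_fmr_pool() API (the sdp_dev->pd
field and the NULL fallback on failure are assumptions):

        sdp_dev->fmr_pool = ib_create_fmr_pool(sdp_dev->pd, &fmr_param);
        if (IS_ERR(sdp_dev->fmr_pool))
                sdp_dev->fmr_pool = NULL;   /* run without zero-copy */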
seq_printf(seq, "- TX cross send\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_cross_send));
seq_printf(seq, "- TX aborted by peer\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_aborted));
seq_printf(seq, "- TX error\t\t: %d\n", SDPSTATS_COUNTER_GET(zcopy_tx_error));
+ seq_printf(seq, "- FMR alloc error\t: %d\n", SDPSTATS_COUNTER_GET(fmr_alloc_error));
__sdpstats_seq_hist_pcpu(seq, "CPU sendmsg", sendmsg);
__sdpstats_seq_hist_pcpu(seq, "CPU recvmsg", recvmsg);
fmr = ib_fmr_pool_map_phys(sdp_sk(sk)->sdp_dev->fmr_pool, pages, n, 0);
if (IS_ERR(fmr)) {
- sdp_warn(sk, "Error allocating fmr: %ld\n", PTR_ERR(fmr));
+ sdp_dbg_data(sk, "Error allocating fmr: %ld\n", PTR_ERR(fmr));
+ SDPSTATS_COUNTER_INC(fmr_alloc_error);
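+ /* hand the actual error code back to the caller */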
+ rc = PTR_ERR(fmr);
goto err_fmr_alloc;
}
void sdp_free_fmr(struct sock *sk, struct ib_pool_fmr **_fmr, struct ib_umem **_umem)
{
- if (!sdp_sk(sk)->qp_active)
+ if (!sdp_sk(sk)->qp_active) {
+ sdp_warn(sk, "Trying to free fmr after destroying QP! fmr: %p\n",
+ *_fmr);
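+ /* bail out without unmapping; the pool entry is abandoned with the QP */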
return;
+ }
ib_fmr_pool_unmap(*_fmr);
*_fmr = NULL;
rc = sdp_alloc_fmr(sk, iov->iov_base, len, &rx_sa->fmr, &rx_sa->umem);
if (rc) {
- sdp_warn(sk, "Error allocating fmr: %d\n", rc);
+ sdp_dbg_data(sk, "Error allocating fmr: %d\n", rc);
goto err_alloc_fmr;
}
err_alloc_fmr:
if (rc && ssk->qp_active) {
- sdp_warn(sk, "Couldn't do RDMA - post sendsm\n");
+ sdp_dbg_data(sk, "Couldn't do RDMA - post sendsm\n");
rx_sa->flags |= RX_SA_ABORTED;
}
rc = sdp_alloc_fmr(sk, iov->iov_base, iov->iov_len,
&tx_sa->fmr, &tx_sa->umem);
if (rc) {
- sdp_warn(sk, "Error allocating fmr: %d\n", rc);
+ sdp_dbg_data(sk, "Error allocating fmr: %d\n", rc);
goto err_alloc_fmr;
}