struct rx_srcavail_state *rx_sa);
int sdp_post_sendsm(struct sock *sk);
void srcavail_cancel_timeout(struct work_struct *work);
+void sdp_unmap_dma(struct sock *sk, u64 *addrs, int page_cnt);
#endif
#define SOCK_REF_CM_TW "CM_TW" /* TIMEWAIT_ENTER -> TIMEWAIT_EXIT */
#define SOCK_REF_SEQ "SEQ" /* during proc read */
#define SOCK_REF_DREQ_TO "DREQ_TO" /* dreq timeout is pending */
+#define SOCK_REF_ZCOPY "ZCOPY" /* zcopy send in progress */
#define sock_hold(sk, msg) sock_ref(sk, msg, sock_hold)
#define sock_put(sk, msg) sock_ref(sk, msg, sock_put)
if (ssk->tx_ring.cq)
sdp_xmit_poll(ssk, 1);
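+	/*
+	 * A zcopy send (SrcAvail) may still be in flight; unmap its DMA
+	 * addresses now and clear addrs so the send path's own cleanup
+	 * does not unmap them a second time.
+	 */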
+ if (ssk->tx_sa) {
+ sdp_unmap_dma(sk, ssk->tx_sa->addrs, ssk->tx_sa->page_cnt);
+ ssk->tx_sa->addrs = NULL;
+ }
+
if (!(sk->sk_shutdown & RCV_SHUTDOWN) || !sk_stream_memory_free(sk)) {
sdp_dbg(sk, "setting state to error\n");
sdp_set_error(sk, rc);
*timeo_p = current_timeo;
}
- if (!ssk->qp_active) {
- sdp_warn(sk, "qp is not active\n");
- }
-
finish_wait(sk->sk_sleep, &wait);
sdp_dbg_data(sk, "Finished waiting - RdmaRdCompl: %d/%d bytes, flags: 0x%x\n",
return -1;
}
-static void sdp_unmap_dma(struct sock *sk, u64 *addrs, int page_cnt)
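+/*
+ * No longer static: also called when a socket with a pending zcopy send
+ * is torn down (declaration added to sdp.h).
+ */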
+void sdp_unmap_dma(struct sock *sk, u64 *addrs, int page_cnt)
{
int i;
struct ib_device *dev = sdp_sk(sk)->ib_device;
}
lock_sock(sk);
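+	/* Hold the socket while the zcopy send is in progress; dropped below. */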
+ sock_hold(&ssk->isk.sk, SOCK_REF_ZCOPY);
SDPSTATS_COUNTER_INC(sendmsg_zcopy_segment);
sdp_warn(sk, "Error sending SrcAvail. rc = %d\n", rc);
- sdp_unmap_dma(sk, tx_sa->addrs, page_cnt);
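+	/* addrs is NULLed once unmapped, so avoid a double unmap here. */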
+ if (tx_sa->addrs)
+ sdp_unmap_dma(sk, tx_sa->addrs, page_cnt);
err_map_dma:
sdp_put_pages(sk, tx_sa->pages, page_cnt);
err_get_pages:
sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy end rc: %d copied: %d", rc, copied);
posts_handler_put(ssk);
release_sock(sk);
+ sock_put(&ssk->isk.sk, SOCK_REF_ZCOPY);
return rc ?: copied;
}