/* Advertised buffer stuff */
u32 mseq;
u32 reported;
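+ /* 'reported' is what the peer has been told via RdmaRdCompl;
+  * 'copied' is what was actually copied to the user buffer. */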
+ u32 copied;
u32 len;
u32 rkey;
u64 vaddr;
int sdp_tx_wait_memory(struct sdp_sock *ssk, long *timeo_p, int *credits_needed);
void sdp_skb_entail(struct sock *sk, struct sk_buff *skb);
void sdp_start_cma_timewait_timeout(struct sdp_sock *ssk, int timeo);
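+/* Tear down a pending rx SrcAvail, reporting already-copied bytes first. */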
+int sdp_abort_rx_srcavail(struct sock *sk);
extern struct rw_semaphore device_removal_lock;
/* sdp_proc.c */
void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
u32 bytes_completed);
int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk);
-int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb,
- unsigned long *used, u32 offset);
+int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, int msg_iovlen,
+ struct sk_buff *skb, unsigned long *used, u32 offset);
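+/* Sends an RdmaRdCompl for (rx_sa->copied - rx_sa->reported) bytes;
+ * does nothing if no new bytes were copied since the last report. */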
int sdp_post_rdma_rd_compl(struct sock *sk,
- struct rx_srcavail_state *rx_sa, u32 offset);
+ struct rx_srcavail_state *rx_sa);
int sdp_post_sendsm(struct sock *sk);
void sdp_abort_srcavail(struct sock *sk);
void sdp_abort_rdma_read(struct sock *sk);
DECLARE_RWSEM(device_removal_lock);
-static inline int sdp_abort_rx_srcavail(struct sock *sk);
-
static inline unsigned int sdp_keepalive_time_when(const struct sdp_sock *ssk)
{
return ssk->keepalive_time ? : sdp_keepalive_time;
}
-static inline int sdp_abort_rx_srcavail(struct sock *sk)
+int sdp_abort_rx_srcavail(struct sock *sk)
{
struct sdp_sock *ssk = sdp_sk(sk);
struct sdp_bsdh *h =
	(struct sdp_bsdh *)skb_transport_header(ssk->rx_sa->skb);
h->mid = SDP_MID_DATA;
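+ /* Report any bytes already copied from this SrcAvail before dropping it. */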
+ if (sdp_post_rdma_rd_compl(sk, ssk->rx_sa)) {
+ sdp_warn(sk, "Couldn't send RdmaRdComp - "
+ "data corruption might occur\n");
+ }
+
RX_SRCAVAIL_STATE(ssk->rx_sa->skb) = NULL;
kfree(ssk->rx_sa);
ssk->rx_sa = NULL;
if (rx_sa && offset >= skb->len) {
/* No more payload - start rdma copy */
sdp_dbg_data(sk, "RDMA copy of 0x%lx bytes\n", used);
- err = sdp_rdma_to_iovec(sk, msg->msg_iov, skb,
+ err = sdp_rdma_to_iovec(sk, msg->msg_iov, msg->msg_iovlen, skb,
&used, offset);
if (unlikely(err)) {
/* ssk->rx_sa might have been freed when
/* TODO: skip header? */
err = skb_copy_datagram_iovec(skb, offset,
		msg->msg_iov, used);
if (rx_sa && !(flags & MSG_PEEK)) {
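+ /* Payload copied straight from the skb advances both counters, so it
+  * never shows up as unreported in sdp_post_rdma_rd_compl(). */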
+ rx_sa->copied += used;
rx_sa->reported += used;
}
}
if (rx_sa && !(flags & MSG_PEEK)) {
- rc = sdp_post_rdma_rd_compl(sk, rx_sa, offset);
+ rc = sdp_post_rdma_rd_compl(sk, rx_sa);
if (unlikely(rc)) {
sdp_abort_rx_srcavail(sk);
rx_sa = NULL;
case SDP_MID_SRCAVAIL_CANCEL:
if (ssk->rx_sa && after(ntohl(h->mseq), ssk->rx_sa->mseq) &&
!ssk->tx_ring.rdma_inflight) {
- sdp_dbg(sk, "Handling SrcAvailCancel - post SendSM\n");
- RX_SRCAVAIL_STATE(ssk->rx_sa->skb) = NULL;
- kfree(ssk->rx_sa);
- ssk->rx_sa = NULL;
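+ /* sdp_abort_rx_srcavail() also reports bytes already copied before
+  * freeing rx_sa. */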
+ sdp_abort_rx_srcavail(sk);
sdp_post_sendsm(sk);
}
break;
return rc;
}
-int sdp_post_rdma_rd_compl(struct sock *sk,
- struct rx_srcavail_state *rx_sa, u32 offset)
+int sdp_post_rdma_rd_compl(struct sock *sk, struct rx_srcavail_state *rx_sa)
{
struct sk_buff *skb;
- int copied = offset - rx_sa->reported;
+ int unreported = rx_sa->copied - rx_sa->reported;
- if (offset <= rx_sa->reported)
+ if (rx_sa->copied <= rx_sa->reported)
return 0;
- skb = sdp_alloc_skb_rdmardcompl(sk, copied, 0);
+ skb = sdp_alloc_skb_rdmardcompl(sk, unreported, 0);
if (unlikely(!skb))
return -ENOMEM;
sdp_skb_entail(sk, skb);
- rx_sa->reported += copied;
+ rx_sa->reported += unreported;
sdp_post_sends(sdp_sk(sk), 0);
return 0;
}
-int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb,
- unsigned long *used, u32 offset)
+int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, int msg_iovlen,
+ struct sk_buff *skb, unsigned long *used, u32 offset)
{
struct sdp_sock *ssk = sdp_sk(sk);
struct rx_srcavail_state *rx_sa = RX_SRCAVAIL_STATE(skb);
int rc = 0;
int len = *used;
int copied;
+ int i = 0;
if (unlikely(!ssk->ib_device))
return -ENODEV;
- while (!iov->iov_len)
+ while (!iov->iov_len) {
++iov;
+ i++;
+ }
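+ /* Warn if skipping empty iovec entries walked past the caller's array. */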
+ WARN_ON(i >= msg_iovlen);
sdp_dbg_data(sk_ssk(ssk), "preparing RDMA read."
" len: 0x%x. buffer len: 0x%zx\n", len, iov->iov_len);
sdp_update_iov_used(sk, iov, copied);
atomic_add(copied, &ssk->rcv_nxt);
*used = copied;
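+ /* Bytes pulled in by the RDMA read count toward the next RdmaRdCompl. */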
+ rx_sa->copied += copied;
err_wait:
ssk->tx_ring.rdma_inflight = NULL;