skb_pull(skb, sizeof(struct sdp_bsdh));
- if (unlikely(h->mid == SDP_MID_SRCAVAIL))
- skb_pull(skb, sizeof(struct sdp_srcah));
+ if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
+ if (ssk->rx_sa) {
+ sdp_dbg_data(sk, "SrcAvail in the middle of another SrcAvail. Aborting\n");
+ h->mid = SDP_MID_DATA;
+ sdp_post_sendsm(sk);
+ } else {
+ skb_pull(skb, sizeof(struct sdp_srcah));
+ }
+ }
if (unlikely(h->mid == SDP_MID_DATA && skb->len == 0)) {
/* Credit update is valid even after RCV_SHUTDOWN */
}
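A note on what this hunk enforces: the receive side services at most one SrcAvail at a time. If a second SrcAvail arrives while ssk->rx_sa is still in flight, its MID is demoted to SDP_MID_DATA (so the inline payload is consumed as ordinary data and the sdp_srcah header is deliberately not pulled) and a SendSM is posted so the peer aborts the RDMA transfer and retransmits as plain data. A minimal sketch of that rule, with made-up names (rx_pkt, rx_demote_nested_srcavail); the MID values follow the SDP spec:

/* Illustrative only: rx_pkt and the helper are not driver names. */
#include <stdbool.h>

enum { SDP_MID_SRCAVAIL = 0xFE, SDP_MID_DATA = 0xFF };

struct rx_pkt {
	unsigned char mid;	/* message id from the BSDH */
};

/*
 * Returns true when a SendSM must be posted: a SrcAvail arrived while
 * a previous one is still being serviced (srcavail_pending), so the
 * packet is demoted to plain data and the peer is asked to abort.
 */
static bool rx_demote_nested_srcavail(struct rx_pkt *pkt,
				      bool srcavail_pending)
{
	if (pkt->mid != SDP_MID_SRCAVAIL || !srcavail_pending)
		return false;
	pkt->mid = SDP_MID_DATA;	/* consume inline bytes as data */
	return true;			/* caller posts SendSM */
}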
static int sdp_alloc_fmr(struct sock *sk, void *uaddr, size_t len,
- struct ib_pool_fmr **_fmr, struct ib_umem **_umem, int access)
+ struct ib_pool_fmr **_fmr, struct ib_umem **_umem, int access, int min_len)
{
struct ib_pool_fmr *fmr;
struct ib_umem *umem;
len = max_lockable_bytes;
}
- if (unlikely(len == 0))
+ if (unlikely(len <= min_len))
return -EAGAIN;
sdp_dbg_data(sk, "user buf: %p, len:0x%zx max_lockable_bytes: 0x%lx\n",
access, 0);
if (IS_ERR(umem)) {
- rc = PTR_ERR(umem);
- sdp_warn(sk, "Error doing umem_get 0x%zx bytes: %d\n", len, rc);
- sdp_warn(sk, "RLIMIT_MEMLOCK: 0x%lx[cur] 0x%lx[max] CAP_IPC_LOCK: %d\n",
+ rc = -EAGAIN;
+ sdp_dbg_data(sk, "Error doing umem_get 0x%zx bytes: %ld\n",
+ 	len, PTR_ERR(umem));
+ sdp_dbg_data(sk, "RLIMIT_MEMLOCK: 0x%lx[cur] 0x%lx[max] CAP_IPC_LOCK: %d\n",
current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur,
current->signal->rlim[RLIMIT_MEMLOCK].rlim_max,
capable(CAP_IPC_LOCK));
}
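The new min_len parameter turns the old empty-buffer check into a profitability check: by this point len may already have been clamped down to what RLIMIT_MEMLOCK allows, and if what remains is not strictly larger than min_len, registering an FMR is not worth the cost, so -EAGAIN sends the caller back to the regular copy path. A sketch of that decision under those assumptions, with a made-up helper name:

/* Sketch only: clamp_and_check_fmr_len() is not a driver function. */
#include <errno.h>
#include <stddef.h>

static int clamp_and_check_fmr_len(size_t *len, size_t max_lockable,
				   size_t min_len)
{
	if (*len > max_lockable)
		*len = max_lockable;	/* honor RLIMIT_MEMLOCK */
	if (*len <= min_len)
		return -EAGAIN;		/* too small to zero-copy */
	return 0;			/* proceed to umem_get + FMR */
}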
rc = sdp_alloc_fmr(sk, iov->iov_base, len, &rx_sa->fmr, &rx_sa->umem,
- IB_ACCESS_LOCAL_WRITE);
+ IB_ACCESS_LOCAL_WRITE, 0);
if (rc) {
sdp_dbg_data(sk, "Error allocating fmr: %d\n", rc);
goto err_alloc_fmr;
unsigned long lock_flags;
rc = sdp_alloc_fmr(sk, iov->iov_base, iov->iov_len,
- &tx_sa->fmr, &tx_sa->umem, IB_ACCESS_REMOTE_READ);
+ &tx_sa->fmr, &tx_sa->umem, IB_ACCESS_REMOTE_READ, sdp_zcopy_thresh);
if (unlikely(rc)) {
sdp_dbg_data(sk, "Error allocating fmr: %d\n", rc);
goto err_alloc_fmr;
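The two call sites pass different thresholds on purpose. The receive side passes 0, which preserves the old len == 0 check (with IB_ACCESS_LOCAL_WRITE, since the local HCA writes the RDMA Read response data into the buffer): the peer has already advertised the SrcAvail, so any registrable length saves a copy. The send side passes sdp_zcopy_thresh (with IB_ACCESS_REMOTE_READ, exposing the buffer to the peer's RDMA Read) so that sends at or below the threshold get -EAGAIN and stay on the cheaper bcopy path. A hedged sketch of that sender-side fallback, assuming a hypothetical do_bcopy_send() stands in for the driver's copy path:

/* Illustrative: do_bcopy_send() is a made-up stand-in. */
rc = sdp_alloc_fmr(sk, iov->iov_base, iov->iov_len,
		   &tx_sa->fmr, &tx_sa->umem,
		   IB_ACCESS_REMOTE_READ, sdp_zcopy_thresh);
if (rc == -EAGAIN)
	return do_bcopy_send(sk, iov);	/* small send: copy it */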