From: Eldad Zinger Date: Thu, 17 Jun 2010 06:57:57 +0000 (+0300) Subject: sdp: Fix for warning message when receiving with MSG_PEEK flag, and free skb that... X-Git-Tag: v4.1.12-92~264^2~5^2~161 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=54ac60f478a8d54db360943cc2c8a76e05277e07;p=users%2Fjedix%2Flinux-maple.git sdp: Fix for warning message when receiving with MSG_PEEK flag, and free skb that is not needed any more after all data was read from it. 'rx_sa->used', unlike 'offset', was not updated when MSG_PEEK flag was up, and that led to the behavior that bytes were considered available to copy while the sequence offset showed that the bytes were already consumed. The solution is to discard any use of 'rx_sa->used' and use 'offset' instead. An skb of SDP_MID_SRCAVAIL is not needed anymore when all its inline data was consumed and the RDMA operation was canceled (due to MSG_PEEK flag). This fix eliminates the warning message: "Trying to read beyond SKB". Signed-off-by: Eldad Zinger --- diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h index 37200011caa19..908ee1f9306d3 100644 --- a/drivers/infiniband/ulp/sdp/sdp.h +++ b/drivers/infiniband/ulp/sdp/sdp.h @@ -234,7 +234,6 @@ enum tx_sa_flag { struct rx_srcavail_state { /* Advertised buffer stuff */ u32 mseq; - u32 used; u32 reported; u32 len; u32 rkey; @@ -840,9 +839,9 @@ void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack, u32 bytes_completed); int sdp_handle_rdma_read_cqe(struct sdp_sock *ssk); int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, - unsigned long *used); + unsigned long *used, u32 offset); int sdp_post_rdma_rd_compl(struct sock *sk, - struct rx_srcavail_state *rx_sa); + struct rx_srcavail_state *rx_sa, u32 offset); int sdp_post_sendsm(struct sock *sk); void srcavail_cancel_timeout(struct work_struct *work); void sdp_abort_srcavail(struct sock *sk); diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c
b/drivers/infiniband/ulp/sdp/sdp_main.c index e83aa4630bb99..61b05f5cd0c94 100644 --- a/drivers/infiniband/ulp/sdp/sdp_main.c +++ b/drivers/infiniband/ulp/sdp/sdp_main.c @@ -2185,6 +2185,7 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (!skb) break; + offset = *seq - SDP_SKB_CB(skb)->seq; avail_bytes_count = 0; h = (struct sdp_bsdh *)skb_transport_header(skb); @@ -2211,7 +2212,7 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, sdp_dbg_data(sk, "Ignoring src avail " "due to SrcAvailCancel\n"); sdp_post_sendsm(sk); - if (rx_sa->used < skb->len) { + if (offset < skb->len) { sdp_abort_rx_srcavail(sk, skb); sdp_prf(sk, skb, "Converted SA to DATA"); goto sdp_mid_data; @@ -2222,23 +2223,28 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } /* if has payload - handle as if MID_DATA */ - if (rx_sa->used < skb->len) { + if (offset < skb->len) { sdp_dbg_data(sk, "SrcAvail has " "payload: %d/%d\n", - rx_sa->used, + offset, skb->len); avail_bytes_count = skb->len; } else { sdp_dbg_data(sk, "Finished payload. 
" "RDMAing: %d/%d\n", - rx_sa->used, rx_sa->len); + offset, rx_sa->len); if (flags & MSG_PEEK) { + u32 real_offset = + ssk->copied_seq - + SDP_SKB_CB(skb)->seq; sdp_dbg_data(sk, "Peek on RDMA data - " "fallback to BCopy\n"); sdp_abort_rx_srcavail(sk, skb); sdp_post_sendsm(sk); rx_sa = NULL; + if (real_offset >= skb->len) + goto force_skb_cleanup; } else { avail_bytes_count = rx_sa->len; } @@ -2262,7 +2268,6 @@ sdp_mid_data: break; } - offset = *seq - SDP_SKB_CB(skb)->seq; if (offset < avail_bytes_count) goto found_ok_skb; @@ -2374,10 +2379,11 @@ sdp_mid_data: } } if (!(flags & MSG_TRUNC)) { - if (rx_sa && rx_sa->used >= skb->len) { + if (rx_sa && offset >= skb->len) { /* No more payload - start rdma copy */ sdp_dbg_data(sk, "RDMA copy of %lx bytes\n", used); - err = sdp_rdma_to_iovec(sk, msg->msg_iov, skb, &used); + err = sdp_rdma_to_iovec(sk, msg->msg_iov, skb, + &used, offset); sdp_dbg_data(sk, "used = %lx bytes\n", used); if (err == -EAGAIN) { sdp_dbg_data(sk, "RDMA Read aborted\n"); @@ -2391,7 +2397,6 @@ sdp_mid_data: /* TODO: skip header? 
*/ msg->msg_iov, used); if (rx_sa && !(flags & MSG_PEEK)) { - rx_sa->used += used; rx_sa->reported += used; } } @@ -2409,7 +2414,7 @@ sdp_mid_data: copied += used; len -= used; *seq += used; - + offset = *seq - SDP_SKB_CB(skb)->seq; sdp_dbg_data(sk, "done copied %d target %d\n", copied, target); sdp_do_posts(sdp_sk(sk)); @@ -2419,15 +2424,15 @@ skip_copy: if (rx_sa && !(flags & MSG_PEEK)) { - rc = sdp_post_rdma_rd_compl(sk, rx_sa); + rc = sdp_post_rdma_rd_compl(sk, rx_sa, offset); BUG_ON(rc); } - if (!rx_sa && used + offset < skb->len) + if (!rx_sa && offset < skb->len) continue; - if (rx_sa && rx_sa->used < rx_sa->len) + if (rx_sa && offset < rx_sa->len) continue; offset = 0; @@ -2448,7 +2453,7 @@ skb_cleanup: rx_sa = NULL; } - +force_skb_cleanup: sdp_dbg_data(sk, "unlinking skb %p\n", skb); skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c index 92eb692ca06d1..7a763264b03d9 100644 --- a/drivers/infiniband/ulp/sdp/sdp_rx.c +++ b/drivers/infiniband/ulp/sdp/sdp_rx.c @@ -327,7 +327,6 @@ static inline struct sk_buff *sdp_sock_queue_rcv_skb(struct sock *sk, sizeof(struct rx_srcavail_state), GFP_ATOMIC); rx_sa->mseq = ntohl(h->mseq); - rx_sa->used = 0; rx_sa->len = skb_len = ntohl(srcah->len); rx_sa->rkey = ntohl(srcah->rkey); rx_sa->vaddr = be64_to_cpu(srcah->vaddr); diff --git a/drivers/infiniband/ulp/sdp/sdp_zcopy.c b/drivers/infiniband/ulp/sdp/sdp_zcopy.c index 4cb8dff79e9a7..982eaa347ac92 100644 --- a/drivers/infiniband/ulp/sdp/sdp_zcopy.c +++ b/drivers/infiniband/ulp/sdp/sdp_zcopy.c @@ -276,12 +276,12 @@ static void sdp_wait_rdma_wr_finished(struct sdp_sock *ssk) } int sdp_post_rdma_rd_compl(struct sock *sk, - struct rx_srcavail_state *rx_sa) + struct rx_srcavail_state *rx_sa, u32 offset) { struct sk_buff *skb; - int copied = rx_sa->used - rx_sa->reported; + int copied = offset - rx_sa->reported; - if (rx_sa->used <= rx_sa->reported) + if (offset <= rx_sa->reported) 
return 0; skb = sdp_alloc_skb_rdmardcompl(sk, copied, 0); @@ -515,7 +515,8 @@ void sdp_free_fmr(struct sock *sk, struct ib_pool_fmr **_fmr, struct ib_umem **_ *_umem = NULL; } -static int sdp_post_rdma_read(struct sock *sk, struct rx_srcavail_state *rx_sa) +static int sdp_post_rdma_read(struct sock *sk, struct rx_srcavail_state *rx_sa, + u32 offset) { struct sdp_sock *ssk = sdp_sk(sk); struct ib_send_wr *bad_wr; @@ -534,7 +535,7 @@ static int sdp_post_rdma_read(struct sock *sk, struct rx_srcavail_state *rx_sa) sge.length = rx_sa->umem->length; sge.lkey = rx_sa->fmr->fmr->lkey; - wr.wr.rdma.remote_addr = rx_sa->vaddr + rx_sa->used; + wr.wr.rdma.remote_addr = rx_sa->vaddr + offset; wr.num_sge = 1; wr.sg_list = &sge; rx_sa->busy++; @@ -545,7 +546,7 @@ static int sdp_post_rdma_read(struct sock *sk, struct rx_srcavail_state *rx_sa) } int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, - unsigned long *used) + unsigned long *used, u32 offset) { struct sdp_sock *ssk = sdp_sk(sk); struct rx_srcavail_state *rx_sa = RX_SRCAVAIL_STATE(skb); @@ -574,7 +575,7 @@ int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, goto err_alloc_fmr; } - rc = sdp_post_rdma_read(sk, rx_sa); + rc = sdp_post_rdma_read(sk, rx_sa, offset); if (unlikely(rc)) { sdp_warn(sk, "ib_post_send failed with status %d.\n", rc); sdp_set_error(&ssk->isk.sk, -ECONNRESET); @@ -599,7 +600,6 @@ int sdp_rdma_to_iovec(struct sock *sk, struct iovec *iov, struct sk_buff *skb, copied = rx_sa->umem->length; sdp_update_iov_used(sk, iov, copied); - rx_sa->used += copied; atomic_add(copied, &ssk->rcv_nxt); *used = copied;