From: Eldad Zinger
Date: Sun, 17 Oct 2010 17:44:06 +0000 (+0200)
Subject: sdp: cosmetics, debug messages, error codes
X-Git-Tag: v4.1.12-92~264^2~5^2~85
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=9693592ea4ad5a0b9cc2902409c566f6444259d4;p=users%2Fjedix%2Flinux-maple.git

sdp: cosmetics, debug messages, error codes

Signed-off-by: Eldad Zinger
---

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 667041561adcc..84f112478c4ea 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -462,7 +462,6 @@ struct sdp_sock {
 	unsigned long tx_packets;
 	unsigned long rx_packets;
-	unsigned long tx_bytes;
 	unsigned long rx_bytes;
 
 	struct sdp_moderation auto_mod;
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 0a098397d97c4..9647fb606733a 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -453,7 +453,7 @@ void sdp_reset(struct sock *sk)
 {
 	int err;
 
-	sdp_dbg(sk, "%s state=%d\n", __func__, sk->sk_state);
+	sdp_dbg(sk, "%s state=%s\n", __func__, sdp_state_str(sk->sk_state));
 
 	if (sk->sk_state != TCP_ESTABLISHED)
 		return;
@@ -895,8 +895,8 @@ static struct sock *sdp_accept(struct sock *sk, int flags, int *err)
 	int error;
 
 	sdp_add_to_history(sk, __func__);
-	sdp_dbg(sk, "%s state %d expected %d *err %d\n", __func__,
-			sk->sk_state, TCP_LISTEN, *err);
+	sdp_dbg(sk, "%s state %s expected %s *err %d\n", __func__,
+			sdp_state_str(sk->sk_state), "TCP_LISTEN", *err);
 
 	ssk = sdp_sk(sk);
 	lock_sock(sk);
@@ -2413,7 +2413,7 @@ sdp_mid_data:
 		if (!(flags & MSG_TRUNC)) {
 			if (rx_sa && offset >= skb->len) {
 				/* No more payload - start rdma copy */
-				sdp_dbg_data(sk, "RDMA copy of %lx bytes\n", used);
+				sdp_dbg_data(sk, "RDMA copy of 0x%lx bytes\n", used);
 				err = sdp_rdma_to_iovec(sk, msg->msg_iov, skb,
 						&used, offset);
 				if (unlikely(err)) {
diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c
index d1ac6812b493e..a21b2ffa7bea5 100644
--- a/drivers/infiniband/ulp/sdp/sdp_rx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_rx.c
@@ -122,8 +122,8 @@ void sdp_handle_disconn(struct sock *sk)
 				__func__, sdp_state_str(sk->sk_state));
 		return;
 	default:
-		sdp_warn(sk, "%s: FIN in unexpected state. sk->sk_state=%d\n",
-				__func__, sk->sk_state);
+		sdp_warn(sk, "%s: FIN in unexpected state. sk->sk_state=%s\n",
+				__func__, sdp_state_str(sk->sk_state));
 		break;
 	}
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c
index a34211b2dd6c9..324a28f70ff73 100644
--- a/drivers/infiniband/ulp/sdp/sdp_tx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_tx.c
@@ -85,7 +85,6 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
 		goto err;
 
 	ssk->tx_packets++;
-	ssk->tx_bytes += skb->len;
 
 	if (unlikely(h->mid == SDP_MID_SRCAVAIL)) {
 		struct tx_srcavail_state *tx_sa = TX_SRCAVAIL_STATE(skb);
diff --git a/drivers/infiniband/ulp/sdp/sdp_zcopy.c b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
index 6369b372afa12..c5253d9b1ae3c 100644
--- a/drivers/infiniband/ulp/sdp/sdp_zcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_zcopy.c
@@ -411,7 +411,7 @@ static int sdp_alloc_fmr(struct sock *sk, void *uaddr, size_t len,
 	struct ib_device *dev = sdp_sk(sk)->ib_device;
 	u64 *pages;
 	struct ib_umem_chunk *chunk;
-	int n, j, k;
+	int n = 0, j, k;
 	int rc = 0;
 	unsigned long max_lockable_bytes;
 
@@ -448,10 +448,10 @@ static int sdp_alloc_fmr(struct sock *sk, void *uaddr, size_t len,
 			umem->offset, umem->length);
 
 	pages = (u64 *) __get_free_page(GFP_KERNEL);
-	if (!pages)
+	if (!pages) {
+		rc = -ENOMEM;
 		goto err_pages_alloc;
-
-	n = 0;
+	}
 
 	list_for_each_entry(chunk, &umem->chunk_list, list) {
 		for (j = 0; j < chunk->nmap; ++j) {
@@ -493,7 +493,8 @@ err_umem_get:
 	return rc;
 }
 
-void sdp_free_fmr(struct sock *sk, struct ib_pool_fmr **_fmr, struct ib_umem **_umem)
+static inline void sdp_free_fmr(struct sock *sk, struct ib_pool_fmr **_fmr,
+		struct ib_umem **_umem)
 {
 	if (*_fmr) {
 		ib_fmr_pool_unmap(*_fmr);
@@ -697,33 +698,25 @@ int sdp_sendmsg_zcopy(struct kiocb *iocb, struct sock *sk, struct iovec *iov)
 {
 	struct sdp_sock *ssk = sdp_sk(sk);
 	int rc = 0;
-	long timeo;
+	long timeo = SDP_SRCAVAIL_ADV_TIMEOUT;
 	struct tx_srcavail_state *tx_sa;
-	int offset;
 	size_t bytes_to_copy = iov->iov_len;
 	int copied = 0;
 
-	sdp_dbg_data(sk, "Sending iov: %p, iov_len: 0x%zx\n",
+	sdp_dbg_data(sk, "Sending ZCopy iov: %p, iov_len: 0x%zx\n",
 			iov->iov_base, iov->iov_len);
-	sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy start");
 
 	if (ssk->rx_sa) {
 		/* Don't want both sides to send SrcAvail because both of them
 		 * will wait on sendmsg() until timeout.
-		 * Don't need to lock 'rx_ring.lock' because when SrcAvail is
-		 * received, sk_sleep'ers are woken up.
 		 */
 		sdp_dbg_data(sk, "Deadlock prevent: crossing SrcAvail\n");
		return 0;
 	}
 
 	sock_hold(&ssk->isk.sk, SOCK_REF_ZCOPY);
-
 	SDPSTATS_COUNTER_INC(sendmsg_zcopy_segment);
 
-	timeo = SDP_SRCAVAIL_ADV_TIMEOUT ;
-
-	/* Ok commence sending. */
-	offset = (unsigned long)iov->iov_base & (PAGE_SIZE - 1);
-
 	tx_sa = kmalloc(sizeof(struct tx_srcavail_state), GFP_KERNEL);
 	if (!tx_sa) {
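
The debug-message portion of this patch consistently replaces numeric prints of
sk->sk_state with the driver's sdp_state_str() helper, which already appears as
context in the sdp_handle_disconn() hunk above. As a rough sketch only, such a
helper can be a plain switch over the TCP_* state constants that SDP sockets
reuse; the body below is an assumption for illustration, not the driver's
actual implementation:

/* Illustrative sketch only; the SDP driver's real sdp_state_str() may differ.
 * Maps a socket state (the TCP_* constants from <net/tcp_states.h>, which SDP
 * sockets reuse) to a printable name, so debug output reads
 * "state=TCP_ESTABLISHED" instead of "state=1".
 */
#include <net/tcp_states.h>

static const char *sdp_state_str(int state)
{
	switch (state) {
	case TCP_ESTABLISHED:	return "TCP_ESTABLISHED";
	case TCP_SYN_SENT:	return "TCP_SYN_SENT";
	case TCP_SYN_RECV:	return "TCP_SYN_RECV";
	case TCP_FIN_WAIT1:	return "TCP_FIN_WAIT1";
	case TCP_FIN_WAIT2:	return "TCP_FIN_WAIT2";
	case TCP_TIME_WAIT:	return "TCP_TIME_WAIT";
	case TCP_CLOSE:		return "TCP_CLOSE";
	case TCP_CLOSE_WAIT:	return "TCP_CLOSE_WAIT";
	case TCP_LAST_ACK:	return "TCP_LAST_ACK";
	case TCP_LISTEN:	return "TCP_LISTEN";
	case TCP_CLOSING:	return "TCP_CLOSING";
	default:		return "UNKNOWN_STATE";
	}
}

Printing names rather than integers makes warnings such as "FIN in unexpected
state" readable in the log without consulting the state-constant definitions.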