From: Amir Vadai Date: Sun, 21 Jun 2009 14:13:35 +0000 (+0300) Subject: sdp: fixed coding style X-Git-Tag: v4.1.12-92~264^2~5^2~267 X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=28fd8ad3b5ef85b32a04f305342973471f859ede;p=users%2Fjedix%2Flinux-maple.git sdp: fixed coding style Signed-off-by: Amir Vadai --- diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h index cc38f5b16d381..53f4b9539e2d1 100644 --- a/drivers/infiniband/ulp/sdp/sdp.h +++ b/drivers/infiniband/ulp/sdp/sdp.h @@ -8,15 +8,8 @@ #include #include -#undef SDPSTATS_ON -#undef SDP_PROFILING -//#undef CONFIG_INFINIBAND_SDP_DEBUG_DATA -//#undef CONFIG_INFINIBAND_SDP_DEBUG - #define SDPSTATS_ON -//#define SDP_PROFILING -//#define CONFIG_INFINIBAND_SDP_DEBUG_DATA -//#define CONFIG_INFINIBAND_SDP_DEBUG +/* #define SDP_PROFILING */ #define _sdp_printk(func, line, level, sk, format, arg...) \ printk(level "%s:%d sdp_sock(%5d:%d %d:%d): " format, \ @@ -76,7 +69,8 @@ static inline unsigned long long current_nsec(void) return tv.tv_sec * NSEC_PER_SEC + tv.tv_nsec; } #define sdp_prf1(sk, s, format, arg...) ({ \ - struct sdpprf_log *l = &sdpprf_log[sdpprf_log_count++ & (SDPPRF_LOG_SIZE - 1)]; \ + struct sdpprf_log *l = \ + &sdpprf_log[sdpprf_log_count++ & (SDPPRF_LOG_SIZE - 1)]; \ l->idx = sdpprf_log_count - 1; \ l->pid = current->pid; \ l->sk_num = (sk) ? inet_sk(sk)->num : -1; \ @@ -90,7 +84,7 @@ static inline unsigned long long current_nsec(void) 1; \ }) #define sdp_prf(sk, s, format, arg...) -//#define sdp_prf(sk, s, format, arg...) sdp_prf1(sk, s, format, ## arg) +/* #define sdp_prf(sk, s, format, arg...) sdp_prf1(sk, s, format, ## arg) */ #else #define sdp_prf1(sk, s, format, arg...) @@ -119,9 +113,9 @@ extern int sdp_debug_level; }) #define sk_common_release(sk) do { \ - sdp_dbg(sk, "%s:%d - sock_put(" SOCK_REF_BORN ") - refcount = %d " \ - "from withing sk_common_release\n",\ - __FUNCTION__, __LINE__, atomic_read(&(sk)->sk_refcnt)); \ + sdp_dbg(sk, "%s:%d - sock_put(" SOCK_REF_BORN \ + ") - refcount = %d from withing sk_common_release\n",\ + __func__, __LINE__, atomic_read(&(sk)->sk_refcnt));\ sk_common_release(sk); \ } while (0) @@ -134,15 +128,15 @@ extern int sdp_debug_level; #ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA extern int sdp_data_debug_level; -#define sdp_dbg_data(sk, format, arg...) \ - do { \ - if (sdp_data_debug_level & 0x2) \ - sdp_printk(KERN_DEBUG, sk, format , ## arg); \ +#define sdp_dbg_data(sk, format, arg...) \ + do { \ + if (sdp_data_debug_level & 0x2) \ + sdp_printk(KERN_DEBUG, sk, format , ## arg); \ } while (0) -#define SDP_DUMP_PACKET(sk, str, skb, h) \ - do { \ - if (sdp_data_debug_level & 0x1) \ - dump_packet(sk, str, skb, h); \ +#define SDP_DUMP_PACKET(sk, str, skb, h) \ + do { \ + if (sdp_data_debug_level & 0x1) \ + dump_packet(sk, str, skb, h); \ } while (0) #else #define sdp_dbg_data(priv, format, arg...) 
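The sdp_prf1 hunk above also shows the profiling log's indexing idiom: SDPPRF_LOG_SIZE is a power of two, so wrapping the log index costs a single bitwise AND rather than a modulo, and the post-increment hands every entry a global sequence number. Below is a minimal, self-contained userspace model of that ring indexing; log_ring, log_msg and the other identifiers are illustrative stand-ins, not driver code.

#include <stdio.h>

#define LOG_SIZE 16u	/* must be a power of two */

struct log_entry {
	unsigned idx;		/* global sequence number */
	const char *msg;
};

static struct log_entry log_ring[LOG_SIZE];
static unsigned log_count;

static void log_msg(const char *msg)
{
	/* count & (size - 1) equals count % size for power-of-two sizes */
	struct log_entry *e = &log_ring[log_count++ & (LOG_SIZE - 1)];

	e->idx = log_count - 1;
	e->msg = msg;
}

int main(void)
{
	for (unsigned i = 0; i < 40; i++)
		log_msg("event");
	/* 40 events through 16 slots: entries 24..39 survive, older ones
	 * were overwritten in place, with no locking and no bounds checks */
	printf("oldest retained idx: %u\n", log_count - LOG_SIZE);
	return 0;
}
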
@@ -190,9 +184,10 @@ static inline void sdpstats_hist(u32 *h, u32 val, u32 maxidx, int is_log) h[idx]++; } -#define SDPSTATS_COUNTER_INC(stat) do {sdpstats.stat++;} while (0) -#define SDPSTATS_COUNTER_ADD(stat, val) do {sdpstats.stat+=val;} while (0) -#define SDPSTATS_COUNTER_MID_INC(stat, mid) do {sdpstats.stat[mid]++;} while (0) +#define SDPSTATS_COUNTER_INC(stat) do { sdpstats.stat++; } while (0) +#define SDPSTATS_COUNTER_ADD(stat, val) do { sdpstats.stat += val; } while (0) +#define SDPSTATS_COUNTER_MID_INC(stat, mid) do { sdpstats.stat[mid]++; } \ + while (0) #define SDPSTATS_HIST(stat, size) \ sdpstats_hist(sdpstats.stat, size, ARRAY_SIZE(sdpstats.stat) - 1, 1) @@ -222,7 +217,7 @@ static inline void sdpstats_hist(u32 *h, u32 val, u32 maxidx, int is_log) instead of interrupts (in per-core Tx rings) - should be power of 2 */ #define SDP_TX_POLL_MODER 16 #define SDP_TX_POLL_TIMEOUT (HZ / 4) -#define SDP_NAGLE_TIMEOUT (HZ / 10) +#define SDP_NAGLE_TIMEOUT (HZ / 10) #define SDP_RESOLVE_TIMEOUT 1000 #define SDP_ROUTE_TIMEOUT 1000 @@ -298,11 +293,11 @@ struct sdp_bsdh { }; union cma_ip_addr { - struct in6_addr ip6; - struct { - __u32 pad[3]; - __u32 addr; - } ip4; + struct in6_addr ip6; + struct { + __u32 pad[3]; + __u32 addr; + } ip4; }; /* TODO: too much? Can I avoid having the src/dst and port here? */ @@ -370,20 +365,12 @@ struct sdp_chrecvbuf { u32 size; }; -#define posts_handler(ssk) ({\ - atomic_read(&ssk->somebody_is_doing_posts); \ -}) - -#define posts_handler_get(ssk) ({\ - sdp_prf(&ssk->isk.sk, NULL, "posts handler get. %d", posts_handler(ssk)); \ - atomic_inc(&ssk->somebody_is_doing_posts); \ -}) - -#define posts_handler_put(ssk) ({\ - sdp_prf(&ssk->isk.sk, NULL, "posts handler put. %d", posts_handler(ssk)); \ +#define posts_handler(ssk) atomic_read(&ssk->somebody_is_doing_posts) +#define posts_handler_get(ssk) atomic_inc(&ssk->somebody_is_doing_posts) +#define posts_handler_put(ssk) do {\ atomic_dec(&ssk->somebody_is_doing_posts); \ sdp_do_posts(ssk); \ -}) +} while (0) struct sdp_moderation { unsigned long last_moder_packets; @@ -550,9 +537,10 @@ static inline int _sdp_exch_state(const char *func, int line, struct sock *sk, int old; spin_lock_irqsave(&sdp_sk(sk)->lock, flags); - + sdp_dbg(sk, "%s:%d - set state: %s -> %s 0x%x\n", func, line, - sdp_state_str(sk->sk_state), sdp_state_str(state), from_states); + sdp_state_str(sk->sk_state), + sdp_state_str(state), from_states); if ((1 << sk->sk_state) & ~from_states) { sdp_warn(sk, "trying to exchange state from unexpected state " @@ -590,36 +578,41 @@ static inline void sdp_set_error(struct sock *sk, int err) #ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA void _dump_packet(const char *func, int line, struct sock *sk, char *str, struct sk_buff *skb, const struct sdp_bsdh *h); -#define dump_packet(sk, str, skb, h) _dump_packet(__func__, __LINE__, sk, str, skb, h) +#define dump_packet(sk, str, skb, h) \ + _dump_packet(__func__, __LINE__, sk, str, skb, h) #endif -int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *); -void sdp_reset(struct sock *sk); -void sdp_reset_sk(struct sock *sk, int rc); -int sdp_post_credits(struct sdp_sock *ssk); -void sdp_destroy_work(struct work_struct *work); -void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk); -void sdp_dreq_wait_timeout_work(struct work_struct *work); -void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb); -void sdp_add_sock(struct sdp_sock *ssk); -void sdp_remove_sock(struct sdp_sock *ssk); -void sdp_remove_large_sock(struct sdp_sock *ssk); -void sdp_post_keepalive(struct 
sdp_sock *ssk); -void sdp_start_keepalive_timer(struct sock *sk); + +/* sdp_main.c */ +void sdp_set_default_moderation(struct sdp_sock *ssk); int sdp_init_sock(struct sock *sk); +void sdp_start_keepalive_timer(struct sock *sk); +void sdp_remove_sock(struct sdp_sock *ssk); +void sdp_add_sock(struct sdp_sock *ssk); +void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb); +void sdp_dreq_wait_timeout_work(struct work_struct *work); +void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk); +void sdp_destroy_work(struct work_struct *work); +void sdp_reset_sk(struct sock *sk, int rc); +void sdp_reset(struct sock *sk); + +/* sdp_proc.c */ int __init sdp_proc_init(void); void sdp_proc_unregister(void); -/* sdp_main.c */ -void sdp_set_default_moderation(struct sdp_sock *ssk); + +/* sdp_cma.c */ +int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *); + +/* sdp_bcopy.c */ +int sdp_post_credits(struct sdp_sock *ssk); /* sdp_tx.c */ int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device); void sdp_tx_ring_destroy(struct sdp_sock *ssk); -int _sdp_xmit_poll(const char *func, int line, struct sdp_sock *ssk, int force); -#define sdp_xmit_poll(ssk, force) _sdp_xmit_poll(__func__, __LINE__, ssk, force) +int sdp_xmit_poll(struct sdp_sock *ssk, int force); void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid); -void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonagle); -#define sdp_post_sends(ssk, nonagle) _sdp_post_sends(__func__, __LINE__, ssk, nonagle) +void sdp_post_sends(struct sdp_sock *ssk, int nonagle); void sdp_nagle_timeout(unsigned long data); +void sdp_post_keepalive(struct sdp_sock *ssk); /* sdp_rx.c */ void sdp_rx_ring_init(struct sdp_sock *ssk); @@ -629,12 +622,13 @@ int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size); int sdp_init_buffers(struct sdp_sock *ssk, u32 new_size); void sdp_do_posts(struct sdp_sock *ssk); void sdp_rx_comp_full(struct sdp_sock *ssk); +void sdp_remove_large_sock(struct sdp_sock *ssk); static inline void sdp_arm_rx_cq(struct sock *sk) { sdp_prf(sk, NULL, "Arming RX cq"); sdp_dbg_data(sk, "Arming RX cq\n"); - + ib_req_notify_cq(sdp_sk(sk)->rx_ring.cq, IB_CQ_NEXT_COMP); } @@ -642,8 +636,8 @@ static inline void sdp_arm_tx_cq(struct sock *sk) { sdp_prf(sk, NULL, "Arming TX cq"); sdp_dbg_data(sk, "Arming TX cq. 
credits: %d, posted: %d\n", - tx_credits(sdp_sk(sk)), ring_posted(sdp_sk(sk)->tx_ring)); - + tx_credits(sdp_sk(sk)), ring_posted(sdp_sk(sk)->tx_ring)); + ib_req_notify_cq(sdp_sk(sk)->tx_ring.cq, IB_CQ_NEXT_COMP); } @@ -666,7 +660,8 @@ static inline char *mid2str(int mid) return mid2str[mid]; } -static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) +static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, + gfp_t gfp) { struct sk_buff *skb; @@ -691,5 +686,4 @@ static inline struct sk_buff *sdp_stream_alloc_skb(struct sock *sk, int size, gf return NULL; } - #endif diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c index 944c973aac117..e2c4ffdd52c31 100644 --- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c +++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c @@ -43,46 +43,43 @@ void _dump_packet(const char *func, int line, struct sock *sk, char *str, struct sk_buff *skb, const struct sdp_bsdh *h) { + struct sdp_hh *hh; + struct sdp_hah *hah; + struct sdp_chrecvbuf *req_size; int len = 0; char buf[256]; - len += snprintf(buf, 255-len, "%s skb: %p mid: %2x:%-20s flags: 0x%x bufs: %d " - "len: %d mseq: %d mseq_ack: %d", - str, skb, h->mid, mid2str(h->mid), h->flags, - ntohs(h->bufs), ntohl(h->len),ntohl(h->mseq), - ntohl(h->mseq_ack)); + len += snprintf(buf, 255-len, "%s skb: %p mid: %2x:%-20s flags: 0x%x " + "bufs: %d len: %d mseq: %d mseq_ack: %d | ", + str, skb, h->mid, mid2str(h->mid), h->flags, + ntohs(h->bufs), ntohl(h->len), ntohl(h->mseq), + ntohl(h->mseq_ack)); switch (h->mid) { - case SDP_MID_HELLO: - { - const struct sdp_hh *hh = (struct sdp_hh *)h; - len += snprintf(buf + len, 255-len, - " | max_adverts: %d majv_minv: %d localrcvsz: %d " - "desremrcvsz: %d |", - hh->max_adverts, - hh->majv_minv, - ntohl(hh->localrcvsz), - ntohl(hh->desremrcvsz)); - } - break; - case SDP_MID_HELLO_ACK: - { - const struct sdp_hah *hah = (struct sdp_hah *)h; - len += snprintf(buf + len, 255-len, " | actrcvz: %d |", - ntohl(hah->actrcvsz)); - } - break; - case SDP_MID_CHRCVBUF: - case SDP_MID_CHRCVBUF_ACK: - { - struct sdp_chrecvbuf *req_size = (struct sdp_chrecvbuf *)(h+1); - len += snprintf(buf + len, 255-len, - " | req_size: %d |", ntohl(req_size->size)); - } - break; - case SDP_MID_DATA: - len += snprintf(buf + len, 255-len, " | data_len: %ld |", ntohl(h->len) - sizeof(struct sdp_bsdh)); - default: - break; + case SDP_MID_HELLO: + hh = (struct sdp_hh *)h; + len += snprintf(buf + len, 255-len, + "max_adverts: %d majv_minv: %d " + "localrcvsz: %d desremrcvsz: %d |", + hh->max_adverts, hh->majv_minv, + ntohl(hh->localrcvsz), + ntohl(hh->desremrcvsz)); + break; + case SDP_MID_HELLO_ACK: + hah = (struct sdp_hah *)h; + len += snprintf(buf + len, 255-len, "actrcvz: %d |", + ntohl(hah->actrcvsz)); + break; + case SDP_MID_CHRCVBUF: + case SDP_MID_CHRCVBUF_ACK: + req_size = (struct sdp_chrecvbuf *)(h+1); + len += snprintf(buf + len, 255-len, "req_size: %d |", + ntohl(req_size->size)); + break; + case SDP_MID_DATA: + len += snprintf(buf + len, 255-len, "data_len: %ld |", + ntohl(h->len) - sizeof(struct sdp_bsdh)); + default: + break; } buf[len] = 0; _sdp_printk(func, line, KERN_WARNING, sk, "%s: %s\n", str, buf); @@ -118,13 +115,14 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb) ssk->nagle_last_unacked = mseq; } else { if (!timer_pending(&ssk->nagle_timer)) { - mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT); + mod_timer(&ssk->nagle_timer, + jiffies + SDP_NAGLE_TIMEOUT); 
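For context on the sdp_nagle_off() hunk being rewrapped here: a packet is posted immediately when it is full sized or nothing small is unacked (recording its mseq), otherwise it is held back and the HZ/10 SDP_NAGLE_TIMEOUT timer is armed as a backstop. A rough userspace sketch of that decision follows; the names are simplified stand-ins, and the real function derives send_now from more conditions than shown.

#include <stdbool.h>
#include <stdio.h>

struct conn {
	unsigned long nagle_last_unacked; /* mseq of oldest unacked small send */
	bool timer_pending;		  /* models the HZ/10 nagle timer */
};

static bool nagle_off(struct conn *c, unsigned long mseq, bool full_packet)
{
	if (full_packet || !c->nagle_last_unacked) {
		c->nagle_last_unacked = mseq;	/* cleared when its ack arrives */
		return true;			/* send now */
	}
	if (!c->timer_pending)
		c->timer_pending = true;	/* stands in for mod_timer() */
	return false;				/* coalesce with later data */
}

int main(void)
{
	struct conn c = { 0, false };
	int first = nagle_off(&c, 1, false);
	int second = nagle_off(&c, 2, false);

	/* first small send goes out; the second waits for an ack or timeout */
	printf("%d %d\n", first, second);
	return 0;
}
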
sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n"); } } sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %ld\n", send_now, ssk->nagle_last_unacked); - + return send_now; } @@ -133,7 +131,7 @@ void sdp_nagle_timeout(unsigned long data) struct sdp_sock *ssk = (struct sdp_sock *)data; struct sock *sk = &ssk->isk.sk; - sdp_dbg_data(&ssk->isk.sk, "last_unacked = %ld\n", ssk->nagle_last_unacked); + sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked); if (!ssk->nagle_last_unacked) goto out2; @@ -141,7 +139,7 @@ void sdp_nagle_timeout(unsigned long data) /* Only process if the socket is not in use */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { - sdp_dbg_data(&ssk->isk.sk, "socket is busy - will try later\n"); + sdp_dbg_data(sk, "socket is busy - will try later\n"); goto out; } @@ -189,7 +187,7 @@ int sdp_post_credits(struct sdp_sock *ssk) return post_count; } -void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonagle) +void sdp_post_sends(struct sdp_sock *ssk, int nonagle) { /* TODO: nonagle? */ struct sk_buff *skb; @@ -197,8 +195,6 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag gfp_t gfp_page; int post_count = 0; - sdp_dbg_data(&ssk->isk.sk, "called from %s:%d\n", func, line); - if (unlikely(!ssk->id)) { if (ssk->isk.sk.sk_send_head) { sdp_dbg(&ssk->isk.sk, @@ -214,10 +210,6 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag else gfp_page = GFP_KERNEL; - sdp_dbg_data(&ssk->isk.sk, "credits: %d tx ring slots left: %d send_head: %p\n", - tx_credits(ssk), sdp_tx_ring_slots_left(&ssk->tx_ring), - ssk->isk.sk.sk_send_head); - if (sdp_tx_ring_slots_left(&ssk->tx_ring) < SDP_TX_SIZE / 2) { int wc_processed = sdp_xmit_poll(ssk, 1); sdp_dbg_data(&ssk->isk.sk, "freed %d\n", wc_processed); @@ -235,7 +227,8 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag gfp_page); /* FIXME */ BUG_ON(!skb); - resp_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *resp_size); + resp_size = (struct sdp_chrecvbuf *)skb_put(skb, + sizeof *resp_size); resp_size->size = htonl(ssk->recv_frags * PAGE_SIZE); sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK); post_count++; @@ -243,10 +236,9 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS && sdp_tx_ring_slots_left(&ssk->tx_ring) && - (skb = ssk->isk.sk.sk_send_head) && - sdp_nagle_off(ssk, skb)) { + ssk->isk.sk.sk_send_head && + sdp_nagle_off(ssk, ssk->isk.sk.sk_send_head)) { SDPSTATS_COUNTER_INC(send_miss_no_credits); - sdp_prf(&ssk->isk.sk, skb, "no credits. 
called from %s:%d", func, line); } while (tx_credits(ssk) > SDP_MIN_TX_CREDITS && @@ -259,25 +251,6 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag post_count++; } - if (0 && tx_credits(ssk) == SDP_MIN_TX_CREDITS && - !ssk->sent_request && - ring_head(ssk->tx_ring) > ssk->sent_request_head + SDP_RESIZE_WAIT && - sdp_tx_ring_slots_left(&ssk->tx_ring)) { - struct sdp_chrecvbuf *req_size; - skb = sdp_stream_alloc_skb(&ssk->isk.sk, - sizeof(struct sdp_bsdh) + - sizeof(*req_size), - gfp_page); - /* FIXME */ - BUG_ON(!skb); - ssk->sent_request = (SDP_MAX_SEND_SKB_FRAGS-1) * PAGE_SIZE; - ssk->sent_request_head = ring_head(ssk->tx_ring); - req_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *req_size); - req_size->size = htonl(ssk->sent_request); - sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF); - post_count++; - } - c = remote_credits(ssk); if (likely(c > SDP_MIN_TX_CREDITS)) c *= 2; @@ -298,7 +271,7 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag } /* send DisConn if needed - * Do not send DisConn if there is only 1 credit. Compliance with CA4-82: + * Do not send DisConn if there is only 1 credit. Compliance with CA4-82 * If one credit is available, an implementation shall only send SDP * messages that provide additional credits and also do not contain ULP * payload. */ @@ -315,9 +288,6 @@ void _sdp_post_sends(const char *func, int line, struct sdp_sock *ssk, int nonag post_count++; } - if (post_count) { + if (post_count) sdp_xmit_poll(ssk, 0); - - sdp_prf(&ssk->isk.sk, NULL, "post_sends finished polling [%s:%d].", func, line); - } } diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c index 55ade25aa91b5..f7dc7c6708c09 100644 --- a/drivers/infiniband/ulp/sdp/sdp_cma.c +++ b/drivers/infiniband/ulp/sdp/sdp_cma.c @@ -91,10 +91,12 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id) sdp_sk(sk)->mr = mr; - if ((rc = sdp_rx_ring_create(sdp_sk(sk), device))) + rc = sdp_rx_ring_create(sdp_sk(sk), device); + if (rc) goto err_rx; - if ((rc = sdp_tx_ring_create(sdp_sk(sk), device))) + rc = sdp_tx_ring_create(sdp_sk(sk), device); + if (rc) goto err_tx; qp_init_attr.recv_cq = sdp_sk(sk)->rx_ring.cq; @@ -170,20 +172,14 @@ static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id, sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) - sizeof(struct sdp_bsdh); sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) / - PAGE_SIZE + 1; /* The +1 is to conpensate on not aligned buffers */ - sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size); - - sdp_dbg(child, "%s recv_frags: %d tx credits %d xmit_size_goal %d send trigger %d\n", - __func__, - sdp_sk(child)->recv_frags, - tx_credits(sdp_sk(child)), - sdp_sk(child)->xmit_size_goal, - sdp_sk(child)->min_bufs); + PAGE_SIZE + 1; /* +1 to conpensate on not aligned buffers */ + sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size); id->context = child; sdp_sk(child)->id = id; - list_add_tail(&sdp_sk(child)->backlog_queue, &sdp_sk(sk)->backlog_queue); + list_add_tail(&sdp_sk(child)->backlog_queue, + &sdp_sk(sk)->backlog_queue); sdp_sk(child)->parent = sk; sdp_exch_state(child, TCPF_LISTEN | TCPF_CLOSE, TCP_SYN_RECV); @@ -216,18 +212,14 @@ static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id, sdp_sk(sk)->max_bufs = ntohs(h->bsdh.bufs); atomic_set(&sdp_sk(sk)->tx_ring.credits, sdp_sk(sk)->max_bufs); sdp_sk(sk)->min_bufs = tx_credits(sdp_sk(sk)) / 4; - sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) - 
sizeof(struct sdp_bsdh); + sdp_sk(sk)->xmit_size_goal = + ntohl(h->actrcvsz) - sizeof(struct sdp_bsdh); sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) / - PAGE_SIZE, MAX_SKB_FRAGS) + 1; /* The +1 is to conpensate on not aligned buffers */ - sdp_sk(sk)->xmit_size_goal = MIN(sdp_sk(sk)->xmit_size_goal, + PAGE_SIZE, MAX_SKB_FRAGS) + 1; /* +1 to conpensate on not */ + /* aligned buffers */ + sdp_sk(sk)->xmit_size_goal = MIN(sdp_sk(sk)->xmit_size_goal, sdp_sk(sk)->send_frags * PAGE_SIZE); - sdp_dbg(sk, "tx credits %d xmit_size_goal %d send_frags: %d credits update trigger %d\n", - tx_credits(sdp_sk(sk)), - sdp_sk(sk)->xmit_size_goal, - sdp_sk(sk)->send_frags, - sdp_sk(sk)->min_bufs); - sdp_sk(sk)->poll_cq = 1; sk->sk_state_change(sk); @@ -263,18 +255,12 @@ static int sdp_connected_handler(struct sock *sk, struct rdma_cm_event *event) sdp_dbg(sk, "parent is going away.\n"); goto done; } -#if 0 - /* TODO: backlog */ - if (sk_acceptq_is_full(parent)) { - sdp_dbg(parent, "%s ECONNREFUSED: parent accept queue full: %d > %d\n", __func__, parent->sk_ack_backlog, parent->sk_max_ack_backlog); - release_sock(parent); - return -ECONNREFUSED; - } -#endif + sk_acceptq_added(parent); sdp_dbg(parent, "%s child connection established\n", __func__); list_del_init(&sdp_sk(sk)->backlog_queue); - list_add_tail(&sdp_sk(sk)->accept_queue, &sdp_sk(parent)->accept_queue); + list_add_tail(&sdp_sk(sk)->accept_queue, + &sdp_sk(parent)->accept_queue); parent->sk_state_change(parent); sk_wake_async(parent, 0, POLL_OUT); @@ -346,7 +332,8 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) rc = sdp_init_qp(sk, id); if (rc) break; - atomic_set(&sdp_sk(sk)->remote_credits, ring_posted(sdp_sk(sk)->rx_ring)); + atomic_set(&sdp_sk(sk)->remote_credits, + ring_posted(sdp_sk(sk)->rx_ring)); memset(&hh, 0, sizeof hh); hh.bsdh.mid = SDP_MID_HELLO; hh.bsdh.bufs = htons(remote_credits(sdp_sk(sk))); @@ -355,7 +342,7 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) hh.majv_minv = SDP_MAJV_MINV; sdp_init_buffers(sdp_sk(sk), rcvbuf_initial_size); hh.localrcvsz = hh.desremrcvsz = htonl(sdp_sk(sk)->recv_frags * - PAGE_SIZE + sizeof(struct sdp_bsdh)); + PAGE_SIZE + sizeof(struct sdp_bsdh)); hh.max_adverts = 0x1; inet_sk(sk)->saddr = inet_sk(sk)->rcv_saddr = ((struct sockaddr_in *)&id->route.addr.src_addr)->sin_addr.s_addr; @@ -380,7 +367,8 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) break; } child = id->context; - atomic_set(&sdp_sk(child)->remote_credits, ring_posted(sdp_sk(child)->rx_ring)); + atomic_set(&sdp_sk(child)->remote_credits, + ring_posted(sdp_sk(child)->rx_ring)); memset(&hah, 0, sizeof hah); hah.bsdh.mid = SDP_MID_HELLO_ACK; hah.bsdh.bufs = htons(remote_credits(sdp_sk(child))); @@ -450,8 +438,9 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) if (sk->sk_state != TCP_TIME_WAIT) { if (sk->sk_state == TCP_CLOSE_WAIT) { - sdp_dbg(sk, "IB teardown while in TCP_CLOSE_WAIT " - "taking reference to let close() finish the work\n"); + sdp_dbg(sk, "IB teardown while in " + "TCP_CLOSE_WAIT taking reference to " + "let close() finish the work\n"); sock_hold(sk, SOCK_REF_CM_TW); } sdp_set_error(sk, EPIPE); @@ -489,7 +478,6 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) sdp_dbg(sk, "event %d done. status %d\n", event->event, rc); if (parent) { - sdp_dbg(parent, "deleting child %d done. status %d\n", event->event, rc); lock_sock(parent); if (!sdp_sk(parent)->id) { /* TODO: look at SOCK_DEAD? 
*/ sdp_dbg(sk, "parent is going away.\n"); diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c index b34743da13007..8e1a6d7bc7ffd 100644 --- a/drivers/infiniband/ulp/sdp/sdp_main.c +++ b/drivers/infiniband/ulp/sdp/sdp_main.c @@ -82,30 +82,33 @@ MODULE_LICENSE("Dual BSD/GPL"); SDP_MODPARAM_INT(sdp_debug_level, 0, "Enable debug tracing if > 0."); #endif #ifdef CONFIG_INFINIBAND_SDP_DEBUG -SDP_MODPARAM_INT(sdp_data_debug_level, 0, "Enable data path debug tracing if > 0."); +SDP_MODPARAM_INT(sdp_data_debug_level, 0, + "Enable data path debug tracing if > 0."); #endif SDP_MODPARAM_SINT(recv_poll_hit, -1, "How many times recv poll helped."); SDP_MODPARAM_SINT(recv_poll_miss, -1, "How many times recv poll missed."); SDP_MODPARAM_SINT(recv_poll, 1000, "How many times to poll recv."); -SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME, +SDP_MODPARAM_SINT(sdp_keepalive_time, SDP_KEEPALIVE_TIME, "Default idle time in seconds before keepalive probe sent."); SDP_MODPARAM_SINT(sdp_zcopy_thresh, 65536, "Zero copy send threshold; 0=0ff."); #define SDP_RX_COAL_TIME_HIGH 128 SDP_MODPARAM_SINT(sdp_rx_coal_target, 0x50000, - "Target number of bytes to coalesce with interrupt moderation (bytes)."); + "Target number of bytes to coalesce with interrupt moderation."); SDP_MODPARAM_SINT(sdp_rx_coal_time, 0x10, "rx coal time (jiffies)."); SDP_MODPARAM_SINT(sdp_rx_rate_low, 80000, "rx_rate low (packets/sec)."); -SDP_MODPARAM_SINT(sdp_rx_coal_time_low, 0, "low moderation time val (usec)."); +SDP_MODPARAM_SINT(sdp_rx_coal_time_low, 0, "low moderation usec."); SDP_MODPARAM_SINT(sdp_rx_rate_high, 100000, "rx_rate high (packets/sec)."); -SDP_MODPARAM_SINT(sdp_rx_coal_time_high, 128, "high moderation time val (usec)."); +SDP_MODPARAM_SINT(sdp_rx_coal_time_high, 128, "high moderation usec."); SDP_MODPARAM_SINT(sdp_rx_rate_thresh, (200000 / SDP_RX_COAL_TIME_HIGH), "rx rate thresh ()."); SDP_MODPARAM_SINT(sdp_sample_interval, (HZ / 4), "sample interval (jiffies)."); -SDP_MODPARAM_INT(hw_int_mod_count, -1, "forced hw int moderation val. -1 for auto (packets)."); -SDP_MODPARAM_INT(hw_int_mod_usec, -1, "forced hw int moderation val. -1 for auto (usec)."); +SDP_MODPARAM_INT(hw_int_mod_count, -1, + "forced hw int moderation val. -1 for auto (packets)."); +SDP_MODPARAM_INT(hw_int_mod_usec, -1, + "forced hw int moderation val. 
-1 for auto (usec)."); struct workqueue_struct *sdp_wq; struct workqueue_struct *rx_comp_wq; @@ -292,6 +295,7 @@ void sdp_start_keepalive_timer(struct sock *sk) void sdp_set_default_moderation(struct sdp_sock *ssk) { + struct sock *sk = &ssk->isk.sk; struct sdp_moderation *mod = &ssk->auto_mod; int rx_buf_size; @@ -301,17 +305,20 @@ void sdp_set_default_moderation(struct sdp_sock *ssk) mod->adaptive_rx_coal = 0; if (hw_int_mod_count > 0 && hw_int_mod_usec > 0) { - err = ib_modify_cq(ssk->rx_ring.cq, hw_int_mod_count, hw_int_mod_usec); + err = ib_modify_cq(ssk->rx_ring.cq, hw_int_mod_count, + hw_int_mod_usec); if (err) - sdp_warn(&ssk->isk.sk, "Failed modifying moderation for cq"); - else - sdp_dbg(&ssk->isk.sk, "Using fixed interrupt moderation\n"); + sdp_warn(sk, + "Failed modifying moderation for cq"); + else + sdp_dbg(sk, + "Using fixed interrupt moderation\n"); } return; } mod->adaptive_rx_coal = 1; - sdp_dbg(&ssk->isk.sk, "Using adaptive interrupt moderation\n"); + sdp_dbg(sk, "Using adaptive interrupt moderation\n"); /* If we haven't received a specific coalescing setting * (module param), we set the moderation paramters as follows: @@ -322,7 +329,7 @@ void sdp_set_default_moderation(struct sdp_sock *ssk) rx_buf_size = (ssk->recv_frags * PAGE_SIZE) + sizeof(struct sdp_bsdh); mod->moder_cnt = sdp_rx_coal_target / rx_buf_size + 1; mod->moder_time = sdp_rx_coal_time; - sdp_dbg(&ssk->isk.sk, "Default coalesing params for buf size:%d - " + sdp_dbg(sk, "Default coalesing params for buf size:%d - " "moder_cnt:%d moder_time:%d\n", rx_buf_size, mod->moder_cnt, mod->moder_time); @@ -332,7 +339,7 @@ void sdp_set_default_moderation(struct sdp_sock *ssk) mod->pkt_rate_high = sdp_rx_rate_high; mod->rx_usecs_high = sdp_rx_coal_time_high; mod->sample_interval = sdp_sample_interval; - + mod->last_moder_time = SDP_AUTO_CONF; mod->last_moder_jiffies = 0; mod->last_moder_packets = 0; @@ -340,11 +347,33 @@ void sdp_set_default_moderation(struct sdp_sock *ssk) mod->last_moder_bytes = 0; } +/* If tx and rx packet rates are not balanced, assume that + * traffic is mainly BW bound and apply maximum moderation. + * Otherwise, moderate according to packet rate */ +static inline int calc_moder_time(int rate, struct sdp_moderation *mod, + int tx_pkt_diff, int rx_pkt_diff) +{ + if (2 * tx_pkt_diff > 3 * rx_pkt_diff || + 2 * rx_pkt_diff > 3 * tx_pkt_diff) + return mod->rx_usecs_high; + + if (rate < mod->pkt_rate_low) + return mod->rx_usecs_low; + + if (rate > mod->pkt_rate_high) + return mod->rx_usecs_high; + + return (rate - mod->pkt_rate_low) * + (mod->rx_usecs_high - mod->rx_usecs_low) / + (mod->pkt_rate_high - mod->pkt_rate_low) + + mod->rx_usecs_low; +} + static void sdp_auto_moderation(struct sdp_sock *ssk) { struct sdp_moderation *mod = &ssk->auto_mod; - unsigned long period = (unsigned long) (jiffies - mod->last_moder_jiffies); + unsigned long period = jiffies - mod->last_moder_jiffies; unsigned long packets; unsigned long rate; unsigned long avg_pkt_size; @@ -374,42 +403,31 @@ static void sdp_auto_moderation(struct sdp_sock *ssk) /* Apply auto-moderation only when packet rate exceeds a rate that * it matters */ if (rate > sdp_rx_rate_thresh) { - /* If tx and rx packet rates are not balanced, assume that - * traffic is mainly BW bound and apply maximum moderation. 
- * Otherwise, moderate according to packet rate */ - if (2 * tx_pkt_diff > 3 * rx_pkt_diff || - 2 * rx_pkt_diff > 3 * tx_pkt_diff) { - moder_time = mod->rx_usecs_high; - } else { - if (rate < mod->pkt_rate_low) { - moder_time = mod->rx_usecs_low; - } else if (rate > mod->pkt_rate_high) - moder_time = mod->rx_usecs_high; - else - moder_time = (rate - mod->pkt_rate_low) * - (mod->rx_usecs_high - mod->rx_usecs_low) / - (mod->pkt_rate_high - mod->pkt_rate_low) + - mod->rx_usecs_low; - } + moder_time = calc_moder_time(rate, mod, tx_pkt_diff, + rx_pkt_diff); } else { - /* When packet rate is low, use default moderation rather than - * 0 to prevent interrupt storms if traffic suddenly increases */ + /* When packet rate is low, use default moderation rather + * than 0 to prevent interrupt storms if traffic suddenly + * increases */ moder_time = mod->moder_time; } sdp_dbg_data(&ssk->isk.sk, "tx rate:%lu rx_rate:%lu\n", tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); - sdp_dbg_data(&ssk->isk.sk, "Rx moder_time changed from:%d to %d period:%lu " - "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n", + sdp_dbg_data(&ssk->isk.sk, "Rx moder_time changed from:%d to %d " + "period:%lu [jiff] packets:%lu avg_pkt_size:%lu " + "rate:%lu [p/s])\n", mod->last_moder_time, moder_time, period, packets, avg_pkt_size, rate); if (moder_time != mod->last_moder_time) { mod->last_moder_time = moder_time; err = ib_modify_cq(ssk->rx_ring.cq, mod->moder_cnt, moder_time); - if (err) - sdp_dbg_data(&ssk->isk.sk, "Failed modifying moderation for cq"); + if (err) { + sdp_dbg_data(&ssk->isk.sk, + "Failed modifying moderation for cq"); + } } out: @@ -431,15 +449,12 @@ void sdp_reset_sk(struct sock *sk, int rc) sdp_xmit_poll(ssk, 1); if (!(sk->sk_shutdown & RCV_SHUTDOWN) || !sk_stream_memory_free(sk)) { - sdp_warn(sk, "setting state to error. 
shutdown: %d, mem_free: %d\n", - !(sk->sk_shutdown & RCV_SHUTDOWN), - !sk_stream_memory_free(sk)); + sdp_warn(sk, "setting state to error\n"); sdp_set_error(sk, rc); } sdp_destroy_qp(ssk); - sdp_dbg(sk, "memset on sdp_sock\n"); memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id)); sk->sk_state_change(sk); @@ -524,7 +539,7 @@ static void sdp_destruct(struct sock *sk) ssk->destructed_already = 1; sdp_remove_sock(ssk); - + sdp_close_sk(sk); if (ssk->parent) @@ -852,7 +867,8 @@ static struct sock *sdp_accept(struct sock *sk, int flags, int *err) goto out_err; } - newssk = list_entry(ssk->accept_queue.next, struct sdp_sock, accept_queue); + newssk = list_entry(ssk->accept_queue.next, struct sdp_sock, + accept_queue); list_del_init(&newssk->accept_queue); newssk->parent = NULL; sk_acceptq_removed(sk); @@ -926,7 +942,7 @@ static int sdp_ioctl(struct sock *sk, int cmd, unsigned long arg) /* TODO: Need to handle: case SIOCOUTQ: */ - return put_user(answ, (int __user *)arg); + return put_user(answ, (int __user *)arg); } static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo) @@ -955,7 +971,8 @@ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk) void sdp_destroy_work(struct work_struct *work) { - struct sdp_sock *ssk = container_of(work, struct sdp_sock, destroy_work); + struct sdp_sock *ssk = container_of(work, struct sdp_sock, + destroy_work); struct sock *sk = &ssk->isk.sk; sdp_dbg(sk, "%s: refcnt %d\n", __func__, atomic_read(&sk->sk_refcnt)); @@ -997,12 +1014,10 @@ void sdp_dreq_wait_timeout_work(struct work_struct *work) release_sock(sk); - if (sdp_sk(sk)->id) { + if (sdp_sk(sk)->id) rdma_disconnect(sdp_sk(sk)->id); - } else { - sdp_warn(sk, "NOT SENDING DREQ - no need to wait for timewait exit\n"); + else sock_put(sk, SOCK_REF_CM_TW); - } out: sock_put(sk, SOCK_REF_DREQ_TO); @@ -1166,8 +1181,11 @@ static int sdp_setsockopt(struct sock *sk, int level, int optname, ssk->keepalive_time = val * HZ; if (sock_flag(sk, SOCK_KEEPOPEN) && - !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) - sdp_reset_keepalive_timer(sk, ssk->keepalive_time); + !((1 << sk->sk_state) & + (TCPF_CLOSE | TCPF_LISTEN))) { + sdp_reset_keepalive_timer(sk, + ssk->keepalive_time); + } } break; case SDP_ZCOPY_THRESH: @@ -1298,7 +1316,8 @@ static int sdp_recv_urg(struct sock *sk, long timeo, return -EAGAIN; } -static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, int flags) +static inline void sdp_mark_urg(struct sock *sk, struct sdp_sock *ssk, + int flags) { if (unlikely(flags & MSG_OOB)) { struct sk_buff *skb = sk->sk_write_queue.prev; @@ -1403,7 +1422,8 @@ static struct bzcopy_state *sdp_bz_setup(struct sdp_sock *ssk, bz->busy = 0; bz->ssk = ssk; bz->page_cnt = PAGE_ALIGN(len + bz->cur_offset) >> PAGE_SHIFT; - bz->pages = kcalloc(bz->page_cnt, sizeof(struct page *), GFP_KERNEL); + bz->pages = kcalloc(bz->page_cnt, sizeof(struct page *), + GFP_KERNEL); if (!bz->pages) { kfree(bz); @@ -1498,7 +1518,6 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb, /* Time to copy data. We are close to * the end! 
*/ SDPSTATS_COUNTER_ADD(memcpy_count, copy); - sdp_dbg_data(sk, "Copying from: %p offset: %d size: %d\n", from, off, copy); err = skb_copy_to_page(sk, from, skb, page, off, copy); if (err) { @@ -1517,8 +1536,6 @@ static inline int sdp_bcopy_get(struct sock *sk, struct sk_buff *skb, skb_shinfo(skb)->frags[i - 1].size += copy; } else { - sdp_dbg_data(sk, "Adding to frage %d: page: %p off: %d size: %d\n", - i, page, off, copy); skb_fill_page_desc(skb, i, page, off, copy); if (TCP_PAGE(sk)) { get_page(page); @@ -1661,13 +1678,9 @@ static int sdp_bzcopy_wait_memory(struct sdp_sock *ssk, long *timeo_p, set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; - sdp_prf1(sk, NULL, "credits: %d, head: %d, tail: %d, busy: %d", - tx_credits(ssk), ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring), - bz->busy); - if (tx_credits(ssk) > SDP_MIN_TX_CREDITS) { + if (tx_credits(ssk) > SDP_MIN_TX_CREDITS) sdp_arm_tx_cq(sk); - } sk_wait_event(sk, ¤t_timeo, sdp_bzcopy_slots_avail(ssk, bz) && vm_wait); @@ -1692,9 +1705,6 @@ static int sdp_bzcopy_wait_memory(struct sdp_sock *ssk, long *timeo_p, return err; } -//#undef rdtscll -//#define rdtscll(x) ({ x = current_nsec(); }) - /* Like tcp_sendmsg */ /* TODO: check locking */ static int sdp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, @@ -1790,7 +1800,8 @@ new_segment: goto wait_for_sndbuf; } - skb = sdp_stream_alloc_skb(sk, 0, sk->sk_allocation); + skb = sdp_stream_alloc_skb(sk, 0, + sk->sk_allocation); if (!skb) goto wait_for_memory; @@ -1804,12 +1815,12 @@ new_segment: NETIF_F_HW_CSUM)) skb->ip_summed = CHECKSUM_PARTIAL; - sdp_dbg_data(sk, "entail new skb: %p len = %d\n", skb, skb->len); skb_entail(sk, ssk, skb); copy = size_goal; } else { sdp_dbg_data(sk, "adding to existing skb: %p" - " len = %d, sk_send_head: %p copy: %d\n", + " len = %d, sk_send_head: %p " + "copy: %d\n", skb, skb->len, sk->sk_send_head, copy); } @@ -1871,15 +1882,11 @@ wait_for_memory: err = sdp_bzcopy_wait_memory(ssk, &timeo, bz); } else { posts_handler_put(ssk); - sdp_prf1(sk, NULL, "wait for mem. credits: %d, head: %d, tail: %d", - tx_credits(ssk), ring_head(ssk->tx_ring), - ring_tail(ssk->tx_ring)); sdp_arm_tx_cq(sk); err = sk_stream_wait_memory(sk, &timeo); - sdp_prf1(sk, skb, "finished wait for mem. err: %d", err); posts_handler_get(ssk); sdp_do_posts(ssk); } @@ -1932,25 +1939,11 @@ out_err: return err; } -int dummy_memcpy_toiovec(struct iovec *iov, int len) -{ - while (len > 0) { - if (iov->iov_len) { - int copy = min_t(unsigned int, iov->iov_len, len); - len -= copy; - iov->iov_len -= copy; - iov->iov_base += copy; - } - iov++; - } - - return 0; -} /* Like tcp_recvmsg */ /* Maybe use skb_recv_datagram here? */ /* Note this does not seem to handle vectored messages. Relevant? */ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, + size_t len, int noblock, int flags, int *addr_len) { struct sk_buff *skb = NULL; @@ -1991,12 +1984,14 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, do { u32 offset; - /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ + /* Are we at urgent data? Stop if we have read anything or have + * SIGURG pending. */ if (ssk->urg_data && ssk->urg_seq == *seq) { if (copied) break; if (signal_pending(current)) { - copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; + copied = timeo ? 
sock_intr_errno(timeo) : + -EAGAIN; break; } } @@ -2006,10 +2001,8 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (!skb) break; - if ((skb_transport_header(skb))[0] == SDP_MID_DISCONN) { - sdp_prf(sk, NULL, "Got DISCONN skb - killing it"); + if ((skb_transport_header(skb))[0] == SDP_MID_DISCONN) goto found_fin_ok; - } if (before(*seq, TCP_SKB_CB(skb)->seq)) { sdp_warn(sk, "recvmsg bug: copied %X seq %X\n", @@ -2078,8 +2071,7 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, release_sock(sk); lock_sock(sk); } else if (rc) { - sdp_dbg_data(sk, "%s: sk_wait_data %ld\n", __func__, timeo); - sdp_prf(sk, NULL, "waiting for data"); + sdp_dbg_data(sk, "sk_wait_data %ld\n", timeo); posts_handler_put(ssk); @@ -2094,9 +2086,9 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, continue; found_ok_skb: - sdp_dbg_data(sk, "%s: found_ok_skb len %d\n", __func__, skb->len); - sdp_dbg_data(sk, "%s: len %Zd offset %d\n", __func__, len, offset); - sdp_dbg_data(sk, "%s: copied %d target %d\n", __func__, copied, target); + sdp_dbg_data(sk, "found_ok_skb len %d\n", skb->len); + sdp_dbg_data(sk, "len %Zd offset %d\n", len, offset); + sdp_dbg_data(sk, "copied %d target %d\n", copied, target); used = skb->len - offset; if (len < used) used = len; @@ -2122,8 +2114,6 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, err = skb_copy_datagram_iovec(skb, offset, /* TODO: skip header? */ msg->msg_iov, used); -// err = dummy_memcpy_toiovec(msg->msg_iov, used); - sdp_prf(sk, skb, "Copied to user %ld bytes. err = %d", used, err); if (err) { sdp_dbg(sk, "%s: skb_copy_datagram_iovec failed" "offset %d size %ld status %d\n", @@ -2139,7 +2129,7 @@ static int sdp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, len -= used; *seq += used; - sdp_dbg_data(sk, "%s: done copied %d target %d\n", __func__, copied, target); + sdp_dbg_data(sk, "done copied %d target %d\n", copied, target); sdp_do_posts(sdp_sk(sk)); skip_copy: @@ -2165,7 +2155,7 @@ found_fin_ok: __kfree_skb(skb); } break; - + } while (len > 0); err = copied; @@ -2266,8 +2256,8 @@ static unsigned int sdp_poll(struct file *file, struct socket *socket, /* * Adjust for memory in later kernels */ - if (!sk_stream_memory_free(sk) || !tx_slots_free(ssk)) - mask &= ~(POLLOUT | POLLWRNORM | POLLWRBAND); + if (!sk_stream_memory_free(sk) || !tx_slots_free(ssk)) + mask &= ~(POLLOUT | POLLWRNORM | POLLWRBAND); /* TODO: Slightly ugly: it would be nicer if there was function * like datagram_poll that didn't include poll_wait, @@ -2359,7 +2349,7 @@ static int sdp_create_socket(struct net *net, struct socket *sock, int protocol) struct sock *sk; int rc; - sdp_dbg(NULL, "%s: type %d protocol %d\n", __func__, sock->type, protocol); + sdp_dbg(NULL, "type %d protocol %d\n", sock->type, protocol); if (net != &init_net) return -EAFNOSUPPORT; @@ -2451,12 +2441,12 @@ kill_socks: sk->sk_shutdown |= RCV_SHUTDOWN; sdp_reset(sk); - if ((1 << sk->sk_state) & + if ((1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_TIME_WAIT)) { sock_put(sk, SOCK_REF_CM_TW); } - + schedule(); spin_lock_irq(&sock_list_lock); @@ -2514,13 +2504,13 @@ static int __init sdp_init(void) rc = proto_register(&sdp_proto, 1); if (rc) { - printk(KERN_WARNING "%s: proto_register failed: %d\n", __func__, rc); + printk(KERN_WARNING "proto_register failed: %d\n", rc); goto error_proto_reg; } rc = sock_register(&sdp_net_proto); if (rc) { - printk(KERN_WARNING 
"%s: sock_register failed: %d\n", __func__, rc); + printk(KERN_WARNING "sock_register failed: %d\n", rc); goto error_sock_reg; } diff --git a/drivers/infiniband/ulp/sdp/sdp_proc.c b/drivers/infiniband/ulp/sdp/sdp_proc.c index eb8a462d15898..924e2ee8acd23 100644 --- a/drivers/infiniband/ulp/sdp/sdp_proc.c +++ b/drivers/infiniband/ulp/sdp/sdp_proc.c @@ -72,7 +72,7 @@ static void *sdp_get_idx(struct seq_file *seq, loff_t pos) static void *sdp_seq_start(struct seq_file *seq, loff_t *pos) { void *start = NULL; - struct sdp_iter_state* st = seq->private; + struct sdp_iter_state *st = seq->private; st->num = 0; @@ -90,7 +90,7 @@ static void *sdp_seq_start(struct seq_file *seq, loff_t *pos) static void *sdp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct sdp_iter_state* st = seq->private; + struct sdp_iter_state *st = seq->private; void *next = NULL; spin_lock_irq(&sock_list_lock); @@ -116,7 +116,7 @@ static void sdp_seq_stop(struct seq_file *seq, void *v) static int sdp_seq_show(struct seq_file *seq, void *v) { - struct sdp_iter_state* st; + struct sdp_iter_state *st; struct sock *sk = v; char tmpbuf[TMPSZ + 1]; unsigned int dest; @@ -129,8 +129,8 @@ static int sdp_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-*s\n", TMPSZ - 1, - " sl local_address rem_address uid inode" - " rx_queue tx_queue state"); + " sl local_address rem_address " + "uid inode rx_queue tx_queue state"); goto out; } @@ -200,7 +200,8 @@ static struct sdp_seq_afinfo sdp_seq_afinfo = { #ifdef SDPSTATS_ON struct sdpstats sdpstats = { { 0 } }; -static void sdpstats_seq_hist(struct seq_file *seq, char *str, u32 *h, int n, int is_log) +static void sdpstats_seq_hist(struct seq_file *seq, char *str, u32 *h, int n, + int is_log) { int i; u32 max = 0; @@ -240,17 +241,19 @@ static int sdpstats_seq_show(struct seq_file *seq, void *v) sdpstats_seq_hist(seq, "send_size", sdpstats.send_size, ARRAY_SIZE(sdpstats.send_size), 1); - sdpstats_seq_hist(seq, "credits_before_update", sdpstats.credits_before_update, + sdpstats_seq_hist(seq, "credits_before_update", + sdpstats.credits_before_update, ARRAY_SIZE(sdpstats.credits_before_update), 0); -// sdpstats_seq_hist(seq, "send_interval", sdpstats.send_interval, -// ARRAY_SIZE(sdpstats.send_interval)); - seq_printf(seq, "sdp_sendmsg() calls\t\t: %d\n", sdpstats.sendmsg); - seq_printf(seq, "bcopy segments \t\t: %d\n", sdpstats.sendmsg_bcopy_segment); - seq_printf(seq, "bzcopy segments \t\t: %d\n", sdpstats.sendmsg_bzcopy_segment); - seq_printf(seq, "post_send_credits \t\t: %d\n", sdpstats.post_send_credits); - seq_printf(seq, "memcpy_count \t\t: %u\n", sdpstats.memcpy_count); + seq_printf(seq, "bcopy segments \t\t: %d\n", + sdpstats.sendmsg_bcopy_segment); + seq_printf(seq, "bzcopy segments \t\t: %d\n", + sdpstats.sendmsg_bzcopy_segment); + seq_printf(seq, "post_send_credits \t\t: %d\n", + sdpstats.post_send_credits); + seq_printf(seq, "memcpy_count \t\t: %u\n", + sdpstats.memcpy_count); for (i = 0; i < ARRAY_SIZE(sdpstats.post_send); i++) { if (mid2str(i)) { @@ -261,9 +264,12 @@ static int sdpstats_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); seq_printf(seq, "post_recv \t\t: %d\n", sdpstats.post_recv); - seq_printf(seq, "BZCopy poll miss \t\t: %d\n", sdpstats.bzcopy_poll_miss); - seq_printf(seq, "send_wait_for_mem \t\t: %d\n", sdpstats.send_wait_for_mem); - seq_printf(seq, "send_miss_no_credits\t\t: %d\n", sdpstats.send_miss_no_credits); + seq_printf(seq, "BZCopy poll miss \t\t: %d\n", + sdpstats.bzcopy_poll_miss); + 
seq_printf(seq, "send_wait_for_mem \t\t: %d\n", + sdpstats.send_wait_for_mem); + seq_printf(seq, "send_miss_no_credits\t\t: %d\n", + sdpstats.send_miss_no_credits); seq_printf(seq, "rx_poll_miss \t\t: %d\n", sdpstats.rx_poll_miss); seq_printf(seq, "tx_poll_miss \t\t: %d\n", sdpstats.tx_poll_miss); @@ -274,10 +280,14 @@ static int sdpstats_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "- RX interrupts\t\t: %d\n", sdpstats.rx_int_count); seq_printf(seq, "- TX interrupts\t\t: %d\n", sdpstats.tx_int_count); - seq_printf(seq, "bz_clean \t\t: %d\n", sdpstats.sendmsg ? sdpstats.bz_clean_sum / sdpstats.sendmsg : 0); - seq_printf(seq, "bz_setup \t\t: %d\n", sdpstats.sendmsg ? sdpstats.bz_setup_sum / sdpstats.sendmsg : 0); - seq_printf(seq, "tx_copy \t\t: %d\n", sdpstats.sendmsg ? sdpstats.tx_copy_sum / sdpstats.sendmsg : 0); - seq_printf(seq, "sendmsg \t\t: %d\n", sdpstats.sendmsg ? sdpstats.sendmsg_sum / sdpstats.sendmsg : 0); + seq_printf(seq, "bz_clean \t\t: %d\n", sdpstats.sendmsg ? + sdpstats.bz_clean_sum / sdpstats.sendmsg : 0); + seq_printf(seq, "bz_setup \t\t: %d\n", sdpstats.sendmsg ? + sdpstats.bz_setup_sum / sdpstats.sendmsg : 0); + seq_printf(seq, "tx_copy \t\t: %d\n", sdpstats.sendmsg ? + sdpstats.tx_copy_sum / sdpstats.sendmsg : 0); + seq_printf(seq, "sendmsg \t\t: %d\n", sdpstats.sendmsg ? + sdpstats.sendmsg_sum / sdpstats.sendmsg : 0); return 0; } @@ -285,7 +295,7 @@ static ssize_t sdpstats_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { memset(&sdpstats, 0, sizeof(sdpstats)); - printk("Cleared sdp statistics\n"); + printk(KERN_WARNING "Cleared sdp statistics\n"); return count; } @@ -308,9 +318,9 @@ static struct file_operations sdpstats_fops = { #ifdef SDP_PROFILING struct sdpprf_log sdpprf_log[SDPPRF_LOG_SIZE]; -int sdpprf_log_count = 0; +int sdpprf_log_count; -unsigned long long start_t = 0; +static unsigned long long start_t; static int sdpprf_show(struct seq_file *m, void *v) { @@ -325,12 +335,13 @@ static int sdpprf_show(struct seq_file *m, void *v) t = l->time - start_t; nsec_rem = do_div(t, 1000000000); - seq_printf(m, "%-6d: [%5lu.%06lu] %-50s - [%d{%d} %d:%d] skb: %p %s:%d\n", + seq_printf(m, "%-6d: [%5lu.%06lu] %-50s - [%d{%d} %d:%d] " + "skb: %p %s:%d\n", l->idx, (unsigned long)t, nsec_rem/1000, l->msg, l->pid, l->cpu, l->sk_num, l->sk_dport, l->skb, l->func, l->line); out: - return 0; + return 0; } static void *sdpprf_start(struct seq_file *p, loff_t *pos) @@ -341,12 +352,12 @@ static void *sdpprf_start(struct seq_file *p, loff_t *pos) if (!sdpprf_log_count) return SEQ_START_TOKEN; } - + if (*pos >= MIN(sdpprf_log_count, SDPPRF_LOG_SIZE - 1)) return NULL; if (sdpprf_log_count >= SDPPRF_LOG_SIZE - 1) { - int off = sdpprf_log_count & ( SDPPRF_LOG_SIZE - 1); + int off = sdpprf_log_count & (SDPPRF_LOG_SIZE - 1); idx = (idx + off) & (SDPPRF_LOG_SIZE - 1); } @@ -367,7 +378,7 @@ static void *sdpprf_next(struct seq_file *p, void *v, loff_t *pos) if (l - &sdpprf_log[0] >= SDPPRF_LOG_SIZE - 1) return &sdpprf_log[0]; - return l; + return l; } static void sdpprf_stop(struct seq_file *p, void *v) @@ -375,7 +386,7 @@ static void sdpprf_stop(struct seq_file *p, void *v) } -struct seq_operations sdpprf_ops = { +const struct seq_operations sdpprf_ops = { .start = sdpprf_start, .stop = sdpprf_stop, .next = sdpprf_next, @@ -388,11 +399,6 @@ static int sdpprf_open(struct inode *inode, struct file *file) res = seq_open(file, &sdpprf_ops); - if (sdpprf_log_count >= SDPPRF_LOG_SIZE - 1) { - printk("ATTENTION: performance log was wrapped around! 
count = %d\n", - sdpprf_log_count); - } - return res; } @@ -400,16 +406,16 @@ static ssize_t sdpprf_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { sdpprf_log_count = 0; - printk("Cleared sdpprf statistics\n"); + printk(KERN_INFO "Cleared sdpprf statistics\n"); return count; } -static struct file_operations sdpprf_fops = { - .open = sdpprf_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, +static const struct file_operations sdpprf_fops = { + .open = sdpprf_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, .write = sdpprf_write, }; #endif /* SDP_PROFILING */ @@ -435,16 +441,16 @@ int __init sdp_proc_init(void) #ifdef SDPSTATS_ON - sdpstats = proc_net_fops_create(&init_net, PROC_SDP_STATS, - S_IRUGO | S_IWUGO, &sdpstats_fops); + sdpstats = proc_net_fops_create(&init_net, PROC_SDP_STATS, + S_IRUGO | S_IWUGO, &sdpstats_fops); if (!sdpstats) goto no_mem; #endif #ifdef SDP_PROFILING - sdpprf = proc_net_fops_create(&init_net, PROC_SDP_PERF, - S_IRUGO | S_IWUGO, &sdpprf_fops); + sdpprf = proc_net_fops_create(&init_net, PROC_SDP_PERF, + S_IRUGO | S_IWUGO, &sdpprf_fops); if (!sdpprf) goto no_mem; #endif diff --git a/drivers/infiniband/ulp/sdp/sdp_rx.c b/drivers/infiniband/ulp/sdp/sdp_rx.c index e6e01b1fb7498..ed5c2ea1a62ca 100644 --- a/drivers/infiniband/ulp/sdp/sdp_rx.c +++ b/drivers/infiniband/ulp/sdp/sdp_rx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2009 Mellanox Technologies Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -28,8 +28,6 @@ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
- * - * $Id$ */ #include #include @@ -37,17 +35,22 @@ #include #include "sdp.h" -SDP_MODPARAM_INT(rcvbuf_initial_size, 32 * 1024, "Receive buffer initial size in bytes."); -SDP_MODPARAM_SINT(rcvbuf_scale, 0x10, "Receive buffer size scale factor."); -SDP_MODPARAM_SINT(top_mem_usage, 0, "Top system wide sdp memory usage for recv (in MB)."); +SDP_MODPARAM_INT(rcvbuf_initial_size, 32 * 1024, + "Receive buffer initial size in bytes."); +SDP_MODPARAM_SINT(rcvbuf_scale, 0x10, + "Receive buffer size scale factor."); +SDP_MODPARAM_SINT(top_mem_usage, 0, + "Top system wide sdp memory usage for recv (in MB)."); #ifdef CONFIG_PPC -SDP_MODPARAM_SINT(max_large_sockets, 100, "Max number of large sockets (32k buffers)."); +SDP_MODPARAM_SINT(max_large_sockets, 100, + "Max number of large sockets (32k buffers)."); #else -SDP_MODPARAM_SINT(max_large_sockets, 1000, "Max number of large sockets (32k buffers)."); +SDP_MODPARAM_SINT(max_large_sockets, 1000, + "Max number of large sockets (32k buffers)."); #endif -static int curr_large_sockets = 0; +static int curr_large_sockets; atomic_t sdp_current_mem_usage; spinlock_t sdp_large_sockets_lock; @@ -186,7 +189,7 @@ static int sdp_post_recv(struct sdp_sock *ssk) skb->truesize += frag->size; } - rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1)); + rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1)); rx_req->skb = skb; dev = ssk->ib_device; addr = ib_dma_map_single(dev, h, SDP_HEAD_SIZE, DMA_FROM_DEVICE); @@ -219,7 +222,7 @@ static int sdp_post_recv(struct sdp_sock *ssk) rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr); atomic_inc(&ssk->rx_ring.head); if (unlikely(rc)) { - sdp_warn(&ssk->isk.sk, "ib_post_recv failed with status %d\n", rc); + sdp_warn(&ssk->isk.sk, "ib_post_recv failed. status %d\n", rc); lock_sock(&ssk->isk.sk); sdp_reset(&ssk->isk.sk); @@ -241,24 +244,26 @@ static inline int sdp_post_recvs_needed(struct sdp_sock *ssk) int buffer_size = SDP_HEAD_SIZE + ssk->recv_frags * PAGE_SIZE; unsigned long max_bytes; - if (top_mem_usage && - (top_mem_usage * 0x100000) < atomic_read(&sdp_current_mem_usage) * PAGE_SIZE) + if (top_mem_usage && (top_mem_usage * 0x100000) < + atomic_read(&sdp_current_mem_usage) * PAGE_SIZE) { scale = 1; + } max_bytes = sk->sk_rcvbuf * scale; - if (unlikely(ring_posted(ssk->rx_ring) >= SDP_RX_SIZE)) { + if (unlikely(ring_posted(ssk->rx_ring) >= SDP_RX_SIZE)) return 0; - } if (likely(ring_posted(ssk->rx_ring) > SDP_MIN_TX_CREDITS)) { - unsigned long bytes_in_process = - (ring_posted(ssk->rx_ring) - SDP_MIN_TX_CREDITS) * buffer_size; + unsigned long bytes_in_process = + (ring_posted(ssk->rx_ring) - SDP_MIN_TX_CREDITS) * + buffer_size; bytes_in_process += rcv_nxt(ssk) - ssk->copied_seq; if (bytes_in_process >= max_bytes) { - sdp_prf(sk, NULL, "bytes_in_process:%ld > max_bytes:%ld", - bytes_in_process, max_bytes); + sdp_prf(sk, NULL, + "bytes_in_process:%ld > max_bytes:%ld", + bytes_in_process, max_bytes); return 0; } } @@ -268,9 +273,9 @@ static inline int sdp_post_recvs_needed(struct sdp_sock *ssk) static inline void sdp_post_recvs(struct sdp_sock *ssk) { -again: +again: while (sdp_post_recvs_needed(ssk)) { - if (sdp_post_recv(ssk)) + if (sdp_post_recv(ssk)) goto out; } @@ -325,15 +330,16 @@ int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size) trigger a HW error/rc to be interpreted as a IB_WC_LOC_LEN_ERR */ u32 max_size = (SDP_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE) <= 32784 ? 
-		(SDP_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE): 32784;
-#else
+		(SDP_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE) : 32784;
+#else
 	u32 max_size = SDP_HEAD_SIZE + SDP_MAX_RECV_SKB_FRAGS * PAGE_SIZE;
 #endif
 
 	if (new_size > curr_size && new_size <= max_size &&
 	    sdp_get_large_socket(ssk)) {
 		ssk->rcvbuf_scale = rcvbuf_scale;
-		ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) / PAGE_SIZE;
+		ssk->recv_frags = PAGE_ALIGN(new_size - SDP_HEAD_SIZE) /
+			PAGE_SIZE;
 		if (ssk->recv_frags > SDP_MAX_RECV_SKB_FRAGS)
 			ssk->recv_frags = SDP_MAX_RECV_SKB_FRAGS;
 		return 0;
@@ -341,7 +347,8 @@ int sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
 	return -1;
 }
 
-static void sdp_handle_resize_request(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
+static void sdp_handle_resize_request(struct sdp_sock *ssk,
+		struct sdp_chrecvbuf *buf)
 {
 	if (sdp_resize_buffers(ssk, ntohl(buf->size)) == 0)
 		ssk->recv_request_head = ring_head(ssk->rx_ring) + 1;
@@ -350,7 +357,8 @@ static void sdp_handle_resize_request(struct sdp_sock *ssk, struct sdp_chrecvbuf
 	ssk->recv_request = 1;
 }
 
-static void sdp_handle_resize_ack(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
+static void sdp_handle_resize_ack(struct sdp_sock *ssk,
+		struct sdp_chrecvbuf *buf)
 {
 	u32 new_size = ntohl(buf->size);
 
@@ -391,7 +399,7 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id)
 	}
 
 	dev = ssk->ib_device;
-	rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
+	rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
 	skb = rx_req->skb;
 	ib_dma_unmap_single(dev, rx_req->mapping[0], SDP_HEAD_SIZE,
 			    DMA_FROM_DEVICE);
@@ -419,7 +427,7 @@ static int sdp_process_rx_ctl_skb(struct sdp_sock *ssk, struct sk_buff *skb)
 
 	/* got data in RCV_SHUTDOWN */
 	if (sk->sk_state == TCP_FIN_WAIT1) {
-		sdp_warn(sk, "socket in shutdown, state = FIN_WAIT1 and got data\n");
+		sdp_warn(sk, "RX data when state = FIN_WAIT1\n");
 		/* go into abortive close */
 		sdp_exch_state(sk, TCPF_FIN_WAIT1,
 			       TCP_TIME_WAIT);
@@ -471,13 +479,13 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
 
 	mseq_ack = ntohl(h->mseq_ack);
 	credits_before = tx_credits(ssk);
-	atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) + 1 +
-		ntohs(h->bufs));
+	atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) +
+			1 + ntohs(h->bufs));
 	if (mseq_ack >= ssk->nagle_last_unacked)
 		ssk->nagle_last_unacked = 0;
 
 	sdp_prf1(&ssk->isk.sk, skb, "RX %s +%d c:%d->%d mseq:%d ack:%d",
-		mid2str(h->mid), ntohs(h->bufs), credits_before,
+			mid2str(h->mid), ntohs(h->bufs), credits_before,
 			tx_credits(ssk), ntohl(h->mseq), ntohl(h->mseq_ack));
 
 	frags = skb_shinfo(skb)->nr_frags;
@@ -493,8 +501,9 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
 			sk_send_sigurg(sk);*/
 
 	skb_pull(skb, sizeof(struct sdp_bsdh));
-
-	if (h->mid != SDP_MID_DATA || unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
+
+	if (h->mid != SDP_MID_DATA ||
+			unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
 		sdp_prf(sk, NULL, "Control skb - queing to control queue");
 		skb_queue_tail(&ssk->rx_ctl_q, skb);
 		return 0;
@@ -505,7 +514,8 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
 		return 0;
 	}
 
-	sdp_prf(sk, NULL, "queueing a %s skb", (h->mid == SDP_MID_DATA ? "data" : "disconnect"));
+	sdp_prf(sk, NULL, "queueing a %s skb",
+			(h->mid == SDP_MID_DATA ? "data" : "disconnect"));
 	skb = sdp_sock_queue_rcv_skb(sk, skb);
 
 /*	if (unlikely(h->flags & SDP_OOB_PRES))
@@ -515,13 +525,14 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
 }
 
 /* called only from irq */
-static struct sk_buff *sdp_process_rx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
+static struct sk_buff *sdp_process_rx_wc(struct sdp_sock *ssk,
+		struct ib_wc *wc)
 {
 	struct sk_buff *skb;
 	struct sdp_bsdh *h;
 	struct sock *sk = &ssk->isk.sk;
 	int mseq;
-
+
 	skb = sdp_recv_completion(ssk, wc->wr_id);
 	if (unlikely(!skb))
 		return NULL;
@@ -580,9 +591,10 @@ static void sdp_bzcopy_write_space(struct sdp_sock *ssk)
 	struct socket *sock = sk->sk_socket;
 
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
-		sdp_prf1(&ssk->isk.sk, NULL, "credits: %d, min_bufs: %d. tx_head: %d, tx_tail: %d",
-			tx_credits(ssk), ssk->min_bufs,
-			ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));
+		sdp_prf1(&ssk->isk.sk, NULL, "credits: %d, min_bufs: %d. "
+			"tx_head: %d, tx_tail: %d",
+			tx_credits(ssk), ssk->min_bufs,
+			ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));
 	}
 
 	if (tx_credits(ssk) >= ssk->min_bufs && sock != NULL) {
@@ -628,7 +640,8 @@ static int sdp_poll_rx_cq(struct sdp_sock *ssk)
 
 void sdp_rx_comp_work(struct work_struct *work)
 {
-	struct sdp_sock *ssk = container_of(work, struct sdp_sock, rx_comp_work);
+	struct sdp_sock *ssk = container_of(work, struct sdp_sock,
+			rx_comp_work);
 	struct sock *sk = &ssk->isk.sk;
 
 	sdp_prf(sk, NULL, "%s", __func__);
@@ -644,8 +657,6 @@ void sdp_rx_comp_work(struct work_struct *work)
 
 	if (unlikely(!ssk->poll_cq)) {
 		struct rdma_cm_id *id = ssk->id;
-		sdp_warn(sk, "poll cq is 0. socket was reset or wasn't initialized\n");
-		sdp_prf(sk, NULL, "poll cq is 0");
 		if (id && id->qp)
 			rdma_notify(id, RDMA_CM_EVENT_ESTABLISHED);
 		return;
@@ -664,14 +675,11 @@ void sdp_do_posts(struct sdp_sock *ssk)
 	int xmit_poll_force;
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(&ssk->rx_ctl_q))) {
+	while ((skb = skb_dequeue(&ssk->rx_ctl_q)))
 		sdp_process_rx_ctl_skb(ssk, skb);
-	}
 
-	if (sk->sk_state == TCP_TIME_WAIT) {
-		sdp_prf(sk, NULL, "in TIMEWAIT. qp=%p cq=%p", ssk->qp, ssk->rx_ring.cq);
+	if (sk->sk_state == TCP_TIME_WAIT)
 		return;
-	}
 
 	if (!ssk->rx_ring.cq || !ssk->tx_ring.cq)
 		return;
@@ -685,7 +693,8 @@ void sdp_do_posts(struct sdp_sock *ssk)
 
 	sk_stream_mem_reclaim(sk);
 
-	xmit_poll_force = sk->sk_write_pending && (tx_credits(ssk) > SDP_MIN_TX_CREDITS);
+	xmit_poll_force = sk->sk_write_pending &&
+		(tx_credits(ssk) > SDP_MIN_TX_CREDITS);
 
 	if (credit_update_needed(ssk) || xmit_poll_force) {
 		/* if has pending tx because run out of tx_credits - xmit it */
@@ -720,9 +729,6 @@ static void sdp_rx_irq(struct ib_cq *cq, void *cq_context)
 
 	credits_before = tx_credits(ssk);
 
-	if (unlikely(!ssk->poll_cq))
-		sdp_warn(sk, "poll cq is 0. socket was reset or wasn't initialized\n");
-
 	if (!ssk->rx_ring.cq) {
 		sdp_warn(&ssk->isk.sk, "WARNING: rx irq after cq destroyed\n");
 
@@ -736,15 +742,15 @@ static void sdp_rx_irq(struct ib_cq *cq, void *cq_context)
 	sdp_prf(&ssk->isk.sk, NULL, "credits: %d -> %d",
 		credits_before, tx_credits(ssk));
 
-	if (posts_handler(ssk) ||
-	    (sk->sk_socket && test_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags))) {
+	if (posts_handler(ssk) || (sk->sk_socket &&
+		test_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags))) {
 
-		sdp_prf(&ssk->isk.sk, NULL, "Somebody is doing the post work for me. %d",
+		sdp_prf(&ssk->isk.sk, NULL,
+			"Somebody is doing the post work for me. %d",
 			posts_handler(ssk));
 	} else {
-		sdp_prf(&ssk->isk.sk, NULL, "Queuing work. others: %d, ctl_q: %d",
-			posts_handler(ssk),
+		sdp_prf(&ssk->isk.sk, NULL, "Queuing work. ctl_q: %d",
 			!skb_queue_empty(&ssk->rx_ctl_q));
 		queue_work(rx_comp_wq, &ssk->rx_comp_work);
 	}
@@ -788,18 +794,21 @@ int sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 	atomic_set(&ssk->rx_ring.head, 1);
 	atomic_set(&ssk->rx_ring.tail, 1);
 
-	ssk->rx_ring.buffer = kmalloc(sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE,
-				      GFP_KERNEL);
+	ssk->rx_ring.buffer = kmalloc(
+			sizeof *ssk->rx_ring.buffer * SDP_RX_SIZE, GFP_KERNEL);
 	if (!ssk->rx_ring.buffer) {
 		rc = -ENOMEM;
-		sdp_warn(&ssk->isk.sk, "Unable to allocate RX Ring size %zd.\n",
+		sdp_warn(&ssk->isk.sk,
+			"Unable to allocate RX Ring size %zd.\n",
 			 sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE);
 		goto out;
 	}
 
+	/* TODO: use vector=IB_CQ_VECTOR_LEAST_ATTACHED when implemented
+	 * in ofed-1.5 */
 	rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
-			  &ssk->isk.sk, SDP_RX_SIZE, 0/*IB_CQ_VECTOR_LEAST_ATTACHED*/);
+			  &ssk->isk.sk, SDP_RX_SIZE, 0);
 
 	if (IS_ERR(rx_cq)) {
 		rc = PTR_ERR(rx_cq);
diff --git a/drivers/infiniband/ulp/sdp/sdp_tx.c b/drivers/infiniband/ulp/sdp/sdp_tx.c
index 6fc4746b66967..d53e838fd1704 100644
--- a/drivers/infiniband/ulp/sdp/sdp_tx.c
+++ b/drivers/infiniband/ulp/sdp/sdp_tx.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2009 Mellanox Technologies Ltd. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -28,8 +28,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id$
  */
 #include
 #include
@@ -39,11 +37,12 @@
 
 #define sdp_cnt(var) do { (var)++; } while (0)
 
-SDP_MODPARAM_SINT(sdp_keepalive_probes_sent, 0, "Total number of keepalive probes sent.");
+SDP_MODPARAM_SINT(sdp_keepalive_probes_sent, 0,
+	"Total number of keepalive probes sent.");
 
 static int sdp_process_tx_cq(struct sdp_sock *ssk);
 
-int _sdp_xmit_poll(const char *func, int line, struct sdp_sock *ssk, int force)
+int sdp_xmit_poll(struct sdp_sock *ssk, int force)
 {
 	int wc_processed = 0;
 
@@ -55,15 +54,14 @@ int _sdp_xmit_poll(const char *func, int line, struct sdp_sock *ssk, int force)
 		mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
 
 	/* Poll the CQ every SDP_TX_POLL_MODER packets */
-	if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0) {
+	if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0)
 		wc_processed = sdp_process_tx_cq(ssk);
-		sdp_prf(&ssk->isk.sk, NULL, "processed %d wc's. inflight=%d", wc_processed,
-			ring_posted(ssk->tx_ring));
-	}
-	return wc_processed;
+	return wc_processed;
 }
 
+static unsigned long last_send;
+
 void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
 {
 	struct sdp_buf *tx_req;
@@ -73,6 +71,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
 	u64 addr;
 	struct ib_device *dev;
 	struct ib_send_wr *bad_wr;
+	int delta;
 
 	struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
 	struct ib_sge *sge = ibsge;
@@ -96,7 +95,8 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
 	h->mseq_ack = htonl(mseq_ack(ssk));
 
 	sdp_prf1(&ssk->isk.sk, skb, "TX: %s bufs: %d mseq:%ld ack:%d",
-		mid2str(mid), ring_posted(ssk->rx_ring), mseq, ntohl(h->mseq_ack));
+			mid2str(mid), ring_posted(ssk->rx_ring), mseq,
+			ntohl(h->mseq_ack));
 
 	SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h);
 
@@ -135,22 +135,19 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
 	tx_wr.send_flags = IB_SEND_SIGNALED;
 	if (unlikely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
 		tx_wr.send_flags |= IB_SEND_SOLICITED;
-
-	{
-		static unsigned long last_send = 0;
-		int delta = jiffies - last_send;
-
-		if (likely(last_send))
-			SDPSTATS_HIST(send_interval, delta);
-
-		last_send = jiffies;
-	}
+
+	delta = jiffies - last_send;
+	if (likely(last_send))
+		SDPSTATS_HIST(send_interval, delta);
+	last_send = jiffies;
+
 	rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
 	atomic_inc(&ssk->tx_ring.head);
 	atomic_dec(&ssk->tx_ring.credits);
 	atomic_set(&ssk->remote_credits, ring_posted(ssk->rx_ring));
 	if (unlikely(rc)) {
-		sdp_dbg(&ssk->isk.sk, "ib_post_send failed with status %d.\n", rc);
+		sdp_dbg(&ssk->isk.sk,
+				"ib_post_send failed with status %d.\n", rc);
 		sdp_set_error(&ssk->isk.sk, -ECONNRESET);
 		wake_up(&ssk->wq);
 	}
@@ -171,7 +168,7 @@ static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
 	}
 
 	dev = ssk->ib_device;
-	tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
+	tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
 	skb = tx_req->skb;
 	ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
 			    DMA_TO_DEVICE);
@@ -217,11 +214,12 @@ static int sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
 		}
 	}
 
-	{
-		struct sdp_bsdh *h = (struct sdp_bsdh *)skb->data;
-		sdp_prf1(&ssk->isk.sk, skb, "tx completion. mseq:%d", ntohl(h->mseq));
-	}
-
+#ifdef SDP_PROFILING
+{
+	struct sdp_bsdh *h = (struct sdp_bsdh *)skb->data;
+	sdp_prf1(&ssk->isk.sk, skb, "tx completion. mseq:%d", ntohl(h->mseq));
+}
+#endif
 	sk_wmem_free_skb(&ssk->isk.sk, skb);
 
 	return 0;
@@ -257,10 +255,10 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk)
 	int wc_processed = 0;
 
 	if (!ssk->tx_ring.cq) {
-		sdp_warn(&ssk->isk.sk, "WARNING: tx irq when tx_cq is destroyed\n");
+		sdp_warn(&ssk->isk.sk, "tx irq on destroyed tx_cq\n");
 		return 0;
 	}
-
+
 	do {
 		n = ib_poll_cq(ssk->tx_ring.cq, SDP_NUM_WC, ibwc);
 		for (i = 0; i < n; ++i) {
@@ -279,7 +277,7 @@ static int sdp_process_tx_cq(struct sdp_sock *ssk)
 		sk_stream_write_space(&ssk->isk.sk);
 	}
 
-	return wc_processed;
+	return wc_processed;
 }
 
 static void sdp_poll_tx_timeout(unsigned long data)
@@ -307,8 +305,6 @@ static void sdp_poll_tx_timeout(unsigned long data)
 		goto out;
 
 	wc_processed = sdp_process_tx_cq(ssk);
-	sdp_prf(&ssk->isk.sk, NULL, "processed %d wc's. inflight=%d", wc_processed,
-		ring_posted(ssk->tx_ring));
 	if (!wc_processed)
 		SDPSTATS_COUNTER_INC(tx_poll_miss);
 	else
@@ -366,7 +362,8 @@ void sdp_post_keepalive(struct sdp_sock *ssk)
 
 	rc = ib_post_send(ssk->qp, &wr, &bad_wr);
 	if (rc) {
-		sdp_dbg(&ssk->isk.sk, "ib_post_keepalive failed with status %d.\n", rc);
+		sdp_dbg(&ssk->isk.sk,
+			"ib_post_keepalive failed with status %d.\n", rc);
 		sdp_set_error(&ssk->isk.sk, -ECONNRESET);
 		wake_up(&ssk->wq);
 	}
@@ -386,11 +383,11 @@ int sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
 	atomic_set(&ssk->tx_ring.head, 1);
 	atomic_set(&ssk->tx_ring.tail, 1);
 
-	ssk->tx_ring.buffer = kmalloc(sizeof *ssk->tx_ring.buffer * SDP_TX_SIZE,
-				      GFP_KERNEL);
+	ssk->tx_ring.buffer = kmalloc(
+			sizeof *ssk->tx_ring.buffer * SDP_TX_SIZE, GFP_KERNEL);
 	if (!ssk->tx_ring.buffer) {
 		rc = -ENOMEM;
-		sdp_warn(&ssk->isk.sk, "Unable to allocate TX Ring size %zd.\n",
+		sdp_warn(&ssk->isk.sk, "Can't allocate TX Ring size %zd.\n",
 			 sizeof(*ssk->tx_ring.buffer) * SDP_TX_SIZE);
 		goto out;