www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sdp: better sequence-number handling and cosmetic updates
authorEldad Zinger <eldadz@mellanox.co.il>
Tue, 28 Sep 2010 10:21:58 +0000 (12:21 +0200)
committerMukesh Kacker <mukesh.kacker@oracle.com>
Tue, 6 Oct 2015 12:05:23 +0000 (05:05 -0700)
Signed-off-by: Eldad Zinger <eldadz@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_bcopy.c
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_rx.c
drivers/infiniband/ulp/sdp/sdp_zcopy.c

index fef5a11de20011cf38262372577d4dbe976c8581..1f916c247b413afd39f5102989dab52c1937f70b 100644 (file)
@@ -23,7 +23,6 @@
 
 #define SDP_SRCAVAIL_CANCEL_TIMEOUT (HZ * 5)
 #define SDP_SRCAVAIL_ADV_TIMEOUT (1 * HZ)
-#define SDP_SRCAVAIL_PAYLOAD_LEN 1
 
 #define SDP_RESOLVE_TIMEOUT 1000
 #define SDP_ROUTE_TIMEOUT 1000
@@ -65,7 +64,6 @@
 #define SDP_BZCOPY_POLL_TIMEOUT (HZ / 10)
 
 #define SDP_AUTO_CONF  0xffff
-#define AUTO_MOD_DELAY (HZ / 4)
 
 struct sdp_skb_cb {
        __u32           seq;            /* Starting sequence number     */
@@ -290,7 +288,7 @@ struct sdp_tx_ring {
        atomic_t                tail;
        struct ib_cq            *cq;
 
-       int                     una_seq;
+       u32                     una_seq;
        atomic_t                credits;
 #define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
 
@@ -376,7 +374,7 @@ struct sdp_sock {
 #define rcv_nxt(ssk) atomic_read(&(ssk->rcv_nxt))
        atomic_t rcv_nxt;
 
-       int write_seq;
+       u32 write_seq;
        int xmit_size_goal;
        int nonagle;
 
@@ -408,7 +406,7 @@ struct sdp_sock {
        unsigned max_bufs;      /* Initial buffers offered by other side */
        unsigned min_bufs;      /* Low water mark to wake senders */
 
-       unsigned long nagle_last_unacked; /* mseq of lastest unacked packet */
+       u32 nagle_last_unacked; /* mseq of latest unacked packet */
        struct timer_list nagle_timer; /* timeout waiting for ack */
 
        atomic_t               remote_credits;
index 9ea92ee27c1aa268654caa571929b077bf587d5c..c8b966dbda58ae67c184e801b2e5a629af82fa4c 100644 (file)
@@ -137,7 +137,7 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
                        sdp_dbg_data(&ssk->isk.sk, "Starting nagle timer\n");
                }
        }
-       sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %ld\n",
+       sdp_dbg_data(&ssk->isk.sk, "send_now = %d last_unacked = %u\n",
                send_now, ssk->nagle_last_unacked);
 
        return send_now;
@@ -149,7 +149,7 @@ void sdp_nagle_timeout(unsigned long data)
        struct sock *sk = &ssk->isk.sk;
 
        SDPSTATS_COUNTER_INC(nagle_timer);
-       sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked);
+       sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);
 
        if (!ssk->nagle_last_unacked)
                goto out2;
@@ -170,7 +170,7 @@ void sdp_nagle_timeout(unsigned long data)
        sdp_post_sends(ssk, GFP_ATOMIC);
 
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-               sk_stream_write_space(&ssk->isk.sk);
+               sk_stream_write_space(sk);
 out:
        bh_unlock_sock(sk);
 out2:
@@ -194,17 +194,16 @@ void sdp_post_sends(struct sdp_sock *ssk, gfp_t gfp)
        struct sock *sk = &ssk->isk.sk;
 
        if (unlikely(!ssk->id)) {
-               if (ssk->isk.sk.sk_send_head) {
-                       sdp_dbg(&ssk->isk.sk,
-                               "Send on socket without cmid ECONNRESET.\n");
+               if (sk->sk_send_head) {
+                       sdp_dbg(sk, "Send on socket without cmid ECONNRESET\n");
                        /* TODO: flush send queue? */
-                       sdp_reset(&ssk->isk.sk);
+                       sdp_reset(sk);
                }
                return;
        }
 again:
        if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
-               sdp_xmit_poll(ssk,  1);
+               sdp_xmit_poll(ssk, 1);
 
        /* Run out of credits, check if got a credit update */
        if (unlikely(tx_credits(ssk) <= SDP_MIN_TX_CREDITS)) {
@@ -229,17 +228,17 @@ again:
 
        if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
               sdp_tx_ring_slots_left(ssk) &&
-              ssk->isk.sk.sk_send_head &&
-               sdp_nagle_off(ssk, ssk->isk.sk.sk_send_head)) {
+              sk->sk_send_head &&
+               sdp_nagle_off(ssk, sk->sk_send_head)) {
                SDPSTATS_COUNTER_INC(send_miss_no_credits);
        }
 
        while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
               sdp_tx_ring_slots_left(ssk) &&
-              (skb = ssk->isk.sk.sk_send_head) &&
+              (skb = sk->sk_send_head) &&
                sdp_nagle_off(ssk, skb)) {
-               update_send_head(&ssk->isk.sk, skb);
-               __skb_dequeue(&ssk->isk.sk.sk_write_queue);
+               update_send_head(sk, skb);
+               __skb_dequeue(&sk->sk_write_queue);
 
                sdp_post_send(ssk, skb);
 
@@ -247,10 +246,10 @@ again:
        }
 
        if (credit_update_needed(ssk) &&
-           likely((1 << ssk->isk.sk.sk_state) &
+           likely((1 << sk->sk_state) &
                    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
 
-               skb = sdp_alloc_skb_data(&ssk->isk.sk, 0, gfp);
+               skb = sdp_alloc_skb_data(sk, 0, gfp);
                if (likely(skb)) {
                        sdp_post_send(ssk, skb);
                        SDPSTATS_COUNTER_INC(post_send_credits);
@@ -264,7 +263,7 @@ again:
         * messages that provide additional credits and also do not contain ULP
         * payload. */
        if (unlikely(ssk->sdp_disconnect) &&
-                       !ssk->isk.sk.sk_send_head &&
+                       !sk->sk_send_head &&
                        tx_credits(ssk) > 1) {
                skb = sdp_alloc_skb_disconnect(sk, gfp);
                if (likely(skb)) {
index 950e46d374de0230bd81b96d0bea190262255be9..b255b4bd6e7462bdbdb3b59f872477edf0fa303a 100644 (file)
 SDP_MODPARAM_SINT(sdp_link_layer_ib_only, 1, "Support only link layer of "
                "type Infiniband");
 
-enum {
-       SDP_HH_SIZE = 76,
-       SDP_HAH_SIZE = 180,
-};
-
 static void sdp_qp_event_handler(struct ib_event *event, void *data)
 {
 }
@@ -385,7 +380,7 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                conn_param.responder_resources = 4 /* TODO */;
                conn_param.initiator_depth = 4 /* TODO */;
                conn_param.retry_count = SDP_RETRY_COUNT;
-               SDP_DUMP_PACKET(NULL, "TX", NULL, &hh.bsdh);
+               SDP_DUMP_PACKET(sk, "TX", NULL, &hh.bsdh);
                rc = rdma_connect(id, &conn_param);
                break;
        case RDMA_CM_EVENT_ROUTE_ERROR:
index a65cb4ba722a1fb97529d8438d5fa9df7aaa21f4..f7db62cc7086e48a8f5a87510c642d37b08b2618 100644 (file)
@@ -558,7 +558,7 @@ static int sdp_process_rx_skb(struct sdp_sock *ssk, struct sk_buff *skb)
        credits_before = tx_credits(ssk);
        atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) +
                        1 + ntohs(h->bufs));
-       if (mseq_ack >= ssk->nagle_last_unacked)
+       if (!before(mseq_ack, ssk->nagle_last_unacked))
                ssk->nagle_last_unacked = 0;
 
        sdp_prf1(&ssk->isk.sk, skb, "RX: %s +%d c:%d->%d mseq:%d ack:%d",
index f86907b9ff8446e7524b04aa3f6ba6f62f9636cc..5f975c578f19e0f2d3b7434150b0b89e5b1e8de0 100644 (file)
@@ -338,7 +338,7 @@ void sdp_handle_sendsm(struct sdp_sock *ssk, u32 mseq_ack)
                goto out;
        }
 
-       if (ssk->tx_sa->mseq > mseq_ack) {
+       if (after(ssk->tx_sa->mseq, mseq_ack)) {
                sdp_dbg_data(sk, "SendSM arrived for old SrcAvail. "
                        "SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n",
                        mseq_ack, ssk->tx_sa->mseq);
@@ -371,7 +371,7 @@ void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
                goto out;
        }
 
-       if (ssk->tx_sa->mseq > mseq_ack) {
+       if (after(ssk->tx_sa->mseq, mseq_ack)) {
                sdp_dbg_data(sk, "RdmaRdCompl arrived for old SrcAvail. "
                        "SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n",
                        mseq_ack, ssk->tx_sa->mseq);
@@ -385,7 +385,6 @@ void sdp_handle_rdma_read_compl(struct sdp_sock *ssk, u32 mseq_ack,
 
 out:
        spin_unlock_irqrestore(&ssk->tx_sa_lock, flags);
-       return;
 }
 
 static unsigned long sdp_get_max_memlockable_bytes(unsigned long offset)