sdp: move tx_ring into dedicated structure + many cosmetic fixes
author Amir Vadai <amirv@mellanox.co.il>
Tue, 21 Apr 2009 13:56:37 +0000 (16:56 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
Tue, 6 Oct 2015 12:04:27 +0000 (05:04 -0700)
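
Gather the TX bookkeeping that was scattered across struct sdp_sock
(tx_head, tx_tail, tx_credits, snd_una) into a dedicated struct
sdp_tx_ring, and add an sdp_tx_ring_slots_left() helper for the
recurring SDP_TX_SIZE - (head - tail) computation.  Cosmetic and minor
fixes riding along:

- move the ib_wc scratch array off struct sdp_sock onto the stack of
  sdp_poll_cq()
- wrap CQ arming in a new sdp_arm_cq() helper
- rename the sdp workqueue to comp_wq
- clear qp and mr after destroying them, and call sdp_destroy_qp()
  from sdp_reset_sk()
- check the percpu counter allocations in sdp_init() and convert its
  error handling to a goto unwind ladder
- new statistics counters: send_miss_no_credits, rx_poll_miss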
Signed-off-by: Amir Vadai <amirv@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_bcopy.c
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_main.c
drivers/infiniband/ulp/sdp/sdp_proc.c

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 8b67d49b0104e22523aa556e545f25bb4509b138..cbef0eb03736a7a1d58872d273c8ae16fae832df 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -234,6 +234,22 @@ struct sdp_buf {
         u64             mapping[SDP_MAX_SEND_SKB_FRAGS + 1];
 };
 
+
+struct sdp_tx_ring {
+       struct sdp_buf   *buffer;
+       unsigned          head;
+       unsigned          tail;
+
+       int               una_seq;
+       unsigned          credits;
+       u16               poll_cnt;
+};
+
+static inline int sdp_tx_ring_slots_left(struct sdp_tx_ring *tx_ring)
+{
+       return SDP_TX_SIZE - (tx_ring->head - tx_ring->tail);
+}
+
 struct sdp_sock {
        /* sk has to be the first member of inet_sock */
        struct inet_sock isk;
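
The new helper leans on head and tail being free-running unsigned
counters rather than indices reduced modulo the ring size: unsigned
subtraction keeps the in-flight count correct across wraparound, and
the mseq & (SDP_TX_SIZE - 1) masking in sdp_bcopy.c below implies
SDP_TX_SIZE is a power of two. A standalone sketch of the arithmetic
(the 256 is an assumed value, not taken from this patch):

#include <assert.h>

#define SDP_TX_SIZE 256	/* assumed ring size; must be a power of two */

struct ring { unsigned head, tail; };	/* free-running, as in sdp_tx_ring */

static int slots_left(const struct ring *r)
{
	/* head - tail is the in-flight count modulo 2^32, so this stays
	 * correct even after either counter wraps past UINT_MAX */
	return SDP_TX_SIZE - (r->head - r->tail);
}

int main(void)
{
	struct ring r = { .head = 0xfffffffeu, .tail = 0xfffffff0u };

	assert(r.head - r.tail == 14);		/* 14 sends outstanding */
	assert(slots_left(&r) == SDP_TX_SIZE - 14);

	r.head += 20;				/* post 20 more; head wraps past 0 */
	assert(r.head - r.tail == 34);		/* subtraction still right */
	return 0;
}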
@@ -255,7 +271,6 @@ struct sdp_sock {
        u32 rcv_nxt;
 
        int write_seq;
-       int snd_una;
        int pushed_seq;
        int xmit_size_goal;
        int nonagle;
@@ -274,13 +289,8 @@ struct sdp_sock {
        int sdp_disconnect;
        int destruct_in_process;
 
-       struct sdp_buf *rx_ring;
-       struct sdp_buf   *tx_ring;
+       
 
-       /* rdma specific */
-       struct ib_qp *qp;
-       struct ib_cq *cq;
-       struct ib_mr *mr;
        /* Data below will be reset on error */
        struct rdma_cm_id *id;
        struct ib_device *ib_device;
@@ -290,15 +300,19 @@ struct sdp_sock {
        unsigned rx_head;
        unsigned rx_tail;
        unsigned mseq_ack;
-       unsigned tx_credits;
        unsigned max_bufs;      /* Initial buffers offered by other side */
        unsigned min_bufs;      /* Low water mark to wake senders */
 
        int               remote_credits;
        int               poll_cq;
 
-       unsigned          tx_head;
-       unsigned          tx_tail;
+       /* rdma specific */
+       struct ib_qp *qp;
+       struct ib_cq *cq;
+       struct ib_mr *mr;
+
+       struct sdp_buf *rx_ring;
+       struct sdp_tx_ring tx_ring;
        struct ib_send_wr tx_wr;
 
        /* SDP slow start */
@@ -317,7 +331,6 @@ struct sdp_sock {
        int   zcopy_thresh;
 
        struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
-       struct ib_wc  ibwc[SDP_NUM_WC];
 };
 
 /* Context used for synchronous zero copy bcopy (BZCOPY) */
@@ -336,7 +349,7 @@ struct bzcopy_state {
 extern int rcvbuf_initial_size;
 
 extern struct proto sdp_proto;
-extern struct workqueue_struct *sdp_workqueue;
+extern struct workqueue_struct *comp_wq;
 
 extern atomic_t sdp_current_mem_usage;
 extern spinlock_t sdp_large_sockets_lock;
@@ -427,7 +440,12 @@ static inline void sdp_set_error(struct sock *sk, int err)
        sk->sk_error_report(sk);
 }
 
-extern struct workqueue_struct *sdp_workqueue;
+static inline void sdp_arm_cq(struct sock *sk)
+{
+       sdp_dbg_data(sk, "ib_req_notify_cq on cq\n");
+       
+       ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+}
 
 #ifdef CONFIG_INFINIBAND_SDP_DEBUG_DATA
 void dump_packet(struct sock *sk, char *str, struct sk_buff *skb, const struct sdp_bsdh *h);
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index 205b09583c861439db411a5a25499432f24ec8b9..9f7f4a07f8913a0a31acd5057bafb66f1231247a 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -248,7 +248,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
 {
        struct sdp_buf *tx_req;
        struct sdp_bsdh *h = (struct sdp_bsdh *)skb_push(skb, sizeof *h);
-       unsigned mseq = ssk->tx_head;
+       unsigned mseq = ssk->tx_ring.head;
        int i, rc, frags;
        u64 addr;
        struct ib_device *dev;
@@ -270,7 +270,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
        h->mseq_ack = htonl(ssk->mseq_ack);
 
        SDP_DUMP_PACKET(&ssk->isk.sk, "TX", skb, h);
-       tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
+       tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
        tx_req->skb = skb;
        dev = ssk->ib_device;
        sge = ssk->ibsge;
@@ -299,7 +299,7 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
        }
 
        ssk->tx_wr.next = NULL;
-       ssk->tx_wr.wr_id = ssk->tx_head | SDP_OP_SEND;
+       ssk->tx_wr.wr_id = ssk->tx_ring.head | SDP_OP_SEND;
        ssk->tx_wr.sg_list = ssk->ibsge;
        ssk->tx_wr.num_sge = frags + 1;
        ssk->tx_wr.opcode = IB_WR_SEND;
@@ -317,8 +317,8 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid)
                last_send = jiffies;
        }
        rc = ib_post_send(ssk->qp, &ssk->tx_wr, &bad_wr);
-       ++ssk->tx_head;
-       --ssk->tx_credits;
+       ++ssk->tx_ring.head;
+       --ssk->tx_ring.credits;
        ssk->remote_credits = ssk->rx_head - ssk->rx_tail;
        if (unlikely(rc)) {
                sdp_dbg(&ssk->isk.sk, "ib_post_send failed with status %d.\n", rc);
@@ -331,18 +331,18 @@ struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
 {
        struct ib_device *dev;
        struct sdp_buf *tx_req;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        struct bzcopy_state *bz;
        int i, frags;
-
-       if (unlikely(mseq != ssk->tx_tail)) {
+       struct sdp_tx_ring *tx_ring = &ssk->tx_ring;
+       if (unlikely(mseq != tx_ring->tail)) {
                printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
-                       mseq, ssk->tx_tail);
-               return NULL;
+                       mseq, tx_ring->tail);
+               goto out;
        }
 
        dev = ssk->ib_device;
-        tx_req = &ssk->tx_ring[mseq & (SDP_TX_SIZE - 1)];
+        tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
        skb = tx_req->skb;
        ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
                            DMA_TO_DEVICE);
@@ -353,14 +353,16 @@ struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
                                  DMA_TO_DEVICE);
        }
 
-       ssk->snd_una += TCP_SKB_CB(skb)->end_seq;
-       ++ssk->tx_tail;
+       tx_ring->una_seq += TCP_SKB_CB(skb)->end_seq;
 
        /* TODO: AIO and real zcopy code; add their context support here */
        bz = BZCOPY_STATE(skb);
        if (bz)
                bz->busy--;
 
+       ++tx_ring->tail;
+
+out:
        return skb;
 }
 
@@ -555,15 +557,15 @@ static inline int sdp_nagle_off(struct sdp_sock *ssk, struct sk_buff *skb)
        return (ssk->nonagle & TCP_NAGLE_OFF) ||
                skb->next != (struct sk_buff *)&ssk->isk.sk.sk_write_queue ||
                skb->len + sizeof(struct sdp_bsdh) >= ssk->xmit_size_goal ||
-               (ssk->tx_tail == ssk->tx_head &&
+               (ssk->tx_ring.tail == ssk->tx_ring.head &&
                 !(ssk->nonagle & TCP_NAGLE_CORK)) ||
                (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_PSH);
 }
 
 int sdp_post_credits(struct sdp_sock *ssk)
 {
-       if (likely(ssk->tx_credits > 1) &&
-           likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE)) {
+       if (likely(ssk->tx_ring.credits > 1) &&
+           likely(sdp_tx_ring_slots_left(&ssk->tx_ring))) {
                struct sk_buff *skb;
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
@@ -599,8 +601,8 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
 
        if (ssk->recv_request &&
            ssk->rx_tail >= ssk->recv_request_head &&
-           ssk->tx_credits >= SDP_MIN_TX_CREDITS &&
-           ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+           ssk->tx_ring.credits >= SDP_MIN_TX_CREDITS &&
+           ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) {
                struct sdp_chrecvbuf *resp_size;
                ssk->recv_request = 0;
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
@@ -614,8 +616,19 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF_ACK);
        }
 
-       while (ssk->tx_credits > SDP_MIN_TX_CREDITS &&
-              ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE &&
+       if (ssk->tx_ring.credits <= SDP_MIN_TX_CREDITS &&
+              sdp_tx_ring_slots_left(&ssk->tx_ring) &&
+              (skb = ssk->isk.sk.sk_send_head) &&
+               sdp_nagle_off(ssk, skb)) {
+               SDPSTATS_COUNTER_INC(send_miss_no_credits);
+       }
+
+       sdp_dbg_data(&ssk->isk.sk, "credits: %d tx ring slots left: %d send_head: %p\n",
+               ssk->tx_ring.credits, sdp_tx_ring_slots_left(&ssk->tx_ring),
+               ssk->isk.sk.sk_send_head);
+
+       while (ssk->tx_ring.credits > SDP_MIN_TX_CREDITS &&
+              sdp_tx_ring_slots_left(&ssk->tx_ring) &&
               (skb = ssk->isk.sk.sk_send_head) &&
                sdp_nagle_off(ssk, skb)) {
                update_send_head(&ssk->isk.sk, skb);
@@ -623,10 +636,10 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                sdp_post_send(ssk, skb, SDP_MID_DATA);
        }
 
-       if (ssk->tx_credits == SDP_MIN_TX_CREDITS &&
+       if (ssk->tx_ring.credits == SDP_MIN_TX_CREDITS &&
            !ssk->sent_request &&
-           ssk->tx_head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
-           ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) {
+           ssk->tx_ring.head > ssk->sent_request_head + SDP_RESIZE_WAIT &&
+           ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) {
                struct sdp_chrecvbuf *req_size;
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh) +
@@ -635,7 +648,7 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                /* FIXME */
                BUG_ON(!skb);
                ssk->sent_request = SDP_MAX_SEND_SKB_FRAGS * PAGE_SIZE;
-               ssk->sent_request_head = ssk->tx_head;
+               ssk->sent_request_head = ssk->tx_ring.head;
                req_size = (struct sdp_chrecvbuf *)skb_put(skb, sizeof *req_size);
                req_size->size = htonl(ssk->sent_request);
                sdp_post_send(ssk, skb, SDP_MID_CHRCVBUF);
@@ -646,8 +659,8 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                c *= 2;
 
        if (unlikely(c < ssk->rx_head - ssk->rx_tail) &&
-           likely(ssk->tx_credits > 1) &&
-           likely(ssk->tx_head - ssk->tx_tail < SDP_TX_SIZE) &&
+           likely(ssk->tx_ring.credits > 1) &&
+           likely(ssk->tx_ring.head - ssk->tx_ring.tail < SDP_TX_SIZE) &&
            likely((1 << ssk->isk.sk.sk_state) &
                    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1))) {
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
@@ -659,9 +672,14 @@ void sdp_post_sends(struct sdp_sock *ssk, int nonagle)
                sdp_post_send(ssk, skb, SDP_MID_DATA);
        }
 
+       /* send DisConn if needed
+        * Do not send DisConn if there is only 1 credit. Compliance with CA4-82:
+        * If one credit is available, an implementation shall only send SDP
+        * messages that provide additional credits and also do not contain ULP
+        * payload. */
        if (unlikely(ssk->sdp_disconnect) &&
                !ssk->isk.sk.sk_send_head &&
-               ssk->tx_credits > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
+               ssk->tx_ring.credits > (ssk->remote_credits >= ssk->rx_head - ssk->rx_tail)) {
                ssk->sdp_disconnect = 0;
                skb = sdp_stream_alloc_skb(&ssk->isk.sk,
                                          sizeof(struct sdp_bsdh),
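
One plausible reading of that guard (the patch does not spell it out):
remote_credits >= rx_head - rx_tail is true exactly when the peer
already holds a credit for every receive we have posted, i.e. a DisConn
sent now would advertise no additional credits; the boolean then raises
the required credit count from one to two, which is the CA4-82 rule the
new comment cites.

/* Equivalent form of the guard under this reading; names illustrative */
int disconn_adds_no_credits = remote_credits >= rx_head - rx_tail;
int may_send_disconn = tx_credits > disconn_adds_no_credits;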
@@ -781,8 +799,8 @@ static int sdp_handle_recv_comp(struct sdp_sock *ssk, struct ib_wc *wc)
                printk(KERN_WARNING "SDP BUG! mseq %d != wrid %d\n",
                                ssk->mseq_ack, (int)wc->wr_id);
 
-       SDPSTATS_HIST_LINEAR(credits_before_update, ssk->tx_credits);
-       ssk->tx_credits = ntohl(h->mseq_ack) - ssk->tx_head + 1 +
+       SDPSTATS_HIST_LINEAR(credits_before_update, ssk->tx_ring.credits);
+       ssk->tx_ring.credits = ntohl(h->mseq_ack) - ssk->tx_ring.head + 1 +
                ntohs(h->bufs);
 
        frags = skb_shinfo(skb)->nr_frags;
@@ -863,7 +881,7 @@ static int sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc)
                        sdp_set_error(sk, -ECONNRESET);
                        wake_up(&ssk->wq);
 
-                       queue_work(sdp_workqueue, &ssk->destroy_work);
+                       queue_work(comp_wq, &ssk->destroy_work);
                }
                goto out;
        }
@@ -922,12 +940,13 @@ void sdp_completion_handler(struct ib_cq *cq, void *cq_context)
 
 int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq)
 {
+       struct ib_wc ibwc[SDP_NUM_WC];
        int n, i;
        int ret = -EAGAIN;
        do {
-               n = ib_poll_cq(cq, SDP_NUM_WC, ssk->ibwc);
+               n = ib_poll_cq(cq, SDP_NUM_WC, ibwc);
                for (i = 0; i < n; ++i) {
-                       sdp_handle_wc(ssk, ssk->ibwc + i);
+                       sdp_handle_wc(ssk, &ibwc[i]);
                        ret = 0;
                }
        } while (n == SDP_NUM_WC);
@@ -936,10 +955,14 @@ int sdp_poll_cq(struct sdp_sock *ssk, struct ib_cq *cq)
                struct sock *sk = &ssk->isk.sk;
 
                sdp_post_recvs(ssk);
+
+               /* update credits */
                sdp_post_sends(ssk, 0);
 
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        sk_stream_write_space(&ssk->isk.sk);
+       } else {
+               SDPSTATS_COUNTER_INC(rx_poll_miss);
        }
 
        return ret;
@@ -972,7 +995,7 @@ void sdp_work(struct work_struct *work)
        cq = ssk->cq;
        if (unlikely(!cq))
                goto out;
-       ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+       sdp_arm_cq(sk);
        sdp_poll_cq(ssk, cq);
 out:
        release_sock(sk);
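
sdp_work() arms the CQ before draining it. With event-driven completion
queues that ordering closes a race: any completion arriving after the
arm re-raises sdp_completion_handler(), and anything that arrived before
the arm is picked up by the sdp_poll_cq() call that follows, so no event
is lost in the gap. As a fragment (handle_wc() is an illustrative name,
not from this patch):

struct ib_wc wc;

ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);	/* later arrivals re-raise the event */
while (ib_poll_cq(cq, 1, &wc) > 0)	/* earlier arrivals are drained here */
	handle_wc(&wc);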
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index 429f47b9fe407178f3ba805ad8672b380b736cf9..1e1ff9dde5623130f68609841046d5eb5064b250 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -80,17 +80,17 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 
        sdp_dbg(sk, "%s\n", __func__);
 
-       sdp_sk(sk)->tx_head = 1;
-       sdp_sk(sk)->tx_tail = 1;
+       sdp_sk(sk)->tx_ring.head = 1;
+       sdp_sk(sk)->tx_ring.tail = 1;
        sdp_sk(sk)->rx_head = 1;
        sdp_sk(sk)->rx_tail = 1;
 
-       sdp_sk(sk)->tx_ring = kmalloc(sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE,
-                                     GFP_KERNEL);
-       if (!sdp_sk(sk)->tx_ring) {
+       sdp_sk(sk)->tx_ring.buffer = kmalloc(sizeof(*sdp_sk(sk)->tx_ring.buffer) *
+                       (SDP_TX_SIZE + 1), GFP_KERNEL);
+       if (!sdp_sk(sk)->tx_ring.buffer) {
                rc = -ENOMEM;
                sdp_warn(sk, "Unable to allocate TX Ring size %zd.\n",
-                        sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE);
+                        sizeof *sdp_sk(sk)->tx_ring.buffer * (SDP_TX_SIZE + 1));
                goto err_tx;
        }
 
@@ -162,8 +162,9 @@ err_pd:
        kfree(sdp_sk(sk)->rx_ring);
        sdp_sk(sk)->rx_ring = NULL;
 err_rx:
-       kfree(sdp_sk(sk)->tx_ring);
-       sdp_sk(sk)->tx_ring = NULL;
+       WARN_ON(sdp_sk(sk)->tx_ring.head != sdp_sk(sk)->tx_ring.tail);
+       kfree(sdp_sk(sk)->tx_ring.buffer);
+       sdp_sk(sk)->tx_ring.buffer = NULL;
 err_tx:
        return rc;
 }
@@ -206,19 +207,18 @@ static int sdp_connect_handler(struct sock *sk, struct rdma_cm_id *id,
 
        sdp_add_sock(sdp_sk(child));
 
-       sdp_sk(child)->max_bufs = sdp_sk(child)->tx_credits = ntohs(h->bsdh.bufs);
-       sdp_sk(child)->min_bufs = sdp_sk(child)->tx_credits / 4;
+       sdp_sk(child)->max_bufs = sdp_sk(child)->tx_ring.credits = ntohs(h->bsdh.bufs);
+       sdp_sk(child)->min_bufs = sdp_sk(child)->tx_ring.credits / 4;
        sdp_sk(child)->xmit_size_goal = ntohl(h->localrcvsz) -
                sizeof(struct sdp_bsdh);
        sdp_sk(child)->send_frags = PAGE_ALIGN(sdp_sk(child)->xmit_size_goal) /
                PAGE_SIZE;
         sdp_init_buffers(sdp_sk(child), rcvbuf_initial_size);
-
        
        sdp_dbg(child, "%s recv_frags: %d tx credits %d xmit_size_goal %d send trigger %d\n",
                __func__,
                sdp_sk(child)->recv_frags,
-               sdp_sk(child)->tx_credits,
+               sdp_sk(child)->tx_ring.credits,
                sdp_sk(child)->xmit_size_goal,
                sdp_sk(child)->min_bufs);
 
@@ -254,8 +254,8 @@ static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id,
 
        h = event->param.conn.private_data;
        SDP_DUMP_PACKET(sk, "RX", NULL, &h->bsdh);
-       sdp_sk(sk)->max_bufs = sdp_sk(sk)->tx_credits = ntohs(h->bsdh.bufs);
-       sdp_sk(sk)->min_bufs = sdp_sk(sk)->tx_credits / 4;
+       sdp_sk(sk)->max_bufs = sdp_sk(sk)->tx_ring.credits = ntohs(h->bsdh.bufs);
+       sdp_sk(sk)->min_bufs = sdp_sk(sk)->tx_ring.credits / 4;
        sdp_sk(sk)->xmit_size_goal = ntohl(h->actrcvsz) -
                sizeof(struct sdp_bsdh);
        sdp_sk(sk)->send_frags = MIN(PAGE_ALIGN(sdp_sk(sk)->xmit_size_goal) /
@@ -265,13 +265,13 @@ static int sdp_response_handler(struct sock *sk, struct rdma_cm_id *id,
 
        sdp_dbg(sk, "%s bufs %d xmit_size_goal %d send_frags: %d send trigger %d\n",
                __func__,
-               sdp_sk(sk)->tx_credits,
+               sdp_sk(sk)->tx_ring.credits,
                sdp_sk(sk)->xmit_size_goal,
                sdp_sk(sk)->send_frags,
                sdp_sk(sk)->min_bufs);
 
        sdp_sk(sk)->poll_cq = 1;
-       ib_req_notify_cq(sdp_sk(sk)->cq, IB_CQ_NEXT_COMP);
+       sdp_arm_cq(sk);
        sdp_poll_cq(sdp_sk(sk), sdp_sk(sk)->cq);
 
        sk->sk_state_change(sk);
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index e7f65f92e399c16a8df7a7a283f4241c8b5ea96d..6b7494f5ff50e27128629a66cd7445d674f987bb 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -135,7 +135,7 @@ static int sdp_zcopy_thresh = 65536;
 module_param_named(sdp_zcopy_thresh, sdp_zcopy_thresh, int, 0644);
 MODULE_PARM_DESC(sdp_zcopy_thresh, "Zero copy send threshold; 0=off.");
 
-struct workqueue_struct *sdp_workqueue;
+struct workqueue_struct *comp_wq;
 
 struct list_head sock_list;
 spinlock_t sock_list_lock;
@@ -212,6 +212,7 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
                cq = ssk->cq;
                ssk->cq = NULL;
                ib_destroy_qp(ssk->qp);
+               ssk->qp = NULL;
 
                while (ssk->rx_head != ssk->rx_tail) {
                        struct sk_buff *skb;
@@ -221,9 +222,9 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
                        atomic_sub(SDP_MAX_SEND_SKB_FRAGS, &sdp_current_mem_usage);
                        __kfree_skb(skb);
                }
-               while (ssk->tx_head != ssk->tx_tail) {
+               while (ssk->tx_ring.head != ssk->tx_ring.tail) {
                        struct sk_buff *skb;
-                       skb = sdp_send_completion(ssk, ssk->tx_tail);
+                       skb = sdp_send_completion(ssk, ssk->tx_ring.tail);
                        if (!skb)
                                break;
                        __kfree_skb(skb);
@@ -233,8 +234,10 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
        if (cq)
                ib_destroy_cq(cq);
 
-       if (ssk->mr)
+       if (ssk->mr) {
                ib_dereg_mr(ssk->mr);
+               ssk->mr = NULL;
+       }
 
        if (pd)
                ib_dealloc_pd(pd);
@@ -245,20 +248,19 @@ static void sdp_destroy_qp(struct sdp_sock *ssk)
                kfree(ssk->rx_ring);
                ssk->rx_ring = NULL;
        }
-       if (ssk->tx_ring) {
-               kfree(ssk->tx_ring);
-               ssk->tx_ring = NULL;
+       if (ssk->tx_ring.buffer) {
+               kfree(ssk->tx_ring.buffer);
+               ssk->tx_ring.buffer = NULL;
        }
 }
 
-
 static void sdp_reset_keepalive_timer(struct sock *sk, unsigned long len)
 {
        struct sdp_sock *ssk = sdp_sk(sk);
 
        sdp_dbg(sk, "%s\n", __func__);
 
-       ssk->keepalive_tx_head = ssk->tx_head;
+       ssk->keepalive_tx_head = ssk->tx_ring.head;
        ssk->keepalive_rx_head = ssk->rx_head;
 
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
@@ -294,7 +296,7 @@ static void sdp_keepalive_timer(unsigned long data)
            sk->sk_state == TCP_CLOSE)
                goto out;
 
-       if (ssk->keepalive_tx_head == ssk->tx_head &&
+       if (ssk->keepalive_tx_head == ssk->tx_ring.head &&
            ssk->keepalive_rx_head == ssk->rx_head)
                sdp_post_keepalive(ssk);
 
@@ -345,13 +347,15 @@ void sdp_reset_sk(struct sock *sk, int rc)
        if (!(sk->sk_shutdown & RCV_SHUTDOWN) || !sk_stream_memory_free(sk))
                sdp_set_error(sk, rc);
 
+       sdp_destroy_qp(ssk);
+
        memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
 
        sk->sk_state_change(sk);
 
        /* Don't destroy socket before destroy work does its job */
        sock_hold(sk, SOCK_REF_RESET);
-       queue_work(sdp_workqueue, &ssk->destroy_work);
+       queue_work(comp_wq, &ssk->destroy_work);
 
        read_unlock(&device_removal_lock);
 }
@@ -729,7 +733,7 @@ static int sdp_wait_for_connect(struct sock *sk, long timeo)
 /* Like inet_csk_accept */
 static struct sock *sdp_accept(struct sock *sk, int flags, int *err)
 {
-       struct sdp_sock *newssk, *ssk;
+       struct sdp_sock *newssk = NULL, *ssk;
        struct sock *newsk;
        int error;
 
@@ -772,7 +776,7 @@ out:
                if (newssk->cq) {
                        sdp_dbg(newsk, "%s: ib_req_notify_cq\n", __func__);
                        newssk->poll_cq = 1;
-                       ib_req_notify_cq(newssk->cq, IB_CQ_NEXT_COMP);
+                       sdp_arm_cq(&newssk->isk.sk);
                        sdp_poll_cq(newssk, newssk->cq);
                }
                release_sock(newsk);
@@ -828,7 +832,7 @@ static int sdp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
-                       answ = ssk->write_seq - ssk->snd_una;
+                       answ = ssk->write_seq - ssk->tx_ring.una_seq;
                break;
        default:
                return -ENOIOCTLCMD;
@@ -843,7 +847,7 @@ static inline void sdp_start_dreq_wait_timeout(struct sdp_sock *ssk, int timeo)
 {
        sdp_dbg(&ssk->isk.sk, "Starting dreq wait timeout\n");
 
-       queue_delayed_work(sdp_workqueue, &ssk->dreq_wait_work, timeo);
+       queue_delayed_work(comp_wq, &ssk->dreq_wait_work, timeo);
        ssk->dreq_wait_timeout = 1;
 }
 
@@ -932,7 +936,7 @@ int sdp_init_sock(struct sock *sk)
        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
 
        ssk->rx_ring = NULL;
-       ssk->tx_ring = NULL;
+       ssk->tx_ring.buffer = NULL;
        ssk->sdp_disconnect = 0;
        ssk->destructed_already = 0;
        ssk->destruct_in_process = 0;
@@ -1551,7 +1555,8 @@ static inline int slots_free(struct sdp_sock *ssk)
 {
        int min_free;
 
-       min_free = MIN(ssk->tx_credits, SDP_TX_SIZE - (ssk->tx_head - ssk->tx_tail));
+       min_free = MIN(ssk->tx_ring.credits,
+                       SDP_TX_SIZE - (ssk->tx_ring.head - ssk->tx_ring.tail));
        if (min_free < SDP_MIN_TX_CREDITS)
                return 0;
 
@@ -1632,8 +1637,8 @@ void sdp_bzcopy_write_space(struct sdp_sock *ssk)
        struct sock *sk = &ssk->isk.sk;
        struct socket *sock = sk->sk_socket;
 
-       if (ssk->tx_credits >= ssk->min_bufs &&
-           ssk->tx_head == ssk->tx_tail &&
+       if (ssk->tx_ring.credits >= ssk->min_bufs &&
+           ssk->tx_ring.head == ssk->tx_ring.tail &&
           sock != NULL) {
                clear_bit(SOCK_NOSPACE, &sock->flags);
 
@@ -2361,40 +2366,37 @@ static struct ib_client sdp_client = {
 
 static int __init sdp_init(void)
 {
-       int rc;
+       int rc = -ENOMEM;
 
        INIT_LIST_HEAD(&sock_list);
        spin_lock_init(&sock_list_lock);
        spin_lock_init(&sdp_large_sockets_lock);
 
        sockets_allocated = kmalloc(sizeof(*sockets_allocated), GFP_KERNEL);
+       if (!sockets_allocated)
+               goto no_mem_sockets_allocated;
+
        orphan_count = kmalloc(sizeof(*orphan_count), GFP_KERNEL);
+       if (!orphan_count)
+               goto no_mem_orphan_count;
+
        percpu_counter_init(sockets_allocated, 0);
        percpu_counter_init(orphan_count, 0);
 
        sdp_proto.sockets_allocated = sockets_allocated;
        sdp_proto.orphan_count = orphan_count;
 
-
-       sdp_workqueue = create_singlethread_workqueue("sdp");
-       if (!sdp_workqueue) {
-               return -ENOMEM;
-       }
+       comp_wq = create_singlethread_workqueue("comp_wq");
+       if (!comp_wq)
+               goto no_mem_rx_wq;
 
        rc = proto_register(&sdp_proto, 1);
-       if (rc) {
-               printk(KERN_WARNING "%s: proto_register failed: %d\n", __func__, rc);
-               destroy_workqueue(sdp_workqueue);
-               return rc;
-       }
+       if (rc)
+               goto error_proto_reg;
 
        rc = sock_register(&sdp_net_proto);
-       if (rc) {
-               printk(KERN_WARNING "%s: sock_register failed: %d\n", __func__, rc);
-               proto_unregister(&sdp_proto);
-               destroy_workqueue(sdp_workqueue);
-               return rc;
-       }
+       if (rc)
+               goto error_sock_reg;
 
        sdp_proc_init();
 
@@ -2403,6 +2405,17 @@ static int __init sdp_init(void)
        ib_register_client(&sdp_client);
 
        return 0;
+
+error_sock_reg:
+       proto_unregister(&sdp_proto);
+error_proto_reg:
+       destroy_workqueue(comp_wq);
+no_mem_rx_wq:
+       kfree(orphan_count);
+no_mem_orphan_count:
+       kfree(sockets_allocated);
+no_mem_sockets_allocated:
+       return rc;
 }
 
 static void __exit sdp_exit(void)
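
sdp_init() now follows the kernel's standard unwind ladder: rc is
preseeded with -ENOMEM so the allocation failures jump straight to
their label, failed registrations carry their own rc, and each label
releases exactly what was acquired before the failing step, in reverse
order. A compilable miniature of the shape (names and stand-ins are
illustrative):

#include <stdlib.h>

static int demo_init(void)
{
	void *counters, *wq;
	int rc = -12;		/* -ENOMEM, preseeded as in sdp_init() */

	counters = malloc(32);	/* stands in for the percpu counters */
	if (!counters)
		goto no_mem_counters;

	wq = malloc(32);	/* stands in for the workqueue */
	if (!wq)
		goto no_mem_wq;

	/* registration steps would go here, each failure jumping to the
	 * label that undoes only the steps already completed */
	rc = 0;
	free(wq);		/* a real init keeps these; freed here only
				 * so the demo itself does not leak */
	free(counters);
	return rc;

no_mem_wq:
	free(counters);
no_mem_counters:
	return rc;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}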
@@ -2413,7 +2426,9 @@ static void __exit sdp_exit(void)
        if (percpu_counter_read_positive(orphan_count))
                printk(KERN_WARNING "%s: orphan_count %lld\n", __func__,
                       percpu_counter_read_positive(orphan_count));
-       destroy_workqueue(sdp_workqueue);
+
+       destroy_workqueue(comp_wq);
+
        flush_scheduled_work();
 
        BUG_ON(!list_empty(&sock_list));
diff --git a/drivers/infiniband/ulp/sdp/sdp_proc.c b/drivers/infiniband/ulp/sdp/sdp_proc.c
index b8614a595f0dddffb80ec6a2fc64c4908a72c9d5..e759864e95c4cebd8d095d264b9c87c1d504c819 100644
--- a/drivers/infiniband/ulp/sdp/sdp_proc.c
+++ b/drivers/infiniband/ulp/sdp/sdp_proc.c
@@ -127,7 +127,7 @@ static int sdp_seq_show(struct seq_file *seq, void *v)
        uid = sock_i_uid(sk);
        inode = sock_i_ino(sk);
        rx_queue = sdp_sk(sk)->rcv_nxt - sdp_sk(sk)->copied_seq;
-       tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->snd_una;
+       tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->tx_ring.una_seq;
 
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %5d %lu       %08X:%08X %X",
                st->num, src, srcp, dest, destp, uid, inode,