IB/sdp: Three bugfixes in SDP sockets.
author    Michael S. Tsirkin <mst@mellanox.co.il>
          Thu, 3 Aug 2006 09:53:07 +0000 (12:53 +0300)
committer Mukesh Kacker <mukesh.kacker@oracle.com>
          Tue, 6 Oct 2015 12:03:54 +0000 (05:03 -0700)
1. Reset the socket on a CMA error.
2. Move ring head/tail initialization to where the ring buffers are
   initialized (sdp_init_qp), so the counters are set up again when the
   QP is re-created after a reset.
3. Poll the CQ on reset to handle a DREQ that bypassed the normal
   disconnect path.

drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_bcopy.c
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_main.c

diff --git a/drivers/infiniband/ulp/sdp/sdp.h b/drivers/infiniband/ulp/sdp/sdp.h
index 0c61658d7c6bc773b1c616a123f5f33515f073b7..606787c6952bd2cd43ad17ed2ce5fc7338d4da2b 100644
--- a/drivers/infiniband/ulp/sdp/sdp.h
+++ b/drivers/infiniband/ulp/sdp/sdp.h
@@ -86,12 +86,15 @@ struct sdp_sock {
        struct list_head accept_queue;
        struct list_head backlog_queue;
        struct sock *parent;
-       /* rdma specific */
-       struct rdma_cm_id *id;
-       struct ib_qp *qp;
-       struct ib_cq *cq;
-       struct ib_mr *mr;
-       struct device *dma_device;
+
+       struct work_struct work;
+       wait_queue_head_t wq;
+
+       struct work_struct time_wait_work;
+       struct work_struct destroy_work;
+
+       int time_wait;
+
        /* Like tcp_sock */
        __u16 urg_data;
        int offset; /* like seq in tcp */
@@ -99,17 +102,18 @@ struct sdp_sock {
        int xmit_size_goal;
        int write_seq;
        int pushed_seq;
-       int nonagle;
 
-       /* SDP specific */
-       wait_queue_head_t wq;
-
-       struct work_struct time_wait_work;
-       struct work_struct destroy_work;
+       int nonagle;
 
-       int time_wait;
+       /* Data below will be reset on error */
+       /* rdma specific */
+       struct rdma_cm_id *id;
+       struct ib_qp *qp;
+       struct ib_cq *cq;
+       struct ib_mr *mr;
+       struct device *dma_device;
 
-       spinlock_t lock;
+       /* SDP specific */
        struct sdp_buf *rx_ring;
        struct ib_recv_wr rx_wr;
        unsigned rx_head;
@@ -119,7 +123,6 @@ struct sdp_sock {
 
        int               remote_credits;
 
-       spinlock_t        tx_lock;
        struct sdp_buf   *tx_ring;
        unsigned          tx_head;
        unsigned          tx_tail;
@@ -127,7 +130,6 @@ struct sdp_sock {
 
        struct ib_sge ibsge[SDP_MAX_SEND_SKB_FRAGS + 1];
        struct ib_wc  ibwc[SDP_NUM_WC];
-       struct work_struct work;
 };
 
 extern struct proto sdp_proto;
@@ -163,7 +165,8 @@ static inline void sdp_set_state(struct sock *sk, int state)
 extern struct workqueue_struct *sdp_workqueue;
 
 int sdp_cma_handler(struct rdma_cm_id *, struct rdma_cm_event *);
-void sdp_close_sk(struct sock *sk);
+void sdp_reset_sk(struct sock *sk, int rc);
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk);
 void sdp_completion_handler(struct ib_cq *cq, void *cq_context);
 void sdp_work(void *);
 void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb, u8 mid);
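
The field reordering above is what makes a one-shot reset possible:
everything that must survive an error now sits ahead of id, and the new
sdp_reset_sk() (in the sdp_main.c hunks below) wipes everything from id
onward with a single memset() based on offsetof(). A minimal standalone
sketch of that idiom, using illustrative names rather than the driver's
types:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct conn {
	/* Fields above the marker survive a reset. */
	int ref_count;

	/* Everything from 'id' down is wiped on error. */
	void *id;
	void *qp;
	unsigned int tx_head;
	unsigned int tx_tail;
};

static void conn_reset(struct conn *c)
{
	/* Zero the tail of the struct in one call, as sdp_reset_sk() does. */
	memset(&c->id, 0, sizeof(*c) - offsetof(struct conn, id));
}

int main(void)
{
	struct conn c = { .ref_count = 1, .tx_head = 5, .tx_tail = 5 };

	conn_reset(&c);
	printf("ref_count=%d tx_head=%u\n", c.ref_count, c.tx_head);
	return 0;
}

The same constraint explains why work, wq, and the time-wait fields move
above the marker: they must stay valid while the reset and destroy work
items run.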
diff --git a/drivers/infiniband/ulp/sdp/sdp_bcopy.c b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
index abfd8bd7b5c3aca0883f6446c45f3c39a29c4245..371fc487bee82910d2ba370867014c025db89cd2 100644
--- a/drivers/infiniband/ulp/sdp/sdp_bcopy.c
+++ b/drivers/infiniband/ulp/sdp/sdp_bcopy.c
@@ -447,11 +447,9 @@ static void sdp_handle_wc(struct sdp_sock *ssk, struct ib_wc *wc)
 
        if (ssk->time_wait && !ssk->isk.sk.sk_send_head &&
            ssk->tx_head == ssk->tx_tail) {
-               ssk->time_wait = 0;
-               ssk->isk.sk.sk_state = TCP_CLOSE;
                sdp_dbg(&ssk->isk.sk, "%s: destroy in time wait state\n",
                        __func__);
-               queue_work(sdp_workqueue, &ssk->destroy_work);
+               sdp_time_wait_destroy_sk(ssk);
        }
 }
 
diff --git a/drivers/infiniband/ulp/sdp/sdp_cma.c b/drivers/infiniband/ulp/sdp/sdp_cma.c
index 527f86bf98c4faf8b491447117e10baf13e2e52e..678287392d9c03a6b4f3b89eaccd9eaf0169c591 100644
--- a/drivers/infiniband/ulp/sdp/sdp_cma.c
+++ b/drivers/infiniband/ulp/sdp/sdp_cma.c
@@ -108,6 +108,11 @@ int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 
        sdp_dbg(sk, "%s\n", __func__);
 
+       sdp_sk(sk)->tx_head = 1;
+       sdp_sk(sk)->tx_tail = 1;
+       sdp_sk(sk)->rx_head = 1;
+       sdp_sk(sk)->rx_tail = 1;
+
        sdp_sk(sk)->tx_ring = kmalloc(sizeof *sdp_sk(sk)->tx_ring * SDP_TX_SIZE,
                                      GFP_KERNEL);
        if (!sdp_sk(sk)->tx_ring) {
@@ -459,8 +464,9 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                sdp_sk(sk)->id = NULL;
                id->qp = NULL;
                id->context = NULL;
-               sdp_set_error(sk, rc);
                parent = sdp_sk(sk)->parent;
+               if (!parent)
+                       sdp_reset_sk(sk, rc);
        }
 
        release_sock(sk);
@@ -482,7 +488,6 @@ int sdp_cma_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 done:
                release_sock(parent);
                sk_common_release(child);
-       } else if (rc) {
        }
        return rc;
 }
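
With the first hunk above, the head/tail counters are initialized in
sdp_init_qp() next to the ring allocations, so after sdp_reset_sk()
zeroes the counters and frees the rings, the next QP setup
re-establishes both together. The rings are indexed by free-running
unsigned counters: a slot is counter % size, head == tail means empty,
and the absolute starting value (1 in the driver) does not matter since
only the difference does. A small self-contained sketch of that
accounting, with illustrative names:

#include <stdio.h>

#define RING_SIZE 4	/* stand-in for SDP_TX_SIZE / SDP_RX_SIZE */

struct ring {
	int buf[RING_SIZE];
	unsigned int head;	/* next slot to produce into */
	unsigned int tail;	/* next slot to consume from */
};

static int ring_post(struct ring *r, int v)
{
	if (r->head - r->tail == RING_SIZE)
		return -1;			/* full */
	r->buf[r->head++ % RING_SIZE] = v;
	return 0;
}

static int ring_complete(struct ring *r, int *v)
{
	if (r->head == r->tail)
		return -1;			/* empty */
	*v = r->buf[r->tail++ % RING_SIZE];
	return 0;
}

int main(void)
{
	/* Start both counters at the same value so the ring begins
	 * empty; 1 matches sdp_init_qp(), but any value works. */
	struct ring r = { .head = 1, .tail = 1 };
	int v;

	ring_post(&r, 42);
	ring_complete(&r, &v);
	printf("%d, empty=%d\n", v, r.head == r.tail);
	return 0;
}

This is also why sdp_handle_wc() (sdp_bcopy.c above) can use
tx_head == tx_tail as "no sends outstanding" before destroying a
time-wait socket.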
diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
index 4e3b1b5c6b619c391f593547f286c72def90380c..241c6668cd33f89aa4069422ba0a2a3686d38edf 100644
--- a/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -120,41 +120,15 @@ static int sdp_get_port(struct sock *sk, unsigned short snum)
        return 0;
 }
 
-/* TODO: linger? */
-void sdp_close_sk(struct sock *sk)
+static void sdp_destroy_qp(struct sdp_sock *ssk)
 {
-       struct sdp_sock *ssk = sdp_sk(sk);
-       struct rdma_cm_id *id = NULL;
        struct ib_pd *pd = NULL;
        struct ib_cq *cq = NULL;
 
-       sdp_dbg(sk, "%s\n", __func__);
-
-       lock_sock(sk);
-
-       sk->sk_send_head = NULL;
-       skb_queue_purge(&sk->sk_write_queue);
-        /*
-         * If sendmsg cached page exists, toss it.
-         */
-        if (sk->sk_sndmsg_page) {
-                __free_page(sk->sk_sndmsg_page);
-                sk->sk_sndmsg_page = NULL;
-        }
-
-       id = ssk->id;
-       if (ssk->id) {
-               id->qp = NULL;
-               ssk->id = NULL;
-               release_sock(sk);
-               rdma_destroy_id(id);
-               lock_sock(sk);
-       }
-
        if (ssk->qp) {
                pd = ssk->qp->pd;
                cq = ssk->cq;
-               sdp_sk(sk)->cq = NULL;
+               ssk->cq = NULL;
                ib_destroy_qp(ssk->qp);
 
                while (ssk->rx_head != ssk->rx_tail) {
@@ -182,10 +156,63 @@ void sdp_close_sk(struct sock *sk)
        if (pd)
                ib_dealloc_pd(pd);
 
+       kfree(ssk->rx_ring);
+       kfree(ssk->tx_ring);
+}
+
+void sdp_reset_sk(struct sock *sk, int rc)
+{
+       struct sdp_sock *ssk = sdp_sk(sk);
+
+       sdp_dbg(sk, "%s\n", __func__);
+
+       if (ssk->cq)
+               sdp_poll_cq(ssk, ssk->cq);
+
+       if (!(sk->sk_shutdown & RCV_SHUTDOWN))
+               sdp_set_error(sk, rc);
+
+       sdp_destroy_qp(ssk);
+
+       memset((void *)&ssk->id, 0, sizeof(*ssk) - offsetof(typeof(*ssk), id));
+
+       if (ssk->time_wait) {
+               sdp_dbg(sk, "%s: destroy in time wait state\n", __func__);
+               sdp_time_wait_destroy_sk(ssk);
+       }
+}
+
+/* TODO: linger? */
+static void sdp_close_sk(struct sock *sk)
+{
+       struct sdp_sock *ssk = sdp_sk(sk);
+       struct rdma_cm_id *id = NULL;
+       sdp_dbg(sk, "%s\n", __func__);
+
+       lock_sock(sk);
+
+       sk->sk_send_head = NULL;
+       skb_queue_purge(&sk->sk_write_queue);
+        /*
+         * If sendmsg cached page exists, toss it.
+         */
+        if (sk->sk_sndmsg_page) {
+                __free_page(sk->sk_sndmsg_page);
+                sk->sk_sndmsg_page = NULL;
+        }
+
+       id = ssk->id;
+       if (ssk->id) {
+               id->qp = NULL;
+               ssk->id = NULL;
+               release_sock(sk);
+               rdma_destroy_id(id);
+               lock_sock(sk);
+       }
+
        skb_queue_purge(&sk->sk_receive_queue);
 
-       kfree(sdp_sk(sk)->rx_ring);
-       kfree(sdp_sk(sk)->tx_ring);
+       sdp_destroy_qp(ssk);
 
        sdp_dbg(sk, "%s done; releasing sock\n", __func__);
        release_sock(sk);
@@ -558,6 +585,13 @@ void sdp_time_wait_work(void *data)
        sock_put(data);
 }
 
+void sdp_time_wait_destroy_sk(struct sdp_sock *ssk)
+{
+       ssk->time_wait = 0;
+       ssk->isk.sk.sk_state = TCP_CLOSE;
+       queue_work(sdp_workqueue, &ssk->destroy_work);
+}
+
 static int sdp_init_sock(struct sock *sk)
 {
        struct sdp_sock *ssk = sdp_sk(sk);
@@ -569,10 +603,6 @@ static int sdp_init_sock(struct sock *sk)
        INIT_WORK(&ssk->time_wait_work, sdp_time_wait_work, sk);
        INIT_WORK(&ssk->destroy_work, sdp_destroy_work, sk);
 
-       ssk->tx_head = 1;
-       ssk->tx_tail = 1;
-       ssk->rx_head = 1;
-       ssk->rx_tail = 1;
        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_NO_CSUM;
        return 0;
 }
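
Taken together, the sdp_main.c changes give the reset path a fixed
ordering: drain the CQ first (this is what catches a DREQ that bypassed
the disconnect path), flag the error unless receive shutdown already
happened, tear down the QP and rings, zero the resettable fields, and
queue destruction only if the socket was sitting in time-wait. A
compilable stub sketch of that control flow (stub names, not the kernel
API):

#include <stdio.h>

struct fake_sock {
	int have_cq;
	int rcv_shutdown;
	int time_wait;
};

static void poll_cq(void)    { printf("1. drain CQ (reaps a stray DREQ)\n"); }
static void set_error(void)  { printf("2. mark socket error\n"); }
static void destroy_qp(void) { printf("3. destroy QP, free rings\n"); }
static void wipe_tail(void)  { printf("4. memset resettable fields\n"); }
static void tw_destroy(void) { printf("5. time-wait: queue destroy_work\n"); }

static void reset_sk(struct fake_sock *s)
{
	if (s->have_cq)
		poll_cq();		/* reap completions before teardown */
	if (!s->rcv_shutdown)
		set_error();		/* don't clobber an orderly shutdown */
	destroy_qp();
	wipe_tail();
	if (s->time_wait)
		tw_destroy();		/* nothing left to wait for */
}

int main(void)
{
	struct fake_sock s = { .have_cq = 1, .rcv_shutdown = 0, .time_wait = 1 };

	reset_sk(&s);
	return 0;
}

The shared sdp_time_wait_destroy_sk() helper keeps this path and the
completion handler in sdp_bcopy.c doing the identical state transition
(time_wait = 0, TCP_CLOSE, queue destroy_work) instead of open-coding it
in two places.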