www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
sdp: fix a leak when ib_post_xxx fail + small fixes
authorAmir Vadai <amirv@mellanox.co.il>
Mon, 26 Apr 2010 11:53:23 +0000 (14:53 +0300)
committerMukesh Kacker <mukesh.kacker@oracle.com>
Tue, 6 Oct 2015 12:04:56 +0000 (05:04 -0700)
Signed-off-by: Amir Vadai <amirv@mellanox.co.il>
drivers/infiniband/ulp/sdp/sdp.h
drivers/infiniband/ulp/sdp/sdp_cma.c
drivers/infiniband/ulp/sdp/sdp_rx.c
drivers/infiniband/ulp/sdp/sdp_tx.c

index 0e66baeb6201058f3050bdd7c3087a22a26fe722..453a1e5a16d10e0dd01e87c497fe2724961250a8 100644 (file)
@@ -36,8 +36,8 @@
 
 #define SDP_MAX_RDMA_READ_LEN (PAGE_SIZE * (SDP_FMR_SIZE - 2))
 
-#define SDP_MAX_RECV_SGES 17
-#define SDP_MAX_SEND_SGES 17
+#define SDP_MAX_RECV_SGES 9 /* 1 for sdp header + 8 for payload */
+#define SDP_MAX_SEND_SGES 9 /* same as above */
 
 /* skb inlined data len - rest will be rx'ed into frags */
 #define SDP_SKB_HEAD_SIZE (0x500 + sizeof(struct sdp_bsdh))
@@ -749,6 +749,24 @@ static inline void sdpstats_hist(u32 *h, u32 val, u32 maxidx, int is_log)
 #define SDPSTATS_HIST(stat, size)
 #endif
 
+static inline void sdp_cleanup_sdp_buf(struct sdp_sock *ssk, struct sdp_buf *sbuf,
+               size_t head_size, enum dma_data_direction dir)
+{
+       int i;
+       struct sk_buff *skb;
+       struct ib_device *dev = ssk->ib_device;
+
+       skb = sbuf->skb;
+
+       ib_dma_unmap_single(dev, sbuf->mapping[0], head_size, dir);
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               ib_dma_unmap_page(dev, sbuf->mapping[i + 1],
+                                 skb_shinfo(skb)->frags[i].size,
+                                 dir);
+       }
+}
+
 /* sdp_main.c */
 void sdp_set_default_moderation(struct sdp_sock *ssk);
 int sdp_init_sock(struct sock *sk);
index 33c90cf1242cc0f4fb3b7f829ece934125ebdd1f..fdd98a720ec11fdee3a5e2bf83b71aa9ceed2095 100644 (file)
@@ -60,7 +60,7 @@ static void sdp_qp_event_handler(struct ib_event *event, void *data)
 {
 }
 
-static int sdp_get_max_send_sge(struct ib_device *dev)
+static int sdp_get_max_dev_sge(struct ib_device *dev)
 {
        struct ib_device_attr attr;
        static int max_sges = -1;
@@ -90,7 +90,7 @@ static int sdp_init_qp(struct sock *sk, struct rdma_cm_id *id)
 
        sdp_dbg(sk, "%s\n", __func__);
 
-       sdp_sk(sk)->max_sge = sdp_get_max_send_sge(device);
+       sdp_sk(sk)->max_sge = sdp_get_max_dev_sge(device);
        sdp_dbg(sk, "Max sges: %d\n", sdp_sk(sk)->max_sge);
 
        qp_init_attr.cap.max_send_sge = MIN(sdp_sk(sk)->max_sge, SDP_MAX_SEND_SGES);
index 1676a425dbd631e869418f0e9f3901be5335c598..b2743c46e80e923046e5ffc8fe9a2fd952c2385a 100644 (file)
@@ -223,6 +223,9 @@ static int sdp_post_recv(struct sdp_sock *ssk)
        if (unlikely(rc)) {
                sdp_warn(&ssk->isk.sk, "ib_post_recv failed. status %d\n", rc);
 
+               sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE);
+               __kfree_skb(skb);
+
                sdp_reset(&ssk->isk.sk);
 
                return -1;
@@ -402,7 +405,6 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id)
        struct sdp_buf *rx_req;
        struct ib_device *dev;
        struct sk_buff *skb;
-       int i, frags;
 
        if (unlikely(id != ring_tail(ssk->rx_ring))) {
                printk(KERN_WARNING "Bogus recv completion id %d tail %d\n",
@@ -413,13 +415,8 @@ static struct sk_buff *sdp_recv_completion(struct sdp_sock *ssk, int id)
        dev = ssk->ib_device;
        rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
        skb = rx_req->skb;
-       ib_dma_unmap_single(dev, rx_req->mapping[0], SDP_SKB_HEAD_SIZE,
-                           DMA_FROM_DEVICE);
-       frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < frags; ++i)
-               ib_dma_unmap_page(dev, rx_req->mapping[i + 1],
-                                 skb_shinfo(skb)->frags[i].size,
-                                 DMA_FROM_DEVICE);
+       sdp_cleanup_sdp_buf(ssk, rx_req, SDP_SKB_HEAD_SIZE, DMA_FROM_DEVICE);
+
        atomic_inc(&ssk->rx_ring.tail);
        atomic_dec(&ssk->remote_credits);
        return skb;
index 05632d82421c10847064f6efafab722d1338c309..173949eab76277ec354ec6b7779ce91739f59acd 100644 (file)
@@ -150,16 +150,22 @@ void sdp_post_send(struct sdp_sock *ssk, struct sk_buff *skb)
                tx_wr.send_flags |= IB_SEND_SOLICITED;
 
        rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr);
-       atomic_inc(&ssk->tx_ring.head);
-       atomic_dec(&ssk->tx_ring.credits);
-       atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));
        if (unlikely(rc)) {
                sdp_dbg(&ssk->isk.sk,
                                "ib_post_send failed with status %d.\n", rc);
+
+               sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE);
+
                sdp_set_error(&ssk->isk.sk, -ECONNRESET);
                wake_up(&ssk->wq);
+
+               goto err;
        }
 
+       atomic_inc(&ssk->tx_ring.head);
+       atomic_dec(&ssk->tx_ring.credits);
+       atomic_set(&ssk->remote_credits, rx_ring_posted(ssk));
+
        return;
 
 err:
@@ -171,7 +177,6 @@ static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
        struct ib_device *dev;
        struct sdp_buf *tx_req;
        struct sk_buff *skb = NULL;
-       int i, frags;
        struct sdp_tx_ring *tx_ring = &ssk->tx_ring;
        if (unlikely(mseq != ring_tail(*tx_ring))) {
                printk(KERN_WARNING "Bogus send completion id %d tail %d\n",
@@ -182,14 +187,8 @@ static struct sk_buff *sdp_send_completion(struct sdp_sock *ssk, int mseq)
        dev = ssk->ib_device;
        tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
        skb = tx_req->skb;
-       ib_dma_unmap_single(dev, tx_req->mapping[0], skb->len - skb->data_len,
-                           DMA_TO_DEVICE);
-       frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < frags; ++i) {
-               ib_dma_unmap_page(dev, tx_req->mapping[i + 1],
-                                 skb_shinfo(skb)->frags[i].size,
-                                 DMA_TO_DEVICE);
-       }
+
+       sdp_cleanup_sdp_buf(ssk, tx_req, skb->len - skb->data_len, DMA_TO_DEVICE);
 
        tx_ring->una_seq += SDP_SKB_CB(skb)->end_seq;