IPoIB: Protect tx_outstanding from parallel access
author Wengang Wang <wen.gang.wang@oracle.com>
Fri, 15 Jan 2016 07:53:46 +0000 (15:53 +0800)
committer Chuck Anderson <chuck.anderson@oracle.com>
Fri, 29 Jan 2016 15:37:02 +0000 (07:37 -0800)
Orabug: 22217400(UEK4)
quick ref: 21861366(UEK2) 22217399(UEK3)

In the IPoIB code, parallel access to tx_outstanding (send path vs. event
processing path) needs to be serialized. We use priv->lock to protect it. We
also take that lock to make stopping/waking the tx queue atomic with the
changes to tx_outstanding, closing the race between the completion handler
waking the queue and the send path stopping it.
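
A condensed sketch of the resulting pattern (distilled from the hunks
below; error handling, the ADMIN_UP check, and the CQ re-arm are omitted):

	/* send path: count and stop under the same lock */
	spin_lock_irqsave(&priv->lock, flags);
	if (++priv->tx_outstanding == ipoib_sendq_size)
		netif_stop_queue(dev);		/* TX ring full */
	spin_unlock_irqrestore(&priv->lock, flags);

	/* completion path: count and wake under the same lock */
	spin_lock_irqsave(&priv->lock, flags);
	if (--priv->tx_outstanding == ipoib_sendq_size >> 1 &&
	    netif_queue_stopped(dev))
		netif_wake_queue(dev);		/* ring half drained */
	spin_unlock_irqrestore(&priv->lock, flags);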

This patch also makes sure tx_outstanding is incremented before post_send is
called, so that the completion handler cannot decrement the counter before
the send path has incremented it, which could happen if the increment were
scheduled to run after the event handler.
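
Sketched ordering (condensed; post_send() here stands in for the driver's
actual post_send()/post_send_sg() calls):

	/* increment first, then post: a completion can only ever see a
	 * counter that already includes this work request */
	spin_lock_irqsave(&priv->lock, flags);
	++priv->tx_outstanding;
	spin_unlock_irqrestore(&priv->lock, flags);

	rc = post_send(priv, tx, wr_id, tx_req);
	if (unlikely(rc)) {
		/* on failure, undo the increment under the same lock */
		spin_lock_irqsave(&priv->lock, flags);
		--priv->tx_outstanding;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&priv->lock, flags);
	}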

Impact on throughput is a ~1.5% drop.

Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index d84531d49722b9137d05ddf833b3839cd55965f1..b45dbd214a7dcb6e16e6d5d250a7418d9f0b21da 100644
@@ -785,6 +785,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
        struct ipoib_tx_buf *tx_req;
        u64 addr = 0;
        int rc;
+       unsigned long flags;
 
        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -820,11 +821,28 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                }
        }
 
+       spin_lock_irqsave(&priv->lock, flags);
+       if (++priv->tx_outstanding == ipoib_sendq_size) {
+               ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+                         tx->qp->qp_num);
+               netif_stop_queue(dev);
+       }
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       if (netif_queue_stopped(dev)) {
+               rc = ib_req_notify_cq(priv->send_cq,
+                       IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+               if (rc < 0)
+                       ipoib_warn(priv, "request notify on send CQ failed\n");
+               else if (rc)
+                       ipoib_send_comp_handler(priv->send_cq, dev);
+       }
+
        if (skb_shinfo(skb)->nr_frags) {
                if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
-                       return;
+                       goto dec_outstanding;
                }
                rc = post_send_sg(priv, tx, tx->tx_head &
                                  (ipoib_sendq_size - 1),
@@ -835,7 +853,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
-                       return;
+                       goto dec_outstanding;
                }
 
                tx_req->mapping[0] = addr;
@@ -851,22 +869,20 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                ++dev->stats.tx_errors;
                ipoib_cm_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
+               goto dec_outstanding;
        } else {
                dev->trans_start = jiffies;
                ++tx->tx_head;
-
-               if (++priv->tx_outstanding == ipoib_sendq_size) {
-                       ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
-                                 tx->qp->qp_num);
-                       netif_stop_queue(dev);
-                       rc = ib_req_notify_cq(priv->send_cq,
-                               IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-                       if (rc < 0)
-                               ipoib_warn(priv, "request notify on send CQ failed\n");
-                       else if (rc)
-                               ipoib_send_comp_handler(priv->send_cq, dev);
-               }
        }
+
+       return;
+
+dec_outstanding:
+       spin_lock_irqsave(&priv->lock, flags);
+       --priv->tx_outstanding;
+       if (netif_queue_stopped(dev))
+               netif_wake_queue(dev);
+       spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
@@ -899,10 +915,13 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        netif_tx_lock(dev);
 
        ++tx->tx_tail;
+
+       spin_lock_irqsave(&priv->lock, flags);
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 1fb82d84aaa4fc3f7957ae64ecc0b94914daf77b..fe85c6ba1b030549fdd7e1b981a95a343c0f0151 100644
@@ -376,6 +376,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;
+       unsigned long flags;
 
        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -396,10 +397,13 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        dev_kfree_skb_any(tx_req->skb);
 
        ++priv->tx_tail;
+
+       spin_lock_irqsave(&priv->lock, flags);
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
@@ -553,6 +557,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;
+       unsigned long flags;
 
        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -602,12 +607,22 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
+       spin_lock_irqsave(&priv->lock, flags);
        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }
+       spin_unlock_irqrestore(&priv->lock, flags);
+       if (netif_queue_stopped(dev)) {
+               rc = ib_req_notify_cq(priv->send_cq,
+                       IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+               if (rc < 0)
+                       ipoib_warn(priv, "request notify on send CQ failed\n");
+               else if (rc)
+                       ipoib_send_comp_handler(priv->send_cq, dev);
+       }
 
        skb_orphan(skb);
        skb_dst_drop(skb);
@@ -617,11 +632,13 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
+               spin_lock_irqsave(&priv->lock, flags);
                --priv->tx_outstanding;
-               ipoib_dma_unmap_tx(priv->ca, tx_req);
-               dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
+               spin_unlock_irqrestore(&priv->lock, flags);
+               ipoib_dma_unmap_tx(priv->ca, tx_req);
+               dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
 
@@ -840,6 +857,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;
+       unsigned long flags;
 
        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);
@@ -872,7 +890,9 @@ int ipoib_ib_dev_stop(struct net_device *dev)
                                ipoib_dma_unmap_tx(priv->ca, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
+                               spin_lock_irqsave(&priv->lock, flags);
                                --priv->tx_outstanding;
+                               spin_unlock_irqrestore(&priv->lock, flags);
                        }
 
                        for (i = 0; i < ipoib_recvq_size; ++i) {