i40e: refactor tail_bump check
author		Carolyn Wyborny <carolyn.wyborny@intel.com>
		Thu, 4 Aug 2016 18:37:03 +0000 (11:37 -0700)
committer	Dhaval Giani <dhaval.giani@oracle.com>
		Wed, 8 Mar 2017 00:31:41 +0000 (19:31 -0500)
Orabug: 24568124

This patch refactors the tail bump check in i40e_tx_map() and i40evf_tx_map(): the two back-to-back conditionals on tail_bump are collapsed into a single if/else, so tail_bump is tested only once, and the stray blank lines around the branch are dropped.
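
The resulting control flow, shown as a sketch (identifiers taken from the hunks below):

	/* notify HW of packet */
	if (!tail_bump) {
		/* no doorbell this time; warm the cache line for the
		 * next descriptor write
		 */
		prefetchw(tx_desc + 1);
	} else {
		/* order the descriptor stores ahead of the MMIO
		 * doorbell on weakly ordered architectures
		 */
		wmb();
		/* publish the new tail index so the NIC fetches the
		 * freshly written descriptors
		 */
		writel(i, tx_ring->tail);
	}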

Change-ID: Ide0e19171d67d90cb2b06b8dcd4fa791ae120160
Signed-off-by: Carolyn Wyborny <carolyn.wyborny@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
(cherry picked from commit ffeac83685facb6f0829b898d567056eae8097f8)
Signed-off-by: Brian Maly <brian.maly@oracle.com>
Signed-off-by: Dhaval Giani <dhaval.giani@oracle.com>
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c634e2b9210eba9a01580d0282ba26f122eb13b3..8cfff5643dd3ba7539e0b2c2fc0227601e366800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2829,10 +2829,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                  I40E_TXD_QW1_CMD_SHIFT);
 
        /* notify HW of packet */
-       if (!tail_bump)
+       if (!tail_bump) {
                prefetchw(tx_desc + 1);
-
-       if (tail_bump) {
+       } else {
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
@@ -2841,7 +2840,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                wmb();
                writel(i, tx_ring->tail);
        }
-
        return;
 
 dma_error:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index cd8ed31dedf515abcc3ff6008652f4f4dfdf8877..3ef570feb73d768d86979fa16d53fb07d5255e95 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2060,10 +2060,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                  I40E_TXD_QW1_CMD_SHIFT);
 
        /* notify HW of packet */
-       if (!tail_bump)
+       if (!tail_bump) {
                prefetchw(tx_desc + 1);
-
-       if (tail_bump) {
+       } else {
                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
@@ -2072,7 +2071,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                wmb();
                writel(i, tx_ring->tail);
        }
-
        return;
 
 dma_error:
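
Both hunks make the same change, and both preserve the standard descriptor-ring doorbell ordering. A minimal sketch of that pattern, using only the kernel primitives already present in the hunks (wmb(), writel()) and the ring fields named above:

	/* Doorbell pattern preserved by both hunks (sketch, not the
	 * full transmit path): the write barrier makes the descriptor
	 * stores visible to the device before the tail update on
	 * weakly ordered architectures; writel() then stores the new
	 * tail index to the ring's MMIO tail register, telling the
	 * hardware there are fresh descriptors to fetch.
	 */
	wmb();
	writel(i, tx_ring->tail);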