bnx2x: add gro_check
author Dmitry Kravkov <dmitry@broadcom.com>
Mon, 20 Feb 2012 09:59:11 +0000 (09:59 +0000)
committer Joe Jin <joe.jin@oracle.com>
Tue, 28 Aug 2012 07:23:18 +0000 (15:23 +0800)
The patch provides a workaround for a bug in FW 7.2.16,
which in GRO mode may miscalculate the buffer and
place one frag fewer on the SGE than it could.
This can happen only for some MTUs; we mark these MTUs
with the gro_check flag during device initialization or
on MTU change.

The next FW should include a fix for the issue, and
the patch can then be reverted.
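
As an illustration of the arithmetic above, here is a minimal user-space
sketch (not from the driver), assuming a 4 KiB SGE buffer
(SGE_PAGE_SIZE * PAGES_PER_SGE = 4096) and hypothetical gro_size values.
When the SGE buffer size is an exact multiple of gro_size, the FW places
one frag fewer, so the driver shrinks full_page by one gro_size:

#include <stdio.h>

/* Assumed for illustration: one 4 KiB page per SGE element. */
#define SGE_PAGES 4096

/* Usable bytes per SGE, mirroring the tpa_start change in this patch. */
static unsigned int full_page_bytes(unsigned int gro_size, int gro_check)
{
        unsigned int full_page = SGE_PAGES / gro_size * gro_size;

        /*
         * FW 7.2.16 places one frag fewer when SGE_PAGES is an exact
         * multiple of gro_size; compensate the same way the patch does.
         */
        if (gro_check && !(SGE_PAGES % gro_size))
                full_page -= gro_size;

        return full_page;
}

int main(void)
{
        /* gro_size 2048 divides 4096 exactly: the workaround applies. */
        printf("gro_size 2048: %u -> %u usable bytes\n",
               full_page_bytes(2048, 0), full_page_bytes(2048, 1));

        /* gro_size 1448 (a common MSS for MTU 1500) does not: unchanged. */
        printf("gro_size 1448: %u -> %u usable bytes\n",
               full_page_bytes(1448, 0), full_page_bytes(1448, 1));
        return 0;
}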

(cherry picked from commit fe603b4d680a2bba9d8c6d4267450fcf295f30d1)
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_main.c

index ddb2939637b0ddd03ab8e18c9c9b8a737fa2f0f5..dfed2ef1cddb83ff29f4d79f7a5198c88c5959dc 100644 (file)
@@ -340,6 +340,7 @@ union db_prod {
 #define SGE_PAGE_SIZE          PAGE_SIZE
 #define SGE_PAGE_SHIFT         PAGE_SHIFT
 #define SGE_PAGE_ALIGN(addr)   PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES              (SGE_PAGE_SIZE * PAGES_PER_SGE)
 
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES       2
@@ -1209,6 +1210,7 @@ struct bnx2x {
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 /* TCP with Timestamp Option (32) + IPv6 (40) */
 #define ETH_MAX_TPA_HEADER_SIZE                72
+#define ETH_MIN_TPA_HEADER_SIZE                40
 
        /* Max supported alignment is 256 (8 shift) */
 #define BNX2X_RX_ALIGN_SHIFT           min(8, L1_CACHE_SHIFT)
@@ -1329,6 +1331,8 @@ struct bnx2x {
 
        u8                      wol;
 
+       bool                    gro_check;
+
        int                     rx_ring_size;
 
        u16                     tx_quick_cons_trip_int;
index d125bcd69f6a4f3a838ae2ab013f9f4f7ad6aa6a..fa2e2e92d62af43920f9064b9216652a40f36f38 100644 (file)
@@ -319,6 +319,16 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
                tpa_info->full_page =
                        SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+               /*
+                * FW 7.2.16 BUG workaround:
+                * if the SGE size is an exact multiple of gro_size,
+                * the FW will place one frag fewer on the SGE.
+                * The calculation is done only for potentially
+                * dangerous MTUs.
+                */
+               if (unlikely(bp->gro_check))
+                       if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
+                               tpa_info->full_page -= gro_size;
                tpa_info->gro_size = gro_size;
        }
 
@@ -3481,6 +3491,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
         */
        dev->mtu = new_mtu;
 
+       bp->gro_check = bnx2x_need_gro_check(new_mtu);
+
        return bnx2x_reload_if_running(dev);
 }
 
index 7aa4c06429f68e76c3f54f55ff1b6bdb3860e80a..315a43b4fe771c5018d71891da590eb0cbaa7a21 100644 (file)
@@ -1504,6 +1504,13 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
+
+static inline bool bnx2x_need_gro_check(int mtu)
+{
+       return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
+               (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
+}
+
 /**
  * bnx2x_bz_fp - zero content of the fastpath structure.
  *
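
For clarity, a hedged stand-alone sketch of the pre-filter added above
(assuming, as before, SGE_PAGES = 4096 and the TPA header-size bounds of
40 and 72 bytes from bnx2x.h): an MTU is flagged when the integer quotient
SGE_PAGES / gro_size is not constant over the gro_size values that MTU
allows, i.e. when some gro_size in that range might divide SGE_PAGES
exactly. The test is deliberately cheap and slightly conservative; the
exact SGE_PAGES % gro_size check still runs per aggregation in
bnx2x_tpa_start, but only when gro_check is set.

#include <stdio.h>

/* Assumed for illustration: one 4 KiB page per SGE element. */
#define SGE_PAGES                 4096
/* TPA header-size bounds from bnx2x.h (max = TCP w/ timestamps + IPv6). */
#define ETH_MAX_TPA_HEADER_SIZE     72
#define ETH_MIN_TPA_HEADER_SIZE     40

/* Mirror of bnx2x_need_gro_check(). */
static int need_gro_check(int mtu)
{
        return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
               (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
}

int main(void)
{
        /* MTU 1500: gro_size range [1428, 1460]; quotient stays 2 -> safe. */
        printf("mtu 1500 -> gro_check %d\n", need_gro_check(1500));

        /* MTU 2088: gro_size may be 2048, which divides 4096 -> flagged. */
        printf("mtu 2088 -> gro_check %d\n", need_gro_check(2088));
        return 0;
}
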
index a945fc2146e0ce63509eec11393d672df868b2b4..1ace3d5e14cc217cbc8e14ce64d05af1a0418027 100644 (file)
@@ -10214,6 +10214,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
        if (CHIP_IS_E3B0(bp))
                bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 
+       bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
+
        return rc;
 }