fm10k: Don't assume page fragments are page size
author	Alexander Duyck <alexander.h.duyck@redhat.com>
	Tue, 16 Jun 2015 18:47:12 +0000 (11:47 -0700)
committer	Chuck Anderson <chuck.anderson@oracle.com>
	Sun, 26 Feb 2017 06:02:49 +0000 (22:02 -0800)
This change pulls out the optimization that assumed all fragments
would be limited to page size.  That hasn't been the case for some time
now, and the assumption is incorrect since the TCP allocator can provide
up to a 32K page fragment.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Orabug: 25394529

(cherry picked from commit aae072e363bed4e91c00d57f753c799276ddb161)
Signed-off-by: Jack Vogel <jack.vogel@oracle.com>
drivers/net/ethernet/intel/fm10k/fm10k_main.c

index c754b2027281f8a2c0b18c079b31c2b7420eedbf..d2b017e57753e005a0544785a3f0d4ac1e4c0ecf 100644 (file)
@@ -1105,9 +1105,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
        struct fm10k_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        unsigned short f;
-#endif
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
        /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1115,12 +1113,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
         *       + 2 desc gap to keep tail from touching head
         * otherwise try next time
         */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-       count += skb_shinfo(skb)->nr_frags;
-#endif
+
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
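
For context, here is a minimal user-space sketch of the descriptor budgeting this
patch restores: TXD_USE_COUNT() rounds a buffer size up to FM10K_MAX_DATA_PER_TXD-sized
chunks, so a 32K fragment needs two descriptors where the removed #else branch would
have budgeted only one per fragment.  The 16 KiB value of FM10K_MAX_DATA_PER_TXD below
is an assumption derived from the driver's FM10K_MAX_TXD_PWR and is illustrative only,
not part of this commit.

	/* Sketch of the worst-case Tx descriptor count math; values assumed. */
	#include <stdio.h>

	#define FM10K_MAX_TXD_PWR	14
	#define FM10K_MAX_DATA_PER_TXD	(1u << FM10K_MAX_TXD_PWR)	/* assumed 16 KiB */

	/* Mirrors TXD_USE_COUNT(S) == DIV_ROUND_UP(S, FM10K_MAX_DATA_PER_TXD) */
	static unsigned int txd_use_count(unsigned int size)
	{
		return (size + FM10K_MAX_DATA_PER_TXD - 1) / FM10K_MAX_DATA_PER_TXD;
	}

	int main(void)
	{
		/* A 4 KiB page-sized fragment fits in one descriptor, so the old
		 * "count += nr_frags" shortcut was safe when fragments never
		 * exceeded PAGE_SIZE. */
		printf("4K frag  -> %u descriptor(s)\n", txd_use_count(4096));

		/* A 32 KiB fragment from the TCP allocator needs two descriptors;
		 * budgeting only one undercounts and risks exhausting the ring. */
		printf("32K frag -> %u descriptor(s)\n", txd_use_count(32768));

		return 0;
	}

With fragments no longer bounded by PAGE_SIZE, looping over skb_shinfo(skb)->frags and
summing TXD_USE_COUNT() per fragment is the only safe way to size the reservation passed
to fm10k_maybe_stop_tx().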