From 44d0abba567613e01333c7de9e2d03d05962e450 Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Tue, 16 Jun 2015 11:47:12 -0700
Subject: [PATCH] fm10k: Don't assume page fragments are page size

This change pulls out the optimization that assumed that all fragments
would be limited to page size. That hasn't been the case for some time
now, and the assumption is incorrect, as the TCP allocator can provide
up to a 32K page fragment.

Signed-off-by: Alexander Duyck
Acked-by: Jacob Keller
Tested-by: Krishneil Singh
Signed-off-by: Jeff Kirsher

Orabug: 25394529
(cherry picked from commit aae072e363bed4e91c00d57f753c799276ddb161)
Signed-off-by: Jack Vogel
---
 drivers/net/ethernet/intel/fm10k/fm10k_main.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index c754b2027281..d2b017e57753 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1105,9 +1105,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	struct fm10k_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	unsigned short f;
-#endif
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
 	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1115,12 +1113,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	 * + 2 desc gap to keep tail from touching head
 	 * otherwise try next time
 	 */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-	count += skb_shinfo(skb)->nr_frags;
-#endif
+
 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
-- 
2.50.1
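
Editor's note (not part of the commit): the descriptor math this patch fixes
can be checked in isolation. Below is a minimal userspace sketch, assuming
TXD_USE_COUNT() is DIV_ROUND_UP(size, FM10K_MAX_DATA_PER_TXD) and that
FM10K_MAX_DATA_PER_TXD is 16K, as in the fm10k driver headers; verify both
against fm10k.h before relying on the exact values. It shows why the removed
"#else" branch undercounts once a fragment exceeds FM10K_MAX_DATA_PER_TXD:

	/* Userspace model of the fm10k TX descriptor-count logic.
	 * Assumptions (from the fm10k driver headers, verify there):
	 * FM10K_MAX_DATA_PER_TXD is 16K and TXD_USE_COUNT() rounds up.
	 */
	#include <stdio.h>

	#define FM10K_MAX_DATA_PER_TXD	(1 << 14)	/* 16KB per descriptor */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)

	int main(void)
	{
		/* A 32K fragment, as the TCP allocator can now hand out. */
		unsigned int frag_size = 32 * 1024;

		/* Removed "#else" path: one descriptor per fragment. */
		unsigned int old_count = 1;

		/* Kept path: one descriptor per 16K chunk of the fragment. */
		unsigned int new_count = TXD_USE_COUNT(frag_size);

		printf("old=%u new=%u\n", old_count, new_count);	/* old=1 new=2 */
		return 0;
	}

Undercounting matters because fm10k_maybe_stop_tx() uses "count" to decide
whether the packet fits in the ring: an estimate that is too low can admit a
packet that needs more descriptors than are free, so the per-fragment loop
must run unconditionally.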