From 7f50c48dfc8b505fed4c1ff02c47194361e620df Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Tue, 14 Mar 2017 10:15:22 -0700
Subject: [PATCH] i40e/i40evf: Use length to determine if descriptor is done

This change makes it so that we use the length of the packet instead of
the DD status bit to determine if a new descriptor is ready to be
processed.  The obvious advantage is that it cuts down on reads, since
we don't even need the DD bit: the transition from zero to a non-zero
length is by itself enough to tell us that the packet has been
completed.

Change-ID: Iebdf9cdb36c454ef092df27199b92ad09c374231
Signed-off-by: Alexander Duyck
Tested-by: Andrew Bowers
Signed-off-by: Jeff Kirsher
Orabug: 26785018
(cherry picked from commit d57c0e08c70162feab9ccab085fc34095d2dfd11)
Signed-off-by: Jack Vogel
Reviewed-by: Kyle Fortin
---
 drivers/net/ethernet/intel/i40e/i40e_txrx.c   | 26 +++++++++----------
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 26 +++++++++----------
 2 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1db8e00945a9e..c285dee613ab5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1752,6 +1752,7 @@ add_tail_frag:
  * i40e_fetch_rx_buffer - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
+ * @size: size of buffer to add to skb
  *
  * This function allocates an skb on the fly, and populates it with the page
  * data from the current receive descriptor, taking care to set up the skb
@@ -1761,16 +1762,12 @@ add_tail_frag:
 static inline
 struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
 				     union i40e_rx_desc *rx_desc,
-				     struct sk_buff *skb)
+				     struct sk_buff *skb,
+				     unsigned int size)
 {
-	u64 local_status_error_len =
-		le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	unsigned int size =
-		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-	DEFINE_DMA_ATTRS(dattrs);
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
+	DEFINE_DMA_ATTRS(dattrs);
 
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	page = rx_buffer->page;
@@ -1885,6 +1882,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
 	while (likely(total_rx_packets < budget)) {
 		union i40e_rx_desc *rx_desc;
+		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
 		u64 qword;
@@ -1901,19 +1899,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* status_error_len will always be zero for unused descriptors
 		 * because it's cleared in cleanup, and overlaps with hdr_addr
 		 * which is always zero because packet split isn't used, if the
-		 * hardware wrote DD then it will be non-zero
+		 * hardware wrote DD then the length will be non-zero
 		 */
-		if (!i40e_test_staterr(rx_desc,
-				       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
 
-		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb, size);
 		if (!skb)
 			break;
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 1745611695e00..02013d4c8476d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1105,6 +1105,7 @@ add_tail_frag:
  * i40evf_fetch_rx_buffer - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
+ * @size: size of buffer to add to skb
  *
  * This function allocates an skb on the fly, and populates it with the page
  * data from the current receive descriptor, taking care to set up the skb
@@ -1114,16 +1115,12 @@ add_tail_frag:
 static inline
 struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
 				       union i40e_rx_desc *rx_desc,
-				       struct sk_buff *skb)
+				       struct sk_buff *skb,
+				       unsigned int size)
 {
-	u64 local_status_error_len =
-		le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-	unsigned int size =
-		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-	DEFINE_DMA_ATTRS(dattrs);
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
+	DEFINE_DMA_ATTRS(dattrs);
 
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	page = rx_buffer->page;
@@ -1233,6 +1230,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
 	while (likely(total_rx_packets < budget)) {
 		union i40e_rx_desc *rx_desc;
+		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
 		u64 qword;
@@ -1249,19 +1247,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* status_error_len will always be zero for unused descriptors
 		 * because it's cleared in cleanup, and overlaps with hdr_addr
 		 * which is always zero because packet split isn't used, if the
-		 * hardware wrote DD then it will be non-zero
+		 * hardware wrote DD then the length will be non-zero
 		 */
-		if (!i40e_test_staterr(rx_desc,
-				       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
+		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		if (!size)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
+		 * any other fields out of the rx_desc until we have
+		 * verified the descriptor has been written back.
 		 */
 		dma_rmb();
 
-		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb, size);
 		if (!skb)
 			break;
 
-- 
2.50.1
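
For reference, the completion test this patch introduces can be written as a
small standalone C sketch.  The mask and shift below mirror the
I40E_RXD_QW1_LENGTH_PBUF_* definitions in i40e_type.h (a 14-bit length field
at bit 38 of qword1), but the rx_desc struct and the rx_desc_done() helper
are simplified, hypothetical stand-ins for illustration, not driver types:

#include <stdbool.h>
#include <stdint.h>

/* Length field of qword1 in the Rx write-back descriptor; these mirror
 * the I40E_RXD_QW1_LENGTH_PBUF_* definitions in i40e_type.h.
 */
#define RXD_QW1_LENGTH_PBUF_SHIFT	38
#define RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << RXD_QW1_LENGTH_PBUF_SHIFT)

/* Hypothetical, simplified stand-in for union i40e_rx_desc: only the
 * qword this check reads.  The driver byte-swaps it with le64_to_cpu().
 */
struct rx_desc {
	volatile uint64_t status_error_len;
};

/* Returns true and reports the packet length once hardware has written
 * the descriptor back.  Unused descriptors are zeroed during cleanup,
 * and the length field overlaps hdr_addr (always zero without packet
 * split), so a non-zero length can only come from a hardware write-back;
 * no separate read of the DD bit is required.  Callers must still order
 * later descriptor reads behind this check (dma_rmb() in the kernel).
 */
static bool rx_desc_done(const struct rx_desc *desc, unsigned int *size)
{
	uint64_t qword = desc->status_error_len;

	*size = (qword & RXD_QW1_LENGTH_PBUF_MASK) >>
		RXD_QW1_LENGTH_PBUF_SHIFT;
	return *size != 0;
}

A poll loop built on this helper follows the same shape as the patched
i40e_clean_rx_irq(): test rx_desc_done(), issue the read barrier, and only
then fetch the buffer using the size that was already extracted, which is
what lets the single qword read replace the separate DD-bit test.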