From deeb1d02cc5c5a6321891d2f31d7088c47227815 Mon Sep 17 00:00:00 2001
From: Alexander Duyck
Date: Mon, 30 Jan 2017 12:29:35 -0800
Subject: [PATCH] i40e/i40evf: Add support for mapping pages with DMA
 attributes

This patch adds support for DMA_ATTR_SKIP_CPU_SYNC and
DMA_ATTR_WEAK_ORDERING. Enabling both of these for the Rx path yields
performance improvements on architectures that implement either one,
because page mapping and unmapping then only has to sync what is
actually being used instead of the entire buffer. In addition, the
weak ordering attribute improves performance on architectures that can
associate a memory ordering with a DMA buffer, such as Sparc.

Change-ID: If176824e8231c5b24b8a5d55b339a6026738fc75
Signed-off-by: Alexander Duyck
Tested-by: Andrew Bowers
Signed-off-by: Jeff Kirsher

Orabug: 26396243

Local modifications to account for the dma_attr data type difference.

(cherry picked from commit 59605bc09630c2b577858c371edf89c099b5f925)
Signed-off-by: Jack Vogel
Reviewed-by: Shannon Nelson
---
 drivers/net/ethernet/intel/i40e/i40e_txrx.c   | 37 ++++++++++++++++---
 drivers/net/ethernet/intel/i40e/i40e_txrx.h   |  4 ++
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 37 ++++++++++++++++---
 drivers/net/ethernet/intel/i40evf/i40e_txrx.h |  4 ++
 4 files changed, 72 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index db5f90561142..1db8e00945a9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1120,8 +1120,8 @@ err:
  **/
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
 	unsigned long bi_size;
+	DEFINE_DMA_ATTRS(dattrs);
 	u16 i;
 
 	/* ring already cleared, nothing to do */
@@ -1140,7 +1140,21 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 		if (!rx_bi->page)
 			continue;
 
-		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_bi->dma,
+					      rx_bi->page_offset,
+					      I40E_RXBUFFER_2048,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		I40E_DMA_ATTRS(&dattrs);
+		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     &dattrs);
 		__free_pages(rx_bi->page, 0);
 
 		rx_bi->page = NULL;
@@ -1253,6 +1267,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 				   struct i40e_rx_buffer *bi)
 {
 	struct page *page = bi->page;
+	DEFINE_DMA_ATTRS(dattrs);
 	dma_addr_t dma;
 
 	/* since we are recycling buffers we should seldom need to alloc */
@@ -1269,7 +1284,11 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	I40E_DMA_ATTRS(&dattrs);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 PAGE_SIZE,
+				 DMA_FROM_DEVICE,
+				 &dattrs);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -1329,6 +1348,12 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 		if (!i40e_alloc_mapped_page(rx_ring, bi))
 			goto no_buffers;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 I40E_RXBUFFER_2048,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
@@ -1743,6 +1768,7 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
 	unsigned int size =
 		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+	DEFINE_DMA_ATTRS(dattrs);
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
 
@@ -1789,8 +1815,9 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
 		rx_ring->rx_stats.page_reuse_count++;
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
-			       DMA_FROM_DEVICE);
+		I40E_DMA_ATTRS(&dattrs);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+				     DMA_FROM_DEVICE, &dattrs);
 	}
 
 	/* clear contents of buffer_info */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index f80979025c01..a3511bd822c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -133,6 +133,10 @@ enum i40e_dyn_idx_t {
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_DMA_ATTRS(x) \
+	do { dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, (x)); \
+	     dma_set_attr(DMA_ATTR_WEAK_ORDERING, (x)); } while (0)
+
 /**
  * i40e_test_staterr - tests bits in Rx descriptor status and error fields
  * @rx_desc: pointer to receive descriptor (in le64 format)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 790fdfe42f01..1745611695e0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -483,8 +483,8 @@ err:
  **/
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
 	unsigned long bi_size;
+	DEFINE_DMA_ATTRS(dattrs);
 	u16 i;
 
 	/* ring already cleared, nothing to do */
@@ -503,7 +503,21 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 		if (!rx_bi->page)
 			continue;
 
-		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_bi->dma,
+					      rx_bi->page_offset,
+					      I40E_RXBUFFER_2048,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		I40E_DMA_ATTRS(&dattrs);
+		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     &dattrs);
 		__free_pages(rx_bi->page, 0);
 
 		rx_bi->page = NULL;
@@ -616,6 +630,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 				   struct i40e_rx_buffer *bi)
 {
 	struct page *page = bi->page;
+	DEFINE_DMA_ATTRS(dattrs);
 	dma_addr_t dma;
 
 	/* since we are recycling buffers we should seldom need to alloc */
@@ -632,7 +647,11 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	I40E_DMA_ATTRS(&dattrs);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 PAGE_SIZE,
+				 DMA_FROM_DEVICE,
+				 &dattrs);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -692,6 +711,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
 		if (!i40e_alloc_mapped_page(rx_ring, bi))
 			goto no_buffers;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 I40E_RXBUFFER_2048,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
@@ -1096,6 +1121,7 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
 	unsigned int size =
 		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+	DEFINE_DMA_ATTRS(dattrs);
 	struct i40e_rx_buffer *rx_buffer;
 	struct page *page;
 
@@ -1142,8 +1168,9 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
 		rx_ring->rx_stats.page_reuse_count++;
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
-			       DMA_FROM_DEVICE);
+		I40E_DMA_ATTRS(&dattrs);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+				     DMA_FROM_DEVICE, &dattrs);
 	}
 
 	/* clear contents of buffer_info */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 17c84930f216..8ad98fb708de 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -120,6 +120,10 @@ enum i40e_dyn_idx_t {
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_DMA_ATTRS(x) \
+	do { dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, (x)); \
+	     dma_set_attr(DMA_ATTR_WEAK_ORDERING, (x)); } while (0)
+
 /**
  * i40e_test_staterr - tests bits in Rx descriptor status and error fields
  * @rx_desc: pointer to receive descriptor (in le64 format)
-- 
2.50.1
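
Backport note: the "dma_attr data type difference" mentioned in the
changelog is the v4.8 DMA API change (commit 00085f1efa38,
"dma-mapping: use unsigned long for dma_attrs"). Upstream commit
59605bc09630 passes the attribute flags directly as an unsigned long
bitmask; this tree still uses the older struct dma_attrs, in which each
DMA_ATTR_* value is an enum index that must be set by its own
dma_set_attr() call (the enum values cannot be OR'ed together), hence
the I40E_DMA_ATTRS() helper above. A minimal sketch of the two calling
conventions -- dev and page stand in for the ring's device and Rx page,
and a struct-dma_attrs flavor of dma_map_page_attrs() is assumed to
exist per the local modifications noted in the changelog:

	/* v4.8+ API: DMA attributes are an unsigned long bitmask */
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE,
				 DMA_ATTR_SKIP_CPU_SYNC |
				 DMA_ATTR_WEAK_ORDERING);

	/* pre-4.8 API: DMA attributes are a bitmap inside struct
	 * dma_attrs; set one attribute per dma_set_attr() call.
	 */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, &attrs);

Because DMA_ATTR_SKIP_CPU_SYNC suppresses the implicit full-page sync
at map/unmap time, the patch pairs each buffer hand-off with an
explicit partial sync: dma_sync_single_range_for_device() before a
descriptor is given to hardware, and dma_sync_single_range_for_cpu()
before the CPU touches the data, each covering only the
I40E_RXBUFFER_2048 region actually in use.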