This patch adds support for DMA_ATTR_SKIP_CPU_SYNC and
DMA_ATTR_WEAK_ORDERING. Enabling both of these for the Rx path yields
performance improvements on architectures that implement either one:
with DMA_ATTR_SKIP_CPU_SYNC, page mapping and unmapping only has to
sync the region that is actually used instead of the entire buffer,
and DMA_ATTR_WEAK_ORDERING provides a further improvement on
architectures, such as SPARC, that can associate a relaxed memory
ordering with a DMA buffer.
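
In rough outline, the Rx path now maps the whole page once with both
attributes set, syncs only the region the hardware actually wrote
before the CPU reads it, and unmaps without another full-buffer sync.
A minimal sketch of that flow, assuming the legacy struct dma_attrs
API and the dma_map_page_attrs()/dma_unmap_page_attrs() helpers this
tree provides (the function and parameter names below are
illustrative, not the driver's):

	static void rx_dma_sketch(struct device *dev, struct page *page,
				  unsigned int off, unsigned int len)
	{
		DEFINE_DMA_ATTRS(attrs);
		dma_addr_t dma;

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);

		/* map the whole page once; SKIP_CPU_SYNC suppresses the
		 * automatic full-buffer sync at map time
		 */
		dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					 DMA_FROM_DEVICE, &attrs);
		if (dma_mapping_error(dev, dma))
			return;

		/* sync only the region the device wrote, not PAGE_SIZE */
		dma_sync_single_range_for_cpu(dev, dma, off, len,
					      DMA_FROM_DEVICE);

		/* unmap without another full-buffer sync */
		dma_unmap_page_attrs(dev, dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, &attrs);
	}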
Change-ID: If176824e8231c5b24b8a5d55b339a6026738fc75
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Orabug: 26396243
Local modifications to account for the dma_attr data type difference
(illustrated in the sketch below).
(cherry picked from commit 59605bc09630c2b577858c371edf89c099b5f925)
Signed-off-by: Jack Vogel <jack.vogel@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
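
For reference, the data type difference the local modifications cover:
newer upstream kernels pass DMA attributes as a plain unsigned long
bitmask by value, while this tree still uses the older struct
dma_attrs bitmap built with DEFINE_DMA_ATTRS()/dma_set_attr(). A
minimal sketch of the two call styles (dev and page are placeholders):

	/* upstream style: attributes OR'ed into an unsigned long */
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				 DMA_ATTR_SKIP_CPU_SYNC |
				 DMA_ATTR_WEAK_ORDERING);

	/* this tree: attributes set one by one in a struct dma_attrs
	 * and passed by pointer
	 */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				 &attrs);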
**/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
+ DEFINE_DMA_ATTRS(dattrs);
u16 i;
/* ring already cleared, nothing to do */
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ I40E_DMA_ATTRS(&dattrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ &dattrs);
__free_pages(rx_bi->page, 0);
rx_bi->page = NULL;
struct i40e_rx_buffer *bi)
{
struct page *page = bi->page;
+ DEFINE_DMA_ATTRS(dattrs);
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ I40E_DMA_ATTRS(&dattrs);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ &dattrs);
/* if mapping failed free memory back to system since
 * there isn't much point in holding memory we can't use
 */
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
unsigned int size =
(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ DEFINE_DMA_ATTRS(dattrs);
struct i40e_rx_buffer *rx_buffer;
struct page *page;
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ I40E_DMA_ATTRS(&dattrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, &dattrs);
}
/* clear contents of buffer_info */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+/* legacy dma_attrs API: enum attrs are bit indices; set each one */
+#define I40E_DMA_ATTRS(x) do { \
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, (x)); \
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, (x)); \
+} while (0)
+
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
+ DEFINE_DMA_ATTRS(dattrs);
u16 i;
/* ring already cleared, nothing to do */
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ I40E_DMA_ATTRS(&dattrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ &dattrs);
__free_pages(rx_bi->page, 0);
rx_bi->page = NULL;
struct i40e_rx_buffer *bi)
{
struct page *page = bi->page;
+ DEFINE_DMA_ATTRS(dattrs);
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ I40E_DMA_ATTRS(&dattrs);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE,
+ &dattrs);
/* if mapping failed free memory back to system since
 * there isn't much point in holding memory we can't use
 */
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
unsigned int size =
(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ DEFINE_DMA_ATTRS(dattrs);
struct i40e_rx_buffer *rx_buffer;
struct page *page;
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ I40E_DMA_ATTRS(&dattrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, &dattrs);
}
/* clear contents of buffer_info */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+/* legacy dma_attrs API: enum attrs are bit indices; set each one */
+#define I40E_DMA_ATTRS(x) do { \
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, (x)); \
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, (x)); \
+} while (0)
+
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)