ixgbe: Make use of order 1 pages and 3K buffers independent of FCoE
author Alexander Duyck <alexander.h.duyck@intel.com>
Tue, 17 Jan 2017 16:36:14 +0000 (08:36 -0800)
committer Jack Vogel <jack.vogel@oracle.com>
Fri, 16 Jun 2017 06:01:23 +0000 (23:01 -0700)
In order to support build_skb with jumbo frames, it will be necessary to use
3K buffers for the Rx path, with 8K pages backing them.  This is needed on
architectures that implement 4K pages because we can't fit 2K buffers plus
padding in a 4K page.

On systems that support page sizes larger than 4K, the 3K attribute will
only be applied to FCoE, since we can fall back to using just 2K buffers
and adding the padding.
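
As a rough sketch of the arithmetic (illustrative only; the headroom and
shared-info sizes below are assumed typical values, not taken from this
patch):

/* Illustrative build_skb buffer math, not part of the driver. */
#define EX_HEADROOM  64   /* roughly NET_SKB_PAD + NET_IP_ALIGN */
#define EX_SHINFO    320  /* roughly SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
/*
 * Half of a 4K page is 2048 bytes; after build_skb overhead only about
 * 2048 - 64 - 320 = 1664 bytes remain for frame data, so a 2K buffer
 * plus padding does not fit.
 *
 * Half of an 8K (order-1) page is 4096 bytes; a 3K (3072 byte) buffer
 * plus the same overhead is 3072 + 64 + 320 = 3456 bytes, which fits,
 * so two 3K buffers can be carved from each order-1 page.
 */

On 64K-page systems a 3K buffer and its padding already fit in an order-0
page, which is why the page-order bump below is applied only when
PAGE_SIZE < 8192.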

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Orabug: 26242766
(cherry picked from commit 4f4542bfb3b539bef118578ffafcc98e4ce91979)
Signed-off-by: Jack Vogel <jack.vogel@oracle.com>
Reviewed-by: Ethan Zhao <ethan.zhao@oracle.com>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8ca3520db99c0d21ded7ae36928c7195f8f9ad68..86b373bd2660c7527f165af95e5bcd29a50e785e 100644
@@ -218,13 +218,14 @@ struct ixgbe_rx_queue_stats {
 #define IXGBE_TS_HDR_LEN 8
 
 enum ixgbe_ring_state_t {
+       __IXGBE_RX_3K_BUFFER,
+       __IXGBE_RX_RSC_ENABLED,
+       __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+       __IXGBE_RX_FCOE,
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_XPS_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
-       __IXGBE_RX_RSC_ENABLED,
-       __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-       __IXGBE_RX_FCOE,
 };
 
 struct ixgbe_fwd_adapter {
@@ -336,19 +337,16 @@ struct ixgbe_ring_feature {
  */
 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
-                                           IXGBE_RXBUFFER_3K;
-#endif
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+               return IXGBE_RXBUFFER_3K;
        return IXGBE_RXBUFFER_2K;
 }
 
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-               return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+               return 1;
 #endif
        return 0;
 }
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 47aa1e7d7348702153ea8b6a82159f17de7d1313..9ffd0d227a2d258e07b89728ede3c73f7cb8d13f 100644
@@ -1607,6 +1607,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
+       u16 bufsz;
 
        /* nothing to do */
        if (!cleaned_count)
@@ -1616,10 +1617,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
+       bufsz = ixgbe_rx_bufsz(rx_ring);
+
        do {
                if (!ixgbe_alloc_mapped_page(rx_ring, bi))
                        break;
 
+#if 1
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset, bufsz,
+                                                DMA_FROM_DEVICE);
+
+#endif
                /*
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
@@ -1965,7 +1975,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 {
        struct page *page = rx_buffer->page;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+       unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
        unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
@@ -3849,10 +3859,15 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_ring = adapter->rx_ring[i];
+
+               clear_ring_rsc_enabled(rx_ring);
+               clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
-               else
-                       clear_ring_rsc_enabled(rx_ring);
+
+               if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
        }
 }