ixgbe: Add support for padding packet
author		Alexander Duyck <alexander.h.duyck@intel.com>
		Tue, 17 Jan 2017 16:36:54 +0000 (08:36 -0800)
committer	Jack Vogel <jack.vogel@oracle.com>
		Fri, 16 Jun 2017 06:01:23 +0000 (23:01 -0700)
This patch adds support for providing a buffer with headroom and tailroom
to allow for shared info, NET_SKB_PAD, and NET_IP_ALIGN.  Combined with the
DMA changes, this lets us start using build_skb to build frames around an
incoming Rx buffer instead of having to memcpy the headers.
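
For illustration, the receive path this layout enables would look roughly
like the sketch below. This is modeled on the follow-up build_skb patch,
not code contained in this commit, and assumes 4K pages with 2K half-page
buffers:

    /* Illustrative sketch only: wrap a padded Rx buffer in an skb
     * without copying headers.  Assumes page_offset was seeded with
     * IXGBE_SKB_PAD when the buffer was allocated.
     */
    static struct sk_buff *build_skb_sketch(struct ixgbe_rx_buffer *rx_buffer,
                                            unsigned int size)
    {
            /* page_offset points at the received data; the headroom
             * sits immediately before it in the same buffer
             */
            void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
            struct sk_buff *skb;

            /* hand build_skb the whole 2K buffer, headroom and
             * skb_shared_info tailroom included
             */
            skb = build_skb(va - IXGBE_SKB_PAD, IXGBE_RXBUFFER_2K);
            if (unlikely(!skb))
                    return NULL;

            skb_reserve(skb, IXGBE_SKB_PAD); /* skip past the headroom */
            __skb_put(skb, size);            /* frame bytes DMA'd by hardware */

            return skb;
    }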

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Orabug: 26242766
(cherry picked from commit 2de6aa3a666e63699978f81d0d5523e7e0778f7b)
Signed-off-by: Jack Vogel <jack.vogel@oracle.com>
Reviewed-by: Ethan Zhao <ethan.zhao@oracle.com>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 86b373bd2660c7527f165af95e5bcd29a50e785e..d452dbef4df5e44e3482fb851516f641f7d96d9c 100644
 #define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
+#define IXGBE_SKB_PAD          (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBE_MAX_FRAME_BUILD_SKB \
+       (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#else
+#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#endif
+
 /*
  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
  * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
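
Concretely, on a typical 4K-page x86-64 build, where NET_SKB_PAD is 64,
NET_IP_ALIGN is 0, and skb_shared_info costs the 320 bytes noted in the
comment above, the new macros work out to:

    IXGBE_SKB_PAD             = 64 + 0     = 64
    SKB_WITH_OVERHEAD(2048)   = 2048 - 320 = 1728
    IXGBE_MAX_FRAME_BUILD_SKB = 1728 - 64  = 1664

That still holds a standard 1518-byte Ethernet frame (1500-byte MTU plus
Ethernet header and FCS); anything larger must fall back to a 3K buffer,
as handled later in this patch.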
@@ -219,6 +227,7 @@ struct ixgbe_rx_queue_stats {
 
 enum ixgbe_ring_state_t {
        __IXGBE_RX_3K_BUFFER,
+       __IXGBE_RX_BUILD_SKB_ENABLED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
        __IXGBE_RX_FCOE,
@@ -228,6 +237,9 @@ enum ixgbe_ring_state_t {
        __IXGBE_HANG_CHECK_ARMED,
 };
 
+#define ring_uses_build_skb(ring) \
+       test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
 struct ixgbe_fwd_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct net_device *netdev;
@@ -339,6 +351,10 @@ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
        if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
                return IXGBE_RXBUFFER_3K;
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_build_skb(ring))
+               return IXGBE_MAX_FRAME_BUILD_SKB;
+#endif
        return IXGBE_RXBUFFER_2K;
 }
 
@@ -536,6 +552,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
 #define IXGBE_FLAG2_EEE_CAPABLE                        BIT(14)
 #define IXGBE_FLAG2_EEE_ENABLED                        BIT(15)
+#define IXGBE_FLAG2_RX_LEGACY                  BIT(16)
 
        /* Tx fast path data */
        int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4fd826cec779ca9a78c17afcb9c4eb13e3410c8d..4bbd7f5cd65273e1af683a2a5205d630b84be7a0 100644
@@ -1558,6 +1558,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
        }
 }
 
+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
@@ -1592,7 +1597,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = ixgbe_rx_offset(rx_ring);
 
        return true;
 }
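
With page_offset seeded this way, in build_skb mode the DMA no longer
starts at the beginning of the page. Using the x86-64 values worked out
above, each 2K half-page buffer is laid out as:

    [    0 ..   63 ]  IXGBE_SKB_PAD headroom (NET_SKB_PAD + NET_IP_ALIGN)
    [   64 .. 1727 ]  frame data, at most IXGBE_MAX_FRAME_BUILD_SKB (1664) bytes
    [ 1728 .. 2047 ]  tailroom for struct skb_shared_info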
@@ -3388,7 +3393,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-       srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+               srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
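
Note that SRRCTL is now programmed with the raw 2K/3K buffer size rather
than ixgbe_rx_bufsz(): the BSIZEPKT field is in 1KB units
(IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so the 1664-byte logical buffer used
in build_skb mode cannot be expressed here:

    IXGBE_RXBUFFER_2K >> 10 = 2  /* hardware sees a 2KB buffer */
    IXGBE_RXBUFFER_3K >> 10 = 3  /* hardware sees a 3KB buffer */

The hardware is told the full buffer size, and the usable frame length is
capped separately via RLPML in ixgbe_configure_rx_ring() below.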
@@ -3722,6 +3730,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                 */
                rxdctl &= ~0x3FFFFF;
                rxdctl |=  0x080420;
+#if (PAGE_SIZE < 8192)
+       } else {
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+                           IXGBE_RXDCTL_RLPML_EN);
+
+               /* Limit the maximum frame size so we don't overrun the skb */
+               if (ring_uses_build_skb(ring) &&
+                   !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+                       rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+                                 IXGBE_RXDCTL_RLPML_EN;
+#endif
        }
 
        /* initialize Rx descriptor 0 */
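
For build_skb rings on small-page systems, RLPML (the receive long packet
maximum length) now caps incoming frames at IXGBE_MAX_FRAME_BUILD_SKB, so
the hardware can never DMA into the skb_shared_info tailroom; with the
values above, the rxdctl length field carries 1664 with
IXGBE_RXDCTL_RLPML_EN set. 3K-buffer rings are left at the default limit,
since the full 3K buffer is already safe to fill.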
@@ -3867,12 +3886,26 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 
                clear_ring_rsc_enabled(rx_ring);
                clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
 
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
 
                if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
                        set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+               if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+                       continue;
+
+               set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+               if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
        }
 }
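
Summarizing the resulting buffer policy, here is a hypothetical helper,
with an illustrative name not taken from the driver, restating the
decisions above for a 4K-page system:

    /* Illustrative restatement of ixgbe_set_rx_buffer_len()'s ring-state
     * logic on PAGE_SIZE < 8192 systems: does this ring need a 3K buffer?
     */
    static bool ring_wants_3k_buffer(const struct ixgbe_adapter *adapter,
                                     unsigned int max_frame, bool fcoe)
    {
            if (fcoe)
                    return true;  /* FCoE always gets the larger buffer */
            if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
                    return false; /* legacy Rx path stays on plain 2K buffers */
            if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                    return true;  /* RSC aggregates past the 1664-byte limit */
            /* padded 2K buffers only hold 1664 bytes; jumbo frames need 3K */
            return max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN);
    }

Rings that do not take the 3K path run build_skb (unless legacy mode is
set) against padded 2K buffers capped at IXGBE_MAX_FRAME_BUILD_SKB.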