/* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256       256
 #define IGB_RXBUFFER_2048      2048
+#define IGB_RXBUFFER_3072      3072
 #define IGB_RX_HDR_LEN         IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN         16
-#define IGB_RX_BUFSZ           IGB_RXBUFFER_2048
 
 #define IGB_SKB_PAD            (NET_SKB_PAD + NET_IP_ALIGN)
 #if (PAGE_SIZE < 8192)
 };
 
 enum e1000_ring_flags_t {
+       IGB_RING_FLAG_RX_3K_BUFFER,
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
 };
 
+#define ring_uses_large_buffer(ring) \
+       test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+       set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+       clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
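+/* Rx buffer size in use on a given ring: 3K when the large-buffer
+ * flag is set (only possible when PAGE_SIZE < 8192), 2K otherwise.
+ */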
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_large_buffer(ring))
+               return IGB_RXBUFFER_3072;
+#endif
+       return IGB_RXBUFFER_2048;
+}
+
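+/* page allocation order backing each Rx buffer: order-1 when the
+ * ring uses 3K buffers on small-page systems, order-0 otherwise.
+ */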
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_large_buffer(ring))
+               return 1;
+#endif
+       return 0;
+}
+
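+/* total size in bytes of the page(s) backing each Rx buffer */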
+#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
+
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)      \
 
                                          16, 1,
                                          page_address(buffer_info->page) +
                                                      buffer_info->page_offset,
-                                         IGB_RX_BUFSZ, true);
+                                         igb_rx_bufsz(rx_ring), true);
                                }
                        }
                }
 
        /* set descriptor configuration */
        srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-       srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
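+       /* packet buffer size must match the ring's Rx buffer size */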
+       if (ring_uses_large_buffer(ring))
+               srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        if (hw->mac.type >= e1000_82580)
                srrctl |= E1000_SRRCTL_TIMESTAMP;
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+                                 struct igb_ring *rx_ring)
+{
+       /* set buffer size flags */
+       clear_ring_uses_large_buffer(rx_ring);
+
+       if (adapter->flags & IGB_FLAG_RX_LEGACY)
+               return;
+
+#if (PAGE_SIZE < 8192)
+       if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+               return;
+
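+       /* larger frames get 3K buffers backed by order-1 pages */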
+       set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  *  igb_configure_rx - Configure receive Unit after Reset
  *  @adapter: board private structure
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+               igb_set_rx_buffer_len(adapter, rx_ring);
+               igb_configure_rx_ring(adapter, rx_ring);
+       }
 }
 
 /**
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              buffer_info->dma,
                                              buffer_info->page_offset,
-                                             IGB_RX_BUFSZ,
+                                             igb_rx_bufsz(rx_ring),
                                              DMA_FROM_DEVICE);
 
                /* free resources associated with mapping */
                dma_unmap_page_attrs(rx_ring->dev,
                                     buffer_info->dma,
-                                    PAGE_SIZE,
+                                    igb_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
                                     IGB_RX_DMA_ATTR);
                __page_frag_cache_drain(buffer_info->page,
 
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
                                  struct page *page,
-                                 unsigned int truesize)
+                                 const unsigned int truesize)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
 
                return false;
 
        /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+       rx_buffer->page_offset ^= truesize;
 #else
        /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;
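+
+/* last offset at which a 2K buffer plus the skb_shared_info overhead
+ * still fits within the page
+ */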
+#define IGB_LAST_OFFSET \
+       (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
 
-       if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+       if (rx_buffer->page_offset > IGB_LAST_OFFSET)
                return false;
 #endif
 
        struct page *page = rx_buffer->page;
        void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = IGB_RX_BUFSZ;
+       unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
                 * any references we are holding to it
                 */
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-                                    PAGE_SIZE, DMA_FROM_DEVICE,
+                                    igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
                                     IGB_RX_DMA_ATTR);
                __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
        }
                return true;
 
        /* alloc new page for storage */
-       page = dev_alloc_page();
+       page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_failed++;
                return false;
        }
 
        /* map page for use */
-       dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
-                                DMA_FROM_DEVICE, IGB_RX_DMA_ATTR);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                igb_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                IGB_RX_DMA_ATTR);
 
        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
-               __free_page(page);
+               __free_pages(page, igb_rx_pg_order(rx_ring));
 
                rx_ring->rx_stats.alloc_failed++;
                return false;
        union e1000_adv_rx_desc *rx_desc;
        struct igb_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
+       u16 bufsz;
 
        /* nothing to do */
        if (!cleaned_count)
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
+       bufsz = igb_rx_bufsz(rx_ring);
+
        do {
                if (!igb_alloc_mapped_page(rx_ring, bi))
                        break;
 
                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-                                                bi->page_offset,
-                                                IGB_RX_BUFSZ,
+                                                bi->page_offset, bufsz,
                                                 DMA_FROM_DEVICE);
 
                /* Refresh the desc even if buffer_addrs didn't change