enum ixgbevf_ring_state_t {
        __IXGBEVF_RX_3K_BUFFER,
+       /* RX ring builds skbs directly around ring buffers (build_skb) */
+       __IXGBEVF_RX_BUILD_SKB_ENABLED,
        __IXGBEVF_TX_DETECT_HANG,
        __IXGBEVF_HANG_CHECK_ARMED,
 };
#define clear_ring_uses_large_buffer(ring) \
        clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
 
+/* test/set/clear the per-ring build_skb state bit */
+#define ring_uses_build_skb(ring) \
+       test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define set_ring_build_skb_enabled(ring) \
+       set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define clear_ring_build_skb_enabled(ring) \
+       clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
 static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
+       /* order-1 (3K) buffers take precedence over the build_skb cap */
        if (ring_uses_large_buffer(ring))
                return IXGBEVF_RXBUFFER_3072;
+
+       /* build_skb reserves headroom, so the usable frame size is capped */
+       if (ring_uses_build_skb(ring))
+               return IXGBEVF_MAX_FRAME_BUILD_SKB;
 #endif
        return IXGBEVF_RXBUFFER_2048;
 }
 
        return true;
 }
 
+/* Headroom reserved at the start of each RX buffer: IXGBEVF_SKB_PAD
+ * when the build_skb path is enabled, zero for the legacy RX path.
+ */
+static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
+}
+
 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
                                      struct ixgbevf_rx_buffer *bi)
 {
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       /* place payload after the build_skb headroom (0 in legacy mode) */
+       bi->page_offset = ixgbevf_rx_offset(rx_ring);
        bi->pagecnt_bias = 1;
        rx_ring->rx_stats.alloc_rx_page++;
 
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       /* build_skb truesize must also account for the reserved headroom */
+       unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+                               SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+                               SKB_DATA_ALIGN(size);
 #endif
        unsigned int pull_len;
 
 
        ixgbevf_configure_srrctl(adapter, ring, reg_idx);
 
-       /* allow any size packet since we can handle overflow */
-       rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+       /* RXDCTL.RLPML does not work on 82599 */
+       if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+                           IXGBE_RXDCTL_RLPML_EN);
+
+#if (PAGE_SIZE < 8192)
+               /* Limit the maximum frame size so we don't overrun the skb */
+               /* 3K (large-buffer) rings have room; no RLPML cap needed */
+               if (ring_uses_build_skb(ring) &&
+                   !ring_uses_large_buffer(ring))
+                       rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
+                                 IXGBE_RXDCTL_RLPML_EN;
+#endif
+       }
 
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
        unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* set build_skb and buffer size flags */
+       /* start from the legacy default: no build_skb, no large buffers */
+       clear_ring_build_skb_enabled(rx_ring);
        clear_ring_uses_large_buffer(rx_ring);
 
        if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
                return;
 
+       /* build_skb is used on every non-legacy RX ring */
+       set_ring_build_skb_enabled(rx_ring);
+
 #if (PAGE_SIZE < 8192)
        if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
                return;
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       /* re-derive RX buffer sizes/flags for the new MTU */
+       if (netif_running(netdev))
+               ixgbevf_reinit_locked(adapter);
+
        return 0;
 }