*/
 static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
 {
-       return ice_is_xdp_ena_vsi(rx_ring->vsi) ? XDP_PACKET_HEADROOM : 0;
+       if (ice_ring_uses_build_skb(rx_ring))
+               return ICE_SKB_PAD;
+       else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+               return XDP_PACKET_HEADROOM;
+
+       return 0;
 }
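+/* With ice_rx_offset() above, a build_skb ring reserves ICE_SKB_PAD bytes of
+ * headroom (illustratively ~192 bytes with 4K pages and 64-byte cache lines,
+ * see ice_skb_pad()), an XDP-enabled ring without build_skb reserves
+ * XDP_PACKET_HEADROOM (256 bytes), and the legacy receive path keeps the
+ * packet data at offset 0.  Exact values depend on page size and
+ * architecture.
+ */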
 
 /**
                struct sk_buff *skb, unsigned int size)
 {
 #if (PAGE_SIZE >= 8192)
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
 #else
        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 #endif
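+       /* On PAGE_SIZE >= 8192 systems the headroom in front of the data is
+        * part of the buffer's cost, so truesize must cover it as well; e.g.
+        * a 3000 byte fragment with an illustrative 192 byte offset accounts
+        * for SKB_DATA_ALIGN(3192) = 3200 bytes, assuming 64-byte cache
+        * lines.  On smaller pages the fixed half-page truesize already
+        * includes the headroom.
+        */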
 
                xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
                rcu_read_unlock();
-               if (xdp_res) {
-                       if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
-                               unsigned int truesize;
+               if (!xdp_res)
+                       goto construct_skb;
+               if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+                       unsigned int truesize;
 
 #if (PAGE_SIZE < 8192)
-                               truesize = ice_rx_pg_size(rx_ring) / 2;
+                       truesize = ice_rx_pg_size(rx_ring) / 2;
 #else
-                               truesize = SKB_DATA_ALIGN(size);
+                       truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
+                                                 size);
 #endif
-                               xdp_xmit |= xdp_res;
-                               ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
-                       } else {
-                               rx_buf->pagecnt_bias++;
-                       }
-                       total_rx_bytes += size;
-                       total_rx_pkts++;
-
-                       cleaned_count++;
-                       ice_put_rx_buf(rx_ring, rx_buf);
-                       continue;
+                       xdp_xmit |= xdp_res;
+                       ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+               } else {
+                       rx_buf->pagecnt_bias++;
                }
+               total_rx_bytes += size;
+               total_rx_pkts++;
+
+               cleaned_count++;
+               ice_put_rx_buf(rx_ring, rx_buf);
+               continue;
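+               /* a zero xdp_res means the frame was not consumed by XDP
+                * (e.g. XDP_PASS), so fall back to building an skb around
+                * the buffer, as before
+                */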
 construct_skb:
                if (skb)
                        ice_add_rx_frag(rx_ring, rx_buf, skb, size);
 
 #define ICE_RX_BUF_WRITE       16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG   128
 
+/* Attempt to maximize the headroom available for incoming frames. We use a 2K
+ * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
+ * This leaves us with 512 bytes of room.  From that we need to deduct the
+ * space needed for the shared info and the padding needed to IP align the
+ * frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *       up negative.  In these cases we should fall back to the legacy
+ *       receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define ICE_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
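+/* For illustration (assuming skb_shared_info is roughly 320 bytes, which is
+ * architecture dependent): with 64-byte cache lines NET_SKB_PAD is 64, so
+ * 64 + 1536 = 1600 <= SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728 and the
+ * 2K buffer is big enough.  With 256-byte cache lines NET_SKB_PAD is 256 and
+ * the shared info aligns up to 512, so 256 + 1536 = 1792 > 2048 - 512 = 1536
+ * and we must fall back as described in the note above.
+ */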
+
+/**
+ * ice_compute_pad - compute the padding
+ * @rx_buf_len: buffer length
+ *
+ * Round the given buffer length up to a multiple of half a page, then
+ * subtract the skb_shared_info overhead and the buffer length itself;
+ * whatever is left over is the space available for padding.
+ */
+static inline int ice_compute_pad(int rx_buf_len)
+{
+       int half_page_size;
+
+       half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+       return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
+}
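+/* Illustrative example (4K pages, 64-byte cache lines, skb_shared_info of
+ * roughly 320 bytes assumed): ice_compute_pad(1536) aligns 1536 up to the
+ * 2048 byte half page, subtracts the ~320 byte shared info overhead and the
+ * 1536 byte buffer, and returns about 192 bytes of padding.
+ */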
+
+/**
+ * ice_skb_pad - determine the padding that we can supply
+ *
+ * Figure out the right Rx buffer size and, based on that, calculate the
+ * padding that we can supply.
+ */
+static inline int ice_skb_pad(void)
+{
+       int rx_buf_len;
+
+       /* If a 2K buffer cannot handle a standard Ethernet frame then
+        * optimize padding for a 3K buffer instead of a 1.5K buffer.
+        *
+        * For a 3K buffer we need to add enough padding to allow for
+        * tailroom due to NET_IP_ALIGN possibly shifting us out of
+        * cache-line alignment.
+        */
+       if (ICE_2K_TOO_SMALL_WITH_PADDING)
+               rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+       else
+               rx_buf_len = ICE_RXBUF_1536;
+
+       /* if needed make room for NET_IP_ALIGN */
+       rx_buf_len -= NET_IP_ALIGN;
+
+       return ice_compute_pad(rx_buf_len);
+}
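+/* On a typical x86_64 system (NET_IP_ALIGN of 0, 64-byte cache lines)
+ * ICE_2K_TOO_SMALL_WITH_PADDING is false, rx_buf_len stays at 1536 and
+ * ICE_SKB_PAD evaluates to roughly 192 bytes of headroom; the XDP path keeps
+ * using XDP_PACKET_HEADROOM (256) instead.  Exact values depend on
+ * struct skb_shared_info and the architecture.
+ */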
+
+#define ICE_SKB_PAD ice_skb_pad()
+#else
+#define ICE_2K_TOO_SMALL_WITH_PADDING false
+#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
 /* We are assuming that the cache line is always 64 Bytes here for ice.
  * In order to make sure that is a correct assumption there is a check in probe
  * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
         * in their own cache line if possible
         */
 #define ICE_TX_FLAGS_RING_XDP          BIT(0)
+#define ICE_RX_FLAGS_RING_BUILD_SKB    BIT(1)
        u8 flags;
        dma_addr_t dma;                 /* physical address of ring */
        unsigned int size;              /* length of descriptor ring in bytes */
        u8 dcb_tc;                      /* Traffic class of ring */
 } ____cacheline_internodealigned_in_smp;
 
+static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+{
+       return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
+}
+
+static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+{
+       ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
+}
+
+static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+{
+       ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
+}
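+/* Illustrative usage (the actual configuration call site is outside this
+ * hunk): a ring that is set up for the build_skb receive path would call
+ *
+ *	ice_set_ring_build_skb_ena(ring);
+ *
+ * after which ice_rx_offset() reserves ICE_SKB_PAD bytes of headroom in
+ * front of the packet data; clearing the flag falls back to
+ * XDP_PACKET_HEADROOM (for XDP rings) or no headroom at all.
+ */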
+
 static inline bool ice_ring_is_xdp(struct ice_ring *ring)
 {
        return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);