Make gve_rx_copy() take the padding as an argument instead of hard-coding
GVE_RX_PAD inside the helper. Future use cases will have a different
padding value.
Signed-off-by: Bailey Forrest <bcf@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
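
The helper's behaviour is unchanged for the callers touched here: they now
pass GVE_RX_PAD explicitly, and the offset arithmetic inside gve_rx_copy()
uses the parameter instead of the constant. A minimal userspace sketch of
that arithmetic, assuming the driver's two-halves-per-page buffer layout;
the struct, function names, and the GVE_RX_PAD value below are illustrative
stand-ins for this note, not gve code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE  4096
#define GVE_RX_PAD 2            /* placeholder value for this sketch */

/* Illustrative stand-in for struct gve_rx_slot_page_info. */
struct slot_page_info {
        void *page_address;
        uint8_t page_offset;    /* 0 = first half of the page, 1 = second */
};

/*
 * Models the copy path after this patch: the padding is a parameter,
 * so a future caller can pass something other than GVE_RX_PAD.
 */
static void *rx_copy(const struct slot_page_info *page_info,
                     uint16_t len, uint16_t pad)
{
        char *va = (char *)page_info->page_address + pad +
                   (page_info->page_offset ? PAGE_SIZE / 2 : 0);
        void *buf = malloc(len);

        if (buf)
                memcpy(buf, va, len);
        return buf;
}

int main(void)
{
        static char page[PAGE_SIZE];
        struct slot_page_info info = {
                .page_address = page,
                .page_offset = 1,
        };

        /* Existing callers keep today's behaviour via GVE_RX_PAD... */
        void *a = rx_copy(&info, 64, GVE_RX_PAD);
        /* ...while a future user could pass its own padding, e.g. none. */
        void *b = rx_copy(&info, 64, 0);

        printf("%s\n", (a && b) ? "copied" : "alloc failed");
        free(a);
        free(b);
        return 0;
}

Since every existing call site passes GVE_RX_PAD, the change is
behaviour-preserving; only the helper's signature and the header
declaration change, as in the hunks below.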
                        gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
                }
        } else {
-               skb = gve_rx_copy(netdev, napi, page_info, len);
+               skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
                if (skb) {
                        u64_stats_update_begin(&rx->statss);
                        rx->rx_copied_pkt++;
 
        if (len <= priv->rx_copybreak) {
                /* Just copy small packets */
-               skb = gve_rx_copy(dev, napi, page_info, len);
+               skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
                u64_stats_update_begin(&rx->statss);
                rx->rx_copied_pkt++;
                rx->rx_copybreak_pkt++;
 
 }
 
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
-                           struct gve_rx_slot_page_info *page_info, u16 len)
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad)
 {
        struct sk_buff *skb = napi_alloc_skb(napi, len);
-       void *va = page_info->page_address + GVE_RX_PAD +
+       void *va = page_info->page_address + pad +
                   (page_info->page_offset ? PAGE_SIZE / 2 : 0);
 
        if (unlikely(!skb))
 
 void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
 
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
-                           struct gve_rx_slot_page_info *page_info, u16 len);
+                           struct gve_rx_slot_page_info *page_info, u16 len,
+                           u16 pad);
 
 #endif /* _GVE_UTILS_H */