 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
 
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
-                            dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+                               struct gve_rx_slot_page_info *page_info,
+                               dma_addr_t addr, struct page *page,
+                               __be64 *slot_addr)
 {
        page_info->page = page;
        page_info->page_offset = 0;
        page_info->page_address = page_address(page);
+       page_info->buf_size = rx->packet_buffer_size;
        *slot_addr = cpu_to_be64(addr);
        /* The page already has 1 ref */
        page_ref_add(page, INT_MAX - 1);
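
(The hunk above stamps each slot's metadata from the ring, so it assumes a matching buf_size member added to struct gve_rx_slot_page_info in gve.h, which this excerpt does not show. A sketch of that declaration as the uses below imply it; the field's exact type and position are guesses:)

/* gve.h (sketch, abridged to the fields this excerpt touches):
 * buf_size is the member the series adds; its type and placement
 * here are assumptions based on how gve_rx.c reads it.
 */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset;	/* offset to write to in page */
	u16 buf_size;		/* assumed: RX buffer size backing this slot */
	u16 pad;		/* adjusted for skb data alignment */
	u8 can_flip;		/* page half can be flipped back to the device */
};
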
                return err;
        }
 
-       gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+       gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
        return 0;
 }
 
                        struct page *page = rx->data.qpl->pages[i];
                        dma_addr_t addr = i * PAGE_SIZE;
 
-                       gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+                       gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+                                           page,
                                            &rx->data.data_ring[i].qpl_offset);
                        continue;
                }
                        rx->qpl_copy_pool[j].page = page;
                        rx->qpl_copy_pool[j].page_offset = 0;
                        rx->qpl_copy_pool[j].page_address = page_address(page);
+                       rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
 
                        /* The page already has 1 ref. */
                        page_ref_add(page, INT_MAX - 1);
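
(Note that the qpl_copy_pool entries are initialized by hand rather than through gve_setup_rx_buffer(), so they need the same buf_size stamp separately; the copy path below then sizes its frags from copy_page_info->buf_size instead of the ring-wide value.)
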
 
        rx->gve = priv;
        rx->q_num = idx;
+       rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 
        rx->mask = slots - 1;
        rx->data.raw_addressing = cfg->raw_addressing;
        rx->db_threshold = slots / 2;
        gve_rx_init_ring_state_gqi(rx);
 
-       rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
        gve_rx_ctx_clear(&rx->ctx);
 
        return 0;
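
(Moving the packet_buffer_size default up in the alloc path looks cosmetic but appears to be load-bearing: the prefill loop shown earlier now stamps each slot's buf_size from the ring, and it runs before the old assignment point. A sketch of the ordering constraint; the prefill helper's name and error handling are assumed:)

/* Sketch of gve_rx_alloc_ring_gqi() ordering (unrelated setup
 * omitted). Before this patch the default was assigned after
 * prefill, which would now leave every slot's buf_size zeroed.
 */
rx->gve = priv;
rx->q_num = idx;
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;	/* set first... */

err = gve_rx_prefill_pages(rx, cfg);	/* ...then copied into each slot
					 * via gve_setup_rx_buffer()
					 */
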
        copy_page_info->pad = page_info->pad;
 
        skb = gve_rx_add_frags(napi, copy_page_info,
-                              rx->packet_buffer_size, len, ctx);
+                              copy_page_info->buf_size, len, ctx);
        if (unlikely(!skb))
                return NULL;
 
         * device.
         */
        if (page_info->can_flip) {
-               skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+               skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+                                      len, ctx);
                /* No point in recycling if we didn't get the skb */
                if (skb) {
                        /* Make sure that the page isn't freed. */
                        skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
                                                    page_info, len, napi,
                                                    data_slot,
-                                                   rx->packet_buffer_size, ctx);
+                                                   page_info->buf_size, ctx);
                } else {
                        skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
                                         page_info, len, napi, data_slot);
                void *old_data;
                int xdp_act;
 
-               xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+               xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
                xdp_prepare_buff(&xdp, page_info->page_address +
                                 page_info->page_offset, GVE_RX_PAD,
                                 len, false);
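
(The XDP hunk follows the same pattern: the frame_sz passed to xdp_init_buff() is what the XDP core uses to bound head/tail adjustments, so it must describe the buffer actually backing this packet rather than a ring-wide value that could drift from it. Taken together, these hunks move the size from per-ring to per-buffer state, which looks like groundwork for changing the buffer size at runtime without mis-sizing buffers already posted to the device.)

/* What frame_sz means to the XDP core (general XDP layout, not
 * gve-specific):
 *
 *   data_hard_start                      data_hard_start + frame_sz
 *   |<- headroom ->|<--- packet data --->|<- tailroom ->|
 *
 * bpf_xdp_adjust_head()/bpf_xdp_adjust_tail() are validated against
 * this envelope, so an over-stated frame_sz would let a BPF program
 * write past the real buffer.
 */
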