@@ ... @@
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
+                                        unsigned int *offset,
                                         gfp_t gfp)
 {
        struct device *dev = &bp->pdev->dev;
        struct page *page;
 
-       page = page_pool_dev_alloc_pages(rxr->page_pool);
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+                                               BNXT_RX_PAGE_SIZE);
+       } else {
+               page = page_pool_dev_alloc_pages(rxr->page_pool);
+               *offset = 0;
+       }
        if (!page)
                return NULL;
 
-       *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-                                     DMA_ATTR_WEAK_ORDERING);
+       *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+                                     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
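
The frag path above exists because the chip's RX buffer descriptor carries a
16-bit length, so one RX buffer can never cover a full 64K page; bnxt.h caps
the per-buffer size at 32K. A sketch of the relevant definitions, paraphrased
from bnxt.h (comment wording mine):

    /* The RXBD length field is 16 bits, so page sizes >= 64K cannot be
     * posted as a single buffer; cap the RX buffer at 32K.
     */
    #if (PAGE_SHIFT > 15)
    #define BNXT_RX_PAGE_SHIFT 15
    #else
    #define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
    #endif
    #define BNXT_RX_PAGE_SIZE  (1 << BNXT_RX_PAGE_SHIFT)

On 4K-page systems PAGE_SIZE == BNXT_RX_PAGE_SIZE, so the else branch keeps
the old whole-page behaviour, with *offset pinned to 0.
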
@@ ... @@ bnxt_alloc_rx_data()
        dma_addr_t mapping;
 
        if (BNXT_RX_PAGE_MODE(bp)) {
+               unsigned int offset;
                struct page *page =
-                       __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+                       __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
                if (!page)
                        return -ENOMEM;
 
                mapping += bp->rx_dma_offset;
                rx_buf->data = page;
-               rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+               rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ ... @@ bnxt_alloc_rx_page()
        unsigned int offset = 0;
 
        if (BNXT_RX_PAGE_MODE(bp)) {
-               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+               page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
                if (!page)
                        return -ENOMEM;
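
Both the RX ring and the aggregation ring allocators now pass the frag offset
through. A minimal illustration of the page_pool frag contract the driver is
relying on, assuming a 64K-page kernel, <linux/sizes.h>, and a pool created
with PP_FLAG_PAGE_FRAG (pool, p1 and p2 are hypothetical, not driver code):

    unsigned int offset;
    struct page *p1, *p2;

    /* Each call carves a 32K frag out of a pooled page and reports the
     * frag's byte offset within that page.
     */
    p1 = page_pool_dev_alloc_frag(pool, &offset, SZ_32K); /* fresh page,
                                                             offset == 0 */
    p2 = page_pool_dev_alloc_frag(pool, &offset, SZ_32K); /* typically the
                                                             same page,
                                                             offset == SZ_32K */

The pool only opens a new page once the current one is used up, so two RX
buffers usually share one 64K page here.
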
@@ ... @@ bnxt_rx_multi_page_skb()
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-                            DMA_ATTR_WEAK_ORDERING);
-       skb = build_skb(page_address(page), PAGE_SIZE);
+       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                            bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+       skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
        if (!skb) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
-       skb_reserve(skb, bp->rx_dma_offset);
+       skb_reserve(skb, bp->rx_offset);
        __skb_put(skb, len);
 
        return skb;
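
Two fixes land in the hunk above. build_skb() must be given the start of the
frag, which is no longer page_address(page) once a frag can begin 32K into
the page, and the headroom to reserve is bp->rx_offset (the old code reserved
bp->rx_dma_offset, the DMA-side offset). The pointer arithmetic the new code
relies on, spelled out (illustrative comments only):

    /* bnxt_alloc_rx_data() stored:
     *     data_ptr = page_address(page) + offset + bp->rx_offset
     * so:
     *     data_ptr - bp->rx_offset == page_address(page) + offset
     * i.e. the first byte of this BNXT_RX_PAGE_SIZE frag, which is the
     * buffer start that build_skb() needs.
     */
    skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
    skb_reserve(skb, bp->rx_offset);  /* skb->data lands back on data_ptr */
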
@@ ... @@ bnxt_rx_page_skb()
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
-       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-                            DMA_ATTR_WEAK_ORDERING);
+       dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+                            bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ ... @@ bnxt_rx_page_skb()
 
        skb_mark_for_recycle(skb);
        off = (void *)data_ptr - page_address(page);
-       skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+       skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

@@ ... @@ bnxt_rx_agg_pages_skb()
 
        skb->data_len += total_frag_len;
        skb->len += total_frag_len;
-       skb->truesize += PAGE_SIZE * agg_bufs;
+       skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
        return skb;
 }
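
The truesize change keeps socket memory accounting in step with what an
aggregation buffer actually consumes once pages are shared. Illustrative
arithmetic for a 64K-page system (values assumed, not measured):

    /* BNXT_RX_PAGE_SIZE == SZ_32K, agg_bufs == 2, e.g. both frags carved
     * from one 64K page:
     *   old: truesize += PAGE_SIZE * agg_bufs         = 128K (overcounts)
     *   new: truesize += BNXT_RX_PAGE_SIZE * agg_bufs =  64K (actual usage)
     */
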
 
@@ ... @@ bnxt_free_one_rx_ring_skbs()
                rx_buf->data = NULL;
                if (BNXT_RX_PAGE_MODE(bp)) {
                        mapping -= bp->rx_dma_offset;
-                       dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-                                            bp->rx_dir,
+                       dma_unmap_page_attrs(&pdev->dev, mapping,
+                                            BNXT_RX_PAGE_SIZE, bp->rx_dir,
                                             DMA_ATTR_WEAK_ORDERING);
                        page_pool_recycle_direct(rxr->page_pool, data);
                } else {
@@ ... @@ bnxt_alloc_rx_page_pool()
        pp.napi = &rxr->bnapi->napi;
        pp.dev = &bp->pdev->dev;
        pp.dma_dir = DMA_BIDIRECTIONAL;
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+               pp.flags |= PP_FLAG_PAGE_FRAG;
 
        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
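
The frag API only works on a pool that was created for it: on kernels of this
vintage, page_pool_alloc_frag() WARNs and returns NULL when the pool lacks
PP_FLAG_PAGE_FRAG, which is why the flag is set exactly when the driver will
carve pages up. A minimal sketch of the resulting setup, assuming the bnxt
types above (example_create_frag_pool() is hypothetical, and pool_size is an
illustrative value, not the driver's):

    #include <net/page_pool.h>

    static struct page_pool *example_create_frag_pool(struct bnxt *bp,
                                                      struct bnxt_rx_ring_info *rxr)
    {
            struct page_pool_params pp = { 0 };

            pp.pool_size = 1024;            /* illustrative sizing */
            pp.napi = &rxr->bnapi->napi;
            pp.dev = &bp->pdev->dev;
            pp.dma_dir = DMA_BIDIRECTIONAL;
            if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
                    pp.flags |= PP_FLAG_PAGE_FRAG;

            return page_pool_create(&pp);
    }
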