        struct mana_port_context *apc = netdev_priv(ndev);
        struct bpf_prog *old_prog;
        struct gdma_context *gc;
+       int err;
 
        gc = apc->ac->gdma_dev->gdma_context;
 
         */
        apc->bpf_prog = prog;
 
-       if (old_prog)
-               bpf_prog_put(old_prog);
+       if (apc->port_is_up) {
+               /* Re-create the rxqs after the XDP program is loaded or
+                * unloaded, e.g. to switch from full pages to smaller page
+                * fragments when the program is unloaded, and vice versa.
+                */
+
+               /* Pre-allocate buffers to prevent failure in mana_attach */
+               err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+               if (err) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "XDP: Insufficient memory for tx/rx re-config");
+                       return err;
+               }
+
+               err = mana_detach(ndev, false);
+               if (err) {
+                       netdev_err(ndev,
+                                  "mana_detach failed at xdp set: %d\n", err);
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "XDP: Re-config failed at detach");
+                       goto err_dealloc_rxbuffs;
+               }
+
+               err = mana_attach(ndev);
+               if (err) {
+                       netdev_err(ndev,
+                                  "mana_attach failed at xdp set: %d\n", err);
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "XDP: Re-config failed at attach");
+                       goto err_dealloc_rxbuffs;
+               }
 
-       if (apc->port_is_up)
                mana_chn_setxdp(apc, prog);
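+               /* Release any remaining pre-allocated RX buffers */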
+               mana_pre_dealloc_rxbufs(apc);
+       }
+
+       if (old_prog)
+               bpf_prog_put(old_prog);
 
        if (prog)
                ndev->max_mtu = MANA_XDP_MTU_MAX;
        else
                ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
 
        return 0;
+
+err_dealloc_rxbuffs:
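+       /* Restore the previous program and release pre-allocated buffers */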
+       apc->bpf_prog = old_prog;
+       mana_pre_dealloc_rxbufs(apc);
+       return err;
 }
 
 int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
 
                return true;
 }
 
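+/* Return an RX page to its pool, or drop the reference if not pool-backed */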
+static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
+                            bool from_pool)
+{
+       if (from_pool)
+               page_pool_put_full_page(rxq->page_pool, page, false);
+       else
+               put_page(page);
+}
+
 /* Microsoft Azure Network Adapter (MANA) functions */
 
 static int mana_open(struct net_device *ndev)
 }
 
-/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
+/* Get RX buffer's data size, alloc size, XDP headroom and frag count
+ * based on MTU and whether an XDP program is attached
+ */
-static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
-                              u32 *headroom)
+static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
+                              int mtu, u32 *datasize, u32 *alloc_size,
+                              u32 *headroom, u32 *frag_count)
 {
-       if (mtu > MANA_XDP_MTU_MAX)
-               *headroom = 0; /* no support for XDP */
-       else
-               *headroom = XDP_PACKET_HEADROOM;
+       u32 len, buf_size;
 
-       *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+       /* Calculate datasize first (consistent across all cases) */
+       *datasize = mtu + ETH_HLEN;
 
-       /* Using page pool in this case, so alloc_size is PAGE_SIZE */
-       if (*alloc_size < PAGE_SIZE)
-               *alloc_size = PAGE_SIZE;
+       /* For XDP and jumbo frames, make sure only one packet fits per page */
+       if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+               if (mana_xdp_get(apc)) {
+                       *headroom = XDP_PACKET_HEADROOM;
+                       *alloc_size = PAGE_SIZE;
+               } else {
+                       *headroom = 0; /* no support for XDP */
+                       *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
+                                                    *headroom);
+               }
 
-       *datasize = mtu + ETH_HLEN;
+               *frag_count = 1;
+               return;
+       }
+
+       /* Standard MTU case - optimize for multiple packets per page */
+       *headroom = 0;
+
+       /* Calculate base buffer size needed */
+       len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+       buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
+
+       /* Calculate how many packets can fit in a page */
+       *frag_count = PAGE_SIZE / buf_size;
+       *alloc_size = buf_size;
 }
 
 int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
        void *va;
        int i;
 
-       mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
-                          &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+       mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
+                          &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
+                          &mpc->rxbpre_frag_count);
 
        dev = mpc->ac->gdma_dev->gdma_context->dev;
 
 
 drop:
        if (from_pool) {
-               page_pool_recycle_direct(rxq->page_pool,
-                                        virt_to_head_page(buf_va));
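+               /* Whole pages recycle directly; fragments return by VA */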
+               if (rxq->frag_count == 1)
+                       page_pool_recycle_direct(rxq->page_pool,
+                                                virt_to_head_page(buf_va));
+               else
+                       page_pool_free_va(rxq->page_pool, buf_va, true);
        } else {
                WARN_ON_ONCE(rxq->xdp_save_va);
                /* Save for reuse */
                             dma_addr_t *da, bool *from_pool)
 {
        struct page *page;
+       u32 offset;
        void *va;
 
        *from_pool = false;
 
-       /* Reuse XDP dropped page if available */
-       if (rxq->xdp_save_va) {
-               va = rxq->xdp_save_va;
-               rxq->xdp_save_va = NULL;
-       } else {
-               page = page_pool_dev_alloc_pages(rxq->page_pool);
-               if (!page)
+       /* Jumbo frames and XDP use a single buffer per page, so page
+        * fragments are not used for them.
+        */
+       if (rxq->frag_count == 1) {
+               /* Reuse XDP dropped page if available */
+               if (rxq->xdp_save_va) {
+                       va = rxq->xdp_save_va;
+                       page = virt_to_head_page(va);
+                       rxq->xdp_save_va = NULL;
+               } else {
+                       page = page_pool_dev_alloc_pages(rxq->page_pool);
+                       if (!page)
+                               return NULL;
+
+                       *from_pool = true;
+                       va = page_to_virt(page);
+               }
+
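+               /* One buffer per page: the driver maps and unmaps it itself */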
+               *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+                                    DMA_FROM_DEVICE);
+               if (dma_mapping_error(dev, *da)) {
+                       mana_put_rx_page(rxq, page, *from_pool);
                        return NULL;
+               }
 
-               *from_pool = true;
-               va = page_to_virt(page);
+               return va;
        }
 
-       *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
-                            DMA_FROM_DEVICE);
-       if (dma_mapping_error(dev, *da)) {
-               if (*from_pool)
-                       page_pool_put_full_page(rxq->page_pool, page, false);
-               else
-                       put_page(virt_to_head_page(va));
-
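+       /* Fragment path: the pool is created with PP_FLAG_DMA_MAP, so the
+        * DMA address comes from the pool instead of dma_map_single()
+        */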
+       page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
+                                       rxq->alloc_size);
+       if (!page)
                return NULL;
-       }
+
+       va  = page_to_virt(page) + offset;
+       *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
+       *from_pool = true;
 
        return va;
 }
        va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
        if (!va)
                return;
 
-       dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
-                        DMA_FROM_DEVICE);
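+       /* Only buffers mapped with dma_map_single() (non-pool, or one
+        * buffer per page) need an explicit unmap here
+        */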
+       if (!rxoob->from_pool || rxq->frag_count == 1)
+               dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+                                DMA_FROM_DEVICE);
        *old_buf = rxoob->buf_va;
        *old_fp = rxoob->from_pool;
 
                if (!rx_oob->buf_va)
                        continue;
 
-               dma_unmap_single(dev, rx_oob->sgl[0].address,
-                                rx_oob->sgl[0].size, DMA_FROM_DEVICE);
-
                page = virt_to_head_page(rx_oob->buf_va);
 
-               if (rx_oob->from_pool)
-                       page_pool_put_full_page(rxq->page_pool, page, false);
-               else
-                       put_page(page);
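+               /* Unmap only buffers the driver mapped itself; pool-mapped
+                * fragments are handed back to the pool by VA
+                */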
+               if (rxq->frag_count == 1 || !rx_oob->from_pool) {
+                       dma_unmap_single(dev, rx_oob->sgl[0].address,
+                                        rx_oob->sgl[0].size, DMA_FROM_DEVICE);
+                       mana_put_rx_page(rxq, page, rx_oob->from_pool);
+               } else {
+                       page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
+               }
 
                rx_oob->buf_va = NULL;
        }
        struct page_pool_params pprm = {};
        int ret;
 
-       pprm.pool_size = mpc->rx_queue_size;
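+       /* Each page backs frag_count RX buffers, so fewer pages are needed */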
+       pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
        pprm.nid = gc->numa_node;
        pprm.napi = &rxq->rx_cq.napi;
        pprm.netdev = rxq->ndev;
        pprm.order = get_order(rxq->alloc_size);
+       pprm.queue_idx = rxq->rxq_idx;
+       pprm.dev = gc->dev;
+
+       /* Let the page pool do the dma map when page sharing with multiple
+        * fragments enabled for rx buffers.
+        */
+       if (rxq->frag_count > 1) {
+               pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+               pprm.max_len = PAGE_SIZE;
+               pprm.dma_dir = DMA_FROM_DEVICE;
+       }
 
        rxq->page_pool = page_pool_create(&pprm);
 
        rxq->rxq_idx = rxq_idx;
        rxq->rxobj = INVALID_MANA_HANDLE;
 
-       mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
-                          &rxq->headroom);
 
+       mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+                          &rxq->headroom, &rxq->frag_count);
        /* Create page pool for RX queue */
        err = mana_create_page_pool(rxq, gc);
        if (err) {