        pp.dma_dir = bp->rx_dir;
        pp.max_len = PAGE_SIZE;
        pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
-               pp.flags |= PP_FLAG_PAGE_FRAG;
 
        rxr->page_pool = page_pool_create(&pp);
        if (IS_ERR(rxr->page_pool)) {
 
 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
 {
        struct page_pool_params pp_params = {
-               .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
-                               PP_FLAG_DMA_SYNC_DEV,
+               .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order = hns3_page_order(ring),
                .pool_size = ring->desc_num * hns3_buf_size(ring) /
                                (PAGE_SIZE << hns3_page_order(ring)),
 
                .offset         = 0,
        };
 
-       if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048)
-               pp.flags |= PP_FLAG_PAGE_FRAG;
-
        return page_pool_create(&pp);
 }
 
 
        }
 
        pp_params.order = get_order(buf_size);
-       pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+       pp_params.flags = PP_FLAG_DMA_MAP;
        pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
        pp_params.nid = NUMA_NO_NODE;
        pp_params.dev = pfvf->dev;
 
                struct page_pool_params pp_params = { 0 };
 
                pp_params.order     = 0;
-               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+               pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pp_params.pool_size = pool_size;
                pp_params.nid       = node;
                pp_params.dev       = rq->pdev;
 
 {
        struct page_pool_params pp_params = {
                .order = 0,
-               .flags = PP_FLAG_PAGE_FRAG,
+               .flags = 0,
                .nid = NUMA_NO_NODE,
                .dev = dev->dma_dev,
        };
 
                                        * Please note DMA-sync-for-CPU is still
                                        * device driver responsibility
                                        */
-#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
 #define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV |\
-                                PP_FLAG_PAGE_FRAG)
+                                PP_FLAG_DMA_SYNC_DEV)
 
 /*
  * Fast allocation side cache array/stack
 
 /**
  * struct page_pool_params - page pool parameters
- * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
  * @order:     2^order pages on allocation
  * @pool_size: size of the ptr_ring
  * @nid:       NUMA node id to allocate from pages from
 
        unsigned int max_size = PAGE_SIZE << pool->p.order;
        struct page *page = pool->frag_page;
 
-       if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
-                   size > max_size))
+       if (WARN_ON(size > max_size))
                return NULL;
 
        size = ALIGN(size, dma_get_cache_alignment());
 
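Caller-side illustration (not part of this diff; the example_* helpers are hypothetical): with PP_FLAG_PAGE_FRAG removed, a driver that wants sub-page buffers no longer opts in at pool-creation time. It creates the pool with the remaining flags and calls page_pool_alloc_frag() directly; the only sanity check left in that path is the size-vs-order WARN_ON shown above.

	static struct page_pool *example_create_pool(struct device *dev)
	{
		struct page_pool_params pp = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,
			.pool_size	= 256,
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
			.max_len	= PAGE_SIZE,
			.offset		= 0,
		};

		/* No PP_FLAG_PAGE_FRAG needed; frag support is unconditional. */
		return page_pool_create(&pp);
	}

	static struct page *example_alloc_rx_frag(struct page_pool *pool,
						  unsigned int *offset)
	{
		/* Request a 2048-byte sub-page buffer; the size is aligned to
		 * the DMA cache line internally and checked against
		 * PAGE_SIZE << pool->p.order, as in the hunk above.
		 */
		return page_pool_alloc_frag(pool, offset, 2048, GFP_ATOMIC);
	}
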
        /* In general, avoid mixing page_pool and non-page_pool allocated
         * pages within the same SKB. Additionally avoid dealing with clones
         * with page_pool pages, in case the SKB is using page_pool fragment
-        * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+        * references (page_pool_alloc_frag()). Since we only take full page
         * references for cloned SKBs at the moment that would result in
         * inconsistent reference counts.
         * In theory we could take full references if @from is cloned and