        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;
+       skb_frag_t *frag;
 
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
@@ ... @@
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
 
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
-           !lflags) {
+           skb_frags_readable(skb) && !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
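
The push path that follows copies every fragment through a CPU mapping, which
is impossible when the skb carries unreadable netmem frags (for example devmem
TCP buffers), so the condition gains a skb_frags_readable() check. A minimal
sketch of the intent, where can_push_xmit() is a hypothetical helper and not a
function in the driver:

    /* Hypothetical helper mirroring the gate added above: the copy-based
     * push path is only legal when every frag has CPU-readable payload. */
    static bool can_push_xmit(struct bnxt *bp, struct sk_buff *skb,
                              unsigned int free_size, unsigned int length,
                              __le32 lflags)
    {
            return free_size == bp->tx_ring_size &&
                   length <= bp->tx_push_thresh &&
                   skb_frags_readable(skb) &&   /* no net_iov frags */
                   !lflags;
    }
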
@@ ... @@
                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;
 
+                       frag = &skb_shinfo(skb)->frags[j];
                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;
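
skb_frag_address_safe() gives the copy loop a second line of defense: it
returns NULL for a frag whose payload the CPU cannot address (including
net_iov-backed frags), and the code then falls back to the normal DMA path via
normal_tx instead of copying. The skb_frags_readable() gate above should
already have kept such skbs out of this path, so this is belt and braces.
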
@@ ... @@
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        txbd0 = txbd;
        for (i = 0; i < last_frag; i++) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+               frag = &skb_shinfo(skb)->frags[i];
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
 
@@ ... @@
                        goto tx_dma_error;
 
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_addr_set(tx_buf, mapping, mapping);
+               netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+                                         mapping, mapping);
 
                txbd->tx_bd_haddr = cpu_to_le64(mapping);
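
netmem_dma_unmap_addr_set() replaces dma_unmap_addr_set() here so that an
unmap cookie is only recorded for page-backed frags; a net_iov's DMA mapping
is owned by the netmem provider and the driver must not unmap it later. A
sketch of the assumed semantics (sketch_netmem_dma_unmap_addr_set() is
illustrative, not the real macro from include/net/netmem.h):

    /* Assumed semantics: store a real unmap cookie for page-backed frags,
     * and 0 for net_iovs so the eventual unmap becomes a no-op. */
    #define sketch_netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL) \
            do {                                                           \
                    if (netmem_is_net_iov(NETMEM))                         \
                            dma_unmap_addr_set(PTR, ADDR_NAME, 0);         \
                    else                                                   \
                            dma_unmap_addr_set(PTR, ADDR_NAME, VAL);       \
            } while (0)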
 
@@ ... @@
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                              skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              DMA_TO_DEVICE);
+               frag = &skb_shinfo(skb)->frags[i];
+               netmem_dma_unmap_page_attrs(&pdev->dev,
+                                           dma_unmap_addr(tx_buf, mapping),
+                                           skb_frag_size(frag),
+                                           DMA_TO_DEVICE, 0);
        }
 
 tx_free:
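
The tx_dma_error unwind above (and the completion path further down) switch
from dma_unmap_page() to netmem_dma_unmap_page_attrs(), the counterpart of the
zeroed cookie: a DMA address of 0 marks a mapping the driver does not own and
is skipped. A sketch of the assumed behaviour, with sketch_netmem_dma_unmap()
as an illustrative stand-in:

    /* Assumed behaviour: skip frags whose unmap cookie was recorded as 0
     * (the mapping belongs to the netmem provider), otherwise fall through
     * to the regular unmap. */
    static inline void sketch_netmem_dma_unmap(struct device *dev,
                                               dma_addr_t dma, size_t size,
                                               enum dma_data_direction dir,
                                               unsigned long attrs)
    {
            if (!dma)
                    return;
            dma_unmap_page_attrs(dev, dma, size, dir, attrs);
    }
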
@@ ... @@
        u16 hw_cons = txr->tx_hw_cons;
        unsigned int tx_bytes = 0;
        u16 cons = txr->tx_cons;
+       skb_frag_t *frag;
        int tx_pkts = 0;
        bool rc = false;
 
@@ ... @@
                last = tx_buf->nr_frags;
 
                for (j = 0; j < last; j++) {
+                       frag = &skb_shinfo(skb)->frags[j];
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
-                       dma_unmap_page(
-                               &pdev->dev,
-                               dma_unmap_addr(tx_buf, mapping),
-                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
-                               DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                if (unlikely(is_ts_pkt)) {
                        if (BNXT_CHIP_P5(bp)) {
@@ ... @@
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
 
                        tx_buf = &txr->tx_buf_ring[ring_idx];
-                       dma_unmap_page(&pdev->dev,
-                                      dma_unmap_addr(tx_buf, mapping),
-                                      skb_frag_size(frag), DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                dev_kfree_skb(skb);
        }
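
Note that the conversion covers every site that unmaps TX fragments: the
completion handler, the tx_dma_error unwind in bnxt_start_xmit(), and the
ring-teardown path above. Leaving any one of them on plain dma_unmap_page()
would have the driver unmap an address it never mapped once net_iov frags
start flowing.
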
@@ ... @@
        if (BNXT_SUPPORTS_QUEUE_API(bp))
                dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
        dev->request_ops_lock = true;
+       dev->netmem_tx = true;
 
        rc = register_netdev(dev);
        if (rc)
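
Setting dev->netmem_tx is the driver's opt-in: it declares that every TX
unmap site is netmem-aware, and only then will the core hand unreadable skbs
to this ndo_start_xmit. A conceptual sketch of that gate
(sketch_netmem_tx_allowed() is an assumption about where and how the core
checks the flag, not the literal check):

    /* Conceptual gate (assumption): unreadable skbs are only allowed on
     * devices that declared netmem-safe TX handling. */
    static bool sketch_netmem_tx_allowed(const struct net_device *dev,
                                         const struct sk_buff *skb)
    {
            return skb_frags_readable(skb) || dev->netmem_tx;
    }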