fec16_to_cpu(bdp->cbd_sc),
                        fec32_to_cpu(bdp->cbd_bufaddr),
                        fec16_to_cpu(bdp->cbd_datlen),
-                       txq->tx_skbuff[index]);
+                       txq->tx_buf[index].skb);
                bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                index++;
        } while (bdp != txq->bd.base);
 
        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        /* Save skb pointer */
-       txq->tx_skbuff[index] = skb;
+       txq->tx_buf[index].skb = skb;
 
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.
         */

        skb_tx_timestamp(skb);
 
-       /* Make sure the update to bdp and tx_skbuff are performed before
-        * txq->bd.cur.
-        */
+       /* Make sure the update to bdp is performed before txq->bd.cur. */
        wmb();
        txq->bd.cur = bdp;
 
        }
 
        /* Save skb pointer */
-       txq->tx_skbuff[index] = skb;
+       txq->tx_buf[index].skb = skb;
 
        skb_tx_timestamp(skb);
        txq->bd.cur = bdp;
                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
-                       if (bdp->cbd_bufaddr &&
-                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-                               dma_unmap_single(&fep->pdev->dev,
-                                                fec32_to_cpu(bdp->cbd_bufaddr),
-                                                fec16_to_cpu(bdp->cbd_datlen),
-                                                DMA_TO_DEVICE);
-                       if (txq->tx_skbuff[i]) {
-                               dev_kfree_skb_any(txq->tx_skbuff[i]);
-                               txq->tx_skbuff[i] = NULL;
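+                       /* Release whatever the slot holds: unmap and free an
+                        * skb, or return an XDP frame and restore the default
+                        * FEC_TXBUF_T_SKB type.
+                        */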
+                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+                               if (bdp->cbd_bufaddr &&
+                                   !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                                       dma_unmap_single(&fep->pdev->dev,
+                                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                                        fec16_to_cpu(bdp->cbd_datlen),
+                                                        DMA_TO_DEVICE);
+                               if (txq->tx_buf[i].skb) {
+                                       dev_kfree_skb_any(txq->tx_buf[i].skb);
+                                       txq->tx_buf[i].skb = NULL;
+                               }
+                       } else {
+                               if (bdp->cbd_bufaddr)
+                                       dma_unmap_single(&fep->pdev->dev,
+                                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                                        fec16_to_cpu(bdp->cbd_datlen),
+                                                        DMA_TO_DEVICE);
+
+                               if (txq->tx_buf[i].xdp) {
+                                       xdp_return_frame(txq->tx_buf[i].xdp);
+                                       txq->tx_buf[i].xdp = NULL;
+                               }
+
+                               /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
                        }
+
                        bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
                }
 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 {
        struct  fec_enet_private *fep;
+       struct xdp_frame *xdpf;
        struct bufdesc *bdp;
        unsigned short status;
        struct  sk_buff *skb;
 
                index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-               skb = txq->tx_skbuff[index];
-               txq->tx_skbuff[index] = NULL;
-               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-                       dma_unmap_single(&fep->pdev->dev,
-                                        fec32_to_cpu(bdp->cbd_bufaddr),
-                                        fec16_to_cpu(bdp->cbd_datlen),
-                                        DMA_TO_DEVICE);
-               bdp->cbd_bufaddr = cpu_to_fec32(0);
-               if (!skb)
-                       goto skb_done;
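+               /* The slot carries either an skb or an xdp_frame; unmap the
+                * DMA buffer (TSO headers are left mapped) and pick up the
+                * matching pointer based on the slot type.
+                */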
+               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+                       skb = txq->tx_buf[index].skb;
+                       txq->tx_buf[index].skb = NULL;
+                       if (bdp->cbd_bufaddr &&
+                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       if (!skb)
+                               goto tx_buf_done;
+               } else {
+                       xdpf = txq->tx_buf[index].xdp;
+                       if (bdp->cbd_bufaddr)
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
+                       if (!xdpf) {
+                               txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+                               goto tx_buf_done;
+                       }
+               }
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
-                       ndev->stats.tx_bytes += skb->len;
-               }
 
-               /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-                * are to time stamp the packet, so we still need to check time
-                * stamping enabled flag.
-                */
-               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-                            fep->hwts_tx_en) &&
-                   fep->bufdesc_ex) {
-                       struct skb_shared_hwtstamps shhwtstamps;
-                       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-                       skb_tstamp_tx(skb, &shhwtstamps);
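+                       /* Byte count comes from the skb or the xdp_frame,
+                        * whichever this slot carried.
+                        */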
+                       if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+                               ndev->stats.tx_bytes += skb->len;
+                       else
+                               ndev->stats.tx_bytes += xdpf->len;
                }
 
                /* Deferred means some collisions occurred during transmit,
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;
 
-               /* Free the sk buffer associated with this last transmit */
-               dev_kfree_skb_any(skb);
-skb_done:
-               /* Make sure the update to bdp and tx_skbuff are performed
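+               /* Complete the transmit: report any hardware timestamp and
+                * free the skb, or return the XDP frame and reset the slot to
+                * its default FEC_TXBUF_T_SKB type.
+                */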
+               if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+                       /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+                        * are to time stamp the packet, so we still need to check time
+                        * stamping enabled flag.
+                        */
+                       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+                                    fep->hwts_tx_en) && fep->bufdesc_ex) {
+                               struct skb_shared_hwtstamps shhwtstamps;
+                               struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+                               fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+                               skb_tstamp_tx(skb, &shhwtstamps);
+                       }
+
+                       /* Free the sk buffer associated with this last transmit */
+                       dev_kfree_skb_any(skb);
+               } else {
+                       xdp_return_frame(xdpf);
+
+                       txq->tx_buf[index].xdp = NULL;
+                       /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+                       txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+               }
+
+tx_buf_done:
+               /* Make sure the update to bdp and tx_buf are performed
                 * before dirty_tx
                 */
                wmb();
                for (i = 0; i < txq->bd.ring_size; i++) {
                        kfree(txq->tx_bounce[i]);
                        txq->tx_bounce[i] = NULL;
-                       skb = txq->tx_skbuff[i];
-                       txq->tx_skbuff[i] = NULL;
-                       dev_kfree_skb(skb);
+
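+                       /* Free whichever buffer is still attached and reset
+                        * the slot to the default FEC_TXBUF_T_SKB type.
+                        */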
+                       if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+                               skb = txq->tx_buf[i].skb;
+                               txq->tx_buf[i].skb = NULL;
+                               dev_kfree_skb(skb);
+                       } else {
+                               if (txq->tx_buf[i].xdp) {
+                                       xdp_return_frame(txq->tx_buf[i].xdp);
+                                       txq->tx_buf[i].xdp = NULL;
+                               }
+
+                               txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+                       }
                }
        }
 }
                ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
-       txq->tx_skbuff[index] = NULL;
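+       /* Record the frame and mark the slot as an ndo_xdp_xmit buffer so the
+        * TX completion path returns it with xdp_return_frame().
+        */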
+       txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+       txq->tx_buf[index].xdp = frame;
 
        /* Make sure the updates to rest of the descriptor are performed before
         * transferring ownership.