 #include <linux/vmalloc.h>
 #include <net/pkt_sched.h>
 
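+/* A tx_swbd can track an skb, an XDP_TX buffer or a redirected xdp_frame;
+ * these accessors return the matching pointer, or NULL when the software
+ * buffer descriptor is of a different type.
+ */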
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+       if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+               return NULL;
+
+       return tx_swbd->skb;
+}
+
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+       if (tx_swbd->is_xdp_redirect)
+               return tx_swbd->xdp_frame;
+
+       return NULL;
+}
+
 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
                                struct enetc_tx_swbd *tx_swbd)
 {
        tx_swbd->dma = 0;
 }
 
-static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
-                             struct enetc_tx_swbd *tx_swbd)
+static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
+                               struct enetc_tx_swbd *tx_swbd)
 {
+       struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+       struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
        if (tx_swbd->dma)
                enetc_unmap_tx_buff(tx_ring, tx_swbd);
 
-       if (tx_swbd->skb) {
-               dev_kfree_skb_any(tx_swbd->skb);
+       if (xdp_frame) {
+               xdp_return_frame(xdp_frame);
+               tx_swbd->xdp_frame = NULL;
+       } else if (skb) {
+               dev_kfree_skb_any(skb);
                tx_swbd->skb = NULL;
        }
 }
 
        do {
                tx_swbd = &tx_ring->tx_swbd[i];
-               enetc_free_tx_skb(tx_ring, tx_swbd);
+               enetc_free_tx_frame(tx_ring, tx_swbd);
                if (i == 0)
                        i = tx_ring->bd_count;
                i--;
        do_tstamp = false;
 
        while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
+               struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+               struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
                if (unlikely(tx_swbd->check_wb)) {
                        struct enetc_ndev_priv *priv = netdev_priv(ndev);
                        union enetc_tx_bd *txbd;
                else if (likely(tx_swbd->dma))
                        enetc_unmap_tx_buff(tx_ring, tx_swbd);
 
-               if (tx_swbd->skb) {
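+               /* Return redirected frames to their original allocator once
+                * the hardware is done transmitting them.
+                */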
+               if (xdp_frame) {
+                       xdp_return_frame(xdp_frame);
+                       tx_swbd->xdp_frame = NULL;
+               } else if (skb) {
                        if (unlikely(do_tstamp)) {
-                               enetc_tstamp_tx(tx_swbd->skb, tstamp);
+                               enetc_tstamp_tx(skb, tstamp);
                                do_tstamp = false;
                        }
-                       napi_consume_skb(tx_swbd->skb, napi_budget);
+                       napi_consume_skb(skb, napi_budget);
                        tx_swbd->skb = NULL;
                }
 
        return true;
 }
 
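+/* Populate xdp_tx_arr with TX software buffer descriptors for the head and
+ * each fragment of an xdp_frame, DMA-mapping every buffer along the way.
+ * Returns the number of buffer descriptors used, or -1 on a DMA mapping
+ * error.
+ */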
+static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
+                                         struct enetc_tx_swbd *xdp_tx_arr,
+                                         struct xdp_frame *xdp_frame)
+{
+       struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
+       struct skb_shared_info *shinfo;
+       void *data = xdp_frame->data;
+       int len = xdp_frame->len;
+       skb_frag_t *frag;
+       dma_addr_t dma;
+       unsigned int f;
+       int n = 0;
+
+       dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+               netdev_err(tx_ring->ndev, "DMA map error\n");
+               return -1;
+       }
+
+       xdp_tx_swbd->dma = dma;
+       xdp_tx_swbd->dir = DMA_TO_DEVICE;
+       xdp_tx_swbd->len = len;
+       xdp_tx_swbd->is_xdp_redirect = true;
+       xdp_tx_swbd->is_eof = false;
+       xdp_tx_swbd->xdp_frame = NULL;
+
+       n++;
+       xdp_tx_swbd = &xdp_tx_arr[n];
+
+       shinfo = xdp_get_shared_info_from_frame(xdp_frame);
+
+       for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
+            f++, frag++) {
+               data = skb_frag_address(frag);
+               len = skb_frag_size(frag);
+
+               dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+                       /* Undo the DMA mapping for the head buffer and all
+                        * fragments mapped so far
+                        */
+                       while (--n >= 0)
+                               enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
+
+                       netdev_err(tx_ring->ndev, "DMA map error\n");
+                       return -1;
+               }
+
+               xdp_tx_swbd->dma = dma;
+               xdp_tx_swbd->dir = DMA_TO_DEVICE;
+               xdp_tx_swbd->len = len;
+               xdp_tx_swbd->is_xdp_redirect = true;
+               xdp_tx_swbd->is_eof = false;
+               xdp_tx_swbd->xdp_frame = NULL;
+
+               n++;
+               xdp_tx_swbd = &xdp_tx_arr[n];
+       }
+
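+       /* Mark the last BD as end-of-frame and attach the xdp_frame to it so
+        * it can be returned to its allocator on TX completion.
+        */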
+       xdp_tx_arr[n - 1].is_eof = true;
+       xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
+
+       return n;
+}
+
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+                  struct xdp_frame **frames, u32 flags)
+{
+       struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       struct enetc_bdr *tx_ring;
+       int xdp_tx_bd_cnt, i, k;
+       int xdp_tx_frm_cnt = 0;
+
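+       /* Select the TX ring by CPU; this assumes the device has at least as
+        * many TX rings as there are CPUs that can run this function.
+        */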
+       tx_ring = priv->tx_ring[smp_processor_id()];
+
+       prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
+
+       for (k = 0; k < num_frames; k++) {
+               xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
+                                                              xdp_redirect_arr,
+                                                              frames[k]);
+               if (unlikely(xdp_tx_bd_cnt < 0))
+                       break;
+
+               if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
+                                          xdp_tx_bd_cnt))) {
+                       for (i = 0; i < xdp_tx_bd_cnt; i++)
+                               enetc_unmap_tx_buff(tx_ring,
+                                                   &xdp_redirect_arr[i]);
+                       tx_ring->stats.xdp_tx_drops++;
+                       break;
+               }
+
+               xdp_tx_frm_cnt++;
+       }
+
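+       /* Kick the hardware if an explicit flush was requested, or if an
+        * error cut the batch short and no further xmit call may follow.
+        */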
+       if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
+               enetc_update_tx_ring_tail(tx_ring);
+
+       tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
+
+       return xdp_tx_frm_cnt;
+}
+
 static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
                                     struct xdp_buff *xdp_buff, u16 size)
 {
        rx_ring->stats.xdp_drops++;
 }
 
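+/* Called when xdp_do_redirect() fails: unmap and free the RX pages in the
+ * [rx_ring_first, rx_ring_last) range, whose buffers the ring no longer
+ * owns, and account the failure.
+ */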
+static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+                          int rx_ring_last)
+{
+       while (rx_ring_first != rx_ring_last) {
+               struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+
+               if (rx_swbd->page) {
+                       dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+                                      rx_swbd->dir);
+                       __free_page(rx_swbd->page);
+                       rx_swbd->page = NULL;
+               }
+               enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+       }
+       rx_ring->stats.xdp_redirect_failures++;
+}
+
 static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
                                   struct napi_struct *napi, int work_limit,
                                   struct bpf_prog *prog)
 {
+       int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
        struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
        struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
        struct enetc_bdr *tx_ring = priv->tx_ring[rx_ring->index];
-       int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0;
        int rx_frm_cnt = 0, rx_byte_cnt = 0;
        int cleaned_cnt, i;
        u32 xdp_act;
                int orig_i, orig_cleaned_cnt;
                struct xdp_buff xdp_buff;
                struct sk_buff *skb;
+               int tmp_orig_i, err;
                u32 bd_status;
 
                rxbd = enetc_rxbd(rx_ring, i);
                                rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
                                xdp_tx_frm_cnt++;
                        }
+                       break;
+               case XDP_REDIRECT:
+                       /* xdp_return_frame does not support S/G in the sense
+                        * that it leaks the fragments (__xdp_return should not
+                        * call page_frag_free only for the initial buffer).
+                        * Until XDP_REDIRECT gains support for S/G let's keep
+                        * the code structure in place, but dead. We drop the
+                        * S/G frames ourselves to avoid memory leaks, which
+                        * would otherwise drive the kernel out of memory.
+                        */
+                       if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
+                               enetc_xdp_drop(rx_ring, orig_i, i);
+                               rx_ring->stats.xdp_redirect_sg++;
+                               break;
+                       }
+
+                       tmp_orig_i = orig_i;
+
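+                       /* Give up ownership of the RX buffers that make up
+                        * the frame before handing it to another interface,
+                        * but remember where they start in case the redirect
+                        * fails and the pages must be freed.
+                        */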
+                       while (orig_i != i) {
+                               enetc_put_rx_buff(rx_ring,
+                                                 &rx_ring->rx_swbd[orig_i]);
+                               enetc_bdr_idx_inc(rx_ring, &orig_i);
+                       }
+
+                       err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+                       if (unlikely(err)) {
+                               enetc_xdp_free(rx_ring, tmp_orig_i, i);
+                       } else {
+                               xdp_redirect_frm_cnt++;
+                               rx_ring->stats.xdp_redirect++;
+                       }
+
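+                       /* Flush in batches of ENETC_DEFAULT_TX_WORK frames so
+                        * redirected frames are not held back indefinitely
+                        * during a long NAPI poll.
+                        */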
+                       if (unlikely(xdp_redirect_frm_cnt > ENETC_DEFAULT_TX_WORK)) {
+                               xdp_do_flush_map();
+                               xdp_redirect_frm_cnt = 0;
+                       }
+
                        break;
                default:
                        bpf_warn_invalid_xdp_action(xdp_act);
        rx_ring->stats.packets += rx_frm_cnt;
        rx_ring->stats.bytes += rx_byte_cnt;
 
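+       /* Flush any redirected frames still pending at the end of the poll */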
+       if (xdp_redirect_frm_cnt)
+               xdp_do_flush_map();
+
        if (xdp_tx_frm_cnt)
                enetc_update_tx_ring_tail(tx_ring);
 
        int size, i;
 
        for (i = 0; i < txr->bd_count; i++)
-               enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+               enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
 
        size = txr->bd_count * sizeof(union enetc_tx_bd);
 
        for (i = 0; i < tx_ring->bd_count; i++) {
                struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
 
-               enetc_free_tx_skb(tx_ring, tx_swbd);
+               enetc_free_tx_frame(tx_ring, tx_swbd);
        }
 
        tx_ring->next_to_clean = 0;