return 0;
 }
 
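+/* DMA-unmap an RX page; the page reference itself is not released here */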
+static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
+                                       struct mlx5e_dma_info *dma_info)
+{
+       dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+                      rq->buff.map_dir);
+}
+
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle)
 {
        if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
                return;
 
-       dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
-                      rq->buff.map_dir);
+       mlx5e_page_dma_unmap(rq, dma_info);
        put_page(dma_info->page);
 }
 
                                   struct mlx5e_dma_info *di,
                                   void *va, u16 *rx_headroom, u32 *len)
 {
-       const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+       struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
        struct xdp_buff xdp;
        u32 act;
+       int err;
 
        if (!prog)
                return false;
                if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
                        trace_xdp_exception(rq->netdev, prog, act);
                return true;
+       case XDP_REDIRECT:
+               /* When XDP is enabled, the page refcount is 1 here */
+               err = xdp_do_redirect(rq->netdev, &xdp, prog);
+               if (!err) {
+                       __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+                       rq->xdpsq.db.redirect_flush = true;
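+                       /* On a successful redirect the page is handed off to
+                        * the redirect target and will not return to this RQ,
+                        * so drop the driver's DMA mapping for it.
+                        */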
+                       mlx5e_page_dma_unmap(rq, di);
+               }
+               return true;
        default:
                bpf_warn_invalid_xdp_action(act);
        case XDP_ABORTED:
                xdpsq->db.doorbell = false;
        }
 
+       if (xdpsq->db.redirect_flush) {
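+               /* Flush the bulk queues filled by xdp_do_redirect()
+                * before ending this poll cycle.
+                */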
+               xdp_do_flush_map();
+               xdpsq->db.redirect_flush = false;
+       }
+
        mlx5_cqwq_update_db_record(&cq->wq);
 
        /* ensure cq space is freed before enabling more cqes */