int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq);
 void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
 
 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 
 
        return work_done;
 }
+
+/* mlx5e_poll_xdpsq_cq - reclaim completed XDP transmit descriptors.
+ *
+ * Drains up to MLX5E_TX_CQ_POLL_BUDGET CQEs from the XDP SQ's completion
+ * queue, advancing the SQ consumer counter and recycling the RX pages that
+ * backed the transmitted frames.
+ *
+ * Returns true when the full poll budget was consumed (i.e. more
+ * completions may still be pending), false otherwise.
+ */
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_sq *sq;
+       u16 sqcc;
+       int i;
+
+       sq = container_of(cq, struct mlx5e_sq, cq);
+
+       if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+               return false;
+
+       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+        * otherwise a cq overrun may occur
+        */
+       sqcc = sq->cc;
+
+       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+               struct mlx5_cqe64 *cqe;
+               u16 wqe_counter;
+               bool last_wqe;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               mlx5_cqwq_pop(&cq->wq);
+
+               wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+               /* One CQE may complete several WQEs; walk the SQ up to and
+                * including the WQE index reported by the CQE.
+                */
+               do {
+                       struct mlx5e_sq_wqe_info *wi;
+                       struct mlx5e_dma_info *di;
+                       u16 ci;
+
+                       last_wqe = (sqcc == wqe_counter);
+
+                       ci = sqcc & sq->wq.sz_m1;
+                       di = &sq->db.xdp.di[ci];
+                       wi = &sq->db.xdp.wqe_info[ci];
+
+                       /* NOP WQEs occupy one slot and carry no page to free */
+                       if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+                               sqcc++;
+                               continue;
+                       }
+
+                       sqcc += wi->num_wqebbs;
+                       /* Recycle RX page */
+                       mlx5e_page_release(&sq->channel->rq, di, true);
+               } while (!last_wqe);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       sq->cc = sqcc;
+       return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+/* mlx5e_free_xdpsq_descs - release all outstanding XDP SQ descriptors.
+ *
+ * Walks the SQ from the consumer counter (cc) to the producer counter (pc)
+ * and releases the page behind every pending WQE. Pages are released with
+ * the third argument false here, unlike the completion path which passes
+ * true with a "Recycle RX page" note — presumably no recycling on this
+ * teardown path; confirm against mlx5e_page_release()'s parameter meaning.
+ */
+void mlx5e_free_xdpsq_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_sq_wqe_info *wi;
+       struct mlx5e_dma_info *di;
+       u16 ci;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               di = &sq->db.xdp.di[ci];
+               wi = &sq->db.xdp.wqe_info[ci];
+
+               /* NOP WQEs occupy one slot and have no page attached */
+               if (wi->opcode == MLX5_OPCODE_NOP) {
+                       sq->cc++;
+                       continue;
+               }
+
+               sq->cc += wi->num_wqebbs;
+
+               mlx5e_page_release(&sq->channel->rq, di, false);
+       }
+}
 
        }
 }
 
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
-{
-       struct mlx5e_sq_wqe_info *wi;
-       struct mlx5e_dma_info *di;
-       u16 ci;
-
-       while (sq->cc != sq->pc) {
-               ci = sq->cc & sq->wq.sz_m1;
-               di = &sq->db.xdp.di[ci];
-               wi = &sq->db.xdp.wqe_info[ci];
-
-               if (wi->opcode == MLX5_OPCODE_NOP) {
-                       sq->cc++;
-                       continue;
-               }
-
-               sq->cc += wi->num_wqebbs;
-
-               mlx5e_page_release(&sq->channel->rq, di, false);
-       }
-}
-
 void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
 {
        switch (sq->type) {
                mlx5e_free_txq_sq_descs(sq);
                break;
        case MLX5E_SQ_XDP:
-               mlx5e_free_xdp_sq_descs(sq);
+               mlx5e_free_xdpsq_descs(sq);
                break;
        }
 }
 
        sq->cc = sqcc;
 }
 
-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
-{
-       struct mlx5e_sq *sq;
-       u16 sqcc;
-       int i;
-
-       sq = container_of(cq, struct mlx5e_sq, cq);
-
-       if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-               return false;
-
-       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-        * otherwise a cq overrun may occur
-        */
-       sqcc = sq->cc;
-
-       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-               struct mlx5_cqe64 *cqe;
-               u16 wqe_counter;
-               bool last_wqe;
-
-               cqe = mlx5e_get_cqe(cq);
-               if (!cqe)
-                       break;
-
-               mlx5_cqwq_pop(&cq->wq);
-
-               wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
-               do {
-                       struct mlx5e_sq_wqe_info *wi;
-                       struct mlx5e_dma_info *di;
-                       u16 ci;
-
-                       last_wqe = (sqcc == wqe_counter);
-
-                       ci = sqcc & sq->wq.sz_m1;
-                       di = &sq->db.xdp.di[ci];
-                       wi = &sq->db.xdp.wqe_info[ci];
-
-                       if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
-                               sqcc++;
-                               continue;
-                       }
-
-                       sqcc += wi->num_wqebbs;
-                       /* Recycle RX page */
-                       mlx5e_page_release(&sq->channel->rq, di, true);
-               } while (!last_wqe);
-       }
-
-       mlx5_cqwq_update_db_record(&cq->wq);
-
-       /* ensure cq space is freed before enabling more cqes */
-       wmb();
-
-       sq->cc = sqcc;
-       return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
        busy |= work_done == budget;
 
        if (c->xdp)
-               busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+               busy |= mlx5e_poll_xdpsq_cq(&c->xdp_sq.cq);
 
        mlx5e_poll_ico_cq(&c->icosq.cq);