ice: move ice_qp_[ena|dis] for reuse
author    Paul Greenwalt <paul.greenwalt@intel.com>
          Mon, 18 Aug 2025 13:22:56 +0000 (09:22 -0400)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
          Fri, 19 Sep 2025 15:42:07 +0000 (08:42 -0700)
Move ice_qp_[ena|dis] and related helper functions to ice_base.c to
allow reuse of these functions, which are currently used only by ice_xsk.c.
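
As a hedged illustration of the intended reuse (not part of this patch): with
ice_qp_dis() and ice_qp_ena() now exported through ice_base.h, another part of
the ice driver could quiesce and restart a single queue pair around a per-queue
change, much as ice_xsk.c does for AF_XDP pool setup. The caller below is
hypothetical; only the two exported helpers come from the patch.

    /* sketch only; assumes the usual ice driver includes ("ice.h", "ice_base.h") */
    static int ice_reconfig_one_queue(struct ice_vsi *vsi, u16 q_idx)
    {
            int err;

            /* stop the Tx/Rx rings, IRQ and NAPI for this queue index */
            err = ice_qp_dis(vsi, q_idx);
            if (err)
                    return err;

            /* per-queue reconfiguration would go here */

            /* bring the queue pair back up and restart the netdev Tx queue */
            return ice_qp_ena(vsi, q_idx);
    }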

Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_base.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/ice/ice_xsk.h

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c5da8e9cc0a0e5551b340e70628813999059bcfe..dc4beac04086652007d2ad55e1e0540ad59a6304 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -1206,3 +1206,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
                txq_meta->tc = tc;
        }
 }
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+       struct ice_vsi_stats *vsi_stat;
+       struct ice_pf *pf;
+
+       pf = vsi->back;
+       if (!pf->vsi_stats)
+               return;
+
+       vsi_stat = pf->vsi_stats[vsi->idx];
+       if (!vsi_stat)
+               return;
+
+       memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+              sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+       memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+              sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+       if (vsi->xdp_rings)
+               memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+                      sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+       ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+       if (vsi->xdp_rings)
+               ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+       ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+       struct ice_txq_meta txq_meta = { };
+       struct ice_q_vector *q_vector;
+       struct ice_tx_ring *tx_ring;
+       struct ice_rx_ring *rx_ring;
+       int fail = 0;
+       int err;
+
+       if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+               return -EINVAL;
+
+       tx_ring = vsi->tx_rings[q_idx];
+       rx_ring = vsi->rx_rings[q_idx];
+       q_vector = rx_ring->q_vector;
+
+       synchronize_net();
+       netif_carrier_off(vsi->netdev);
+       netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+       ice_qvec_toggle_napi(vsi, q_vector, false);
+
+       ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+       err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+       if (!fail)
+               fail = err;
+       if (vsi->xdp_rings) {
+               struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+               memset(&txq_meta, 0, sizeof(txq_meta));
+               ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+               err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+                                          &txq_meta);
+               if (!fail)
+                       fail = err;
+       }
+
+       ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+       ice_qp_clean_rings(vsi, q_idx);
+       ice_qp_reset_stats(vsi, q_idx);
+
+       return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+       struct ice_q_vector *q_vector;
+       int fail = 0;
+       bool link_up;
+       int err;
+
+       err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+       if (!fail)
+               fail = err;
+
+       if (ice_is_xdp_ena_vsi(vsi)) {
+               struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+               err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+               if (!fail)
+                       fail = err;
+               ice_set_ring_xdp(xdp_ring);
+               ice_tx_xsk_pool(vsi, q_idx);
+       }
+
+       err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+       if (!fail)
+               fail = err;
+
+       q_vector = vsi->rx_rings[q_idx]->q_vector;
+       ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+       err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+       if (!fail)
+               fail = err;
+
+       ice_qvec_toggle_napi(vsi, q_vector, true);
+       ice_qvec_ena_irq(vsi, q_vector);
+
+       /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+       synchronize_net();
+       ice_get_link_status(vsi->port_info, &link_up);
+       if (link_up) {
+               netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+               netif_carrier_on(vsi->netdev);
+       }
+
+       return fail;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928d60d905acd71d4555934ac52ff8f..632b5be61a980190d56d954a1b4d9e292a85e616 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,6 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 void
 ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
                  struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
 #endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a3a4eaa17739ae084ccbe2cc75f3d10ca1be3fdc..575fd48f485f1569e191dd2ca55cb654640f0147 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -18,53 +18,13 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
        return &rx_ring->xdp_buf[idx];
 }
 
-/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
-       struct ice_vsi_stats *vsi_stat;
-       struct ice_pf *pf;
-
-       pf = vsi->back;
-       if (!pf->vsi_stats)
-               return;
-
-       vsi_stat = pf->vsi_stats[vsi->idx];
-       if (!vsi_stat)
-               return;
-
-       memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
-              sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
-       memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
-              sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
-       if (vsi->xdp_rings)
-               memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
-                      sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
-       ice_clean_tx_ring(vsi->tx_rings[q_idx]);
-       if (vsi->xdp_rings)
-               ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
-       ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
 /**
  * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
  * @vsi: VSI that has netdev
  * @q_vector: q_vector that has NAPI context
  * @enable: true for enable, false for disable
  */
-static void
+void
 ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
 {
@@ -83,7 +43,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
  * @rx_ring: Rx ring that will have its IRQ disabled
  * @q_vector: queue vector
  */
-static void
+void
 ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
                 struct ice_q_vector *q_vector)
 {
@@ -113,7 +73,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
  * @q_vector: queue vector
  * @qid: queue index
  */
-static void
+void
 ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
 {
        u16 reg_idx = q_vector->reg_idx;
@@ -143,7 +103,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
  * @vsi: the VSI that contains queue vector
  * @q_vector: queue vector
  */
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 {
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
@@ -153,111 +113,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
        ice_flush(hw);
 }
 
-/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
-       struct ice_txq_meta txq_meta = { };
-       struct ice_q_vector *q_vector;
-       struct ice_tx_ring *tx_ring;
-       struct ice_rx_ring *rx_ring;
-       int fail = 0;
-       int err;
-
-       if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
-               return -EINVAL;
-
-       tx_ring = vsi->tx_rings[q_idx];
-       rx_ring = vsi->rx_rings[q_idx];
-       q_vector = rx_ring->q_vector;
-
-       synchronize_net();
-       netif_carrier_off(vsi->netdev);
-       netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
-       ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-       ice_qvec_toggle_napi(vsi, q_vector, false);
-
-       ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
-       err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
-       if (!fail)
-               fail = err;
-       if (vsi->xdp_rings) {
-               struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
-               memset(&txq_meta, 0, sizeof(txq_meta));
-               ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
-               err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
-                                          &txq_meta);
-               if (!fail)
-                       fail = err;
-       }
-
-       ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
-       ice_qp_clean_rings(vsi, q_idx);
-       ice_qp_reset_stats(vsi, q_idx);
-
-       return fail;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
-       struct ice_q_vector *q_vector;
-       int fail = 0;
-       bool link_up;
-       int err;
-
-       err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
-       if (!fail)
-               fail = err;
-
-       if (ice_is_xdp_ena_vsi(vsi)) {
-               struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
-               err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
-               if (!fail)
-                       fail = err;
-               ice_set_ring_xdp(xdp_ring);
-               ice_tx_xsk_pool(vsi, q_idx);
-       }
-
-       err = ice_vsi_cfg_single_rxq(vsi, q_idx);
-       if (!fail)
-               fail = err;
-
-       q_vector = vsi->rx_rings[q_idx]->q_vector;
-       ice_qvec_cfg_msix(vsi, q_vector, q_idx);
-
-       err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
-       if (!fail)
-               fail = err;
-
-       ice_qvec_toggle_napi(vsi, q_vector, true);
-       ice_qvec_ena_irq(vsi, q_vector);
-
-       /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
-       synchronize_net();
-       ice_get_link_status(vsi->port_info, &link_up);
-       if (link_up) {
-               netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-               netif_carrier_on(vsi->netdev);
-       }
-
-       return fail;
-}
-
 /**
  * ice_xsk_pool_disable - disable a buffer pool region
  * @vsi: Current VSI
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8dc5d55e26c5209e051bd3a2428266bfb451de3a..600cbeeaa203089b77df3013c80a6dee795654e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -23,6 +23,13 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
 bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+                      u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+                         bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+                     struct ice_q_vector *q_vector);
 #else
 static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
                               struct xsk_buff_pool __always_unused *xsk_pool)
@@ -75,5 +82,20 @@ ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
 {
        return 0;
 }
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+                 u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+                    bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+                struct ice_q_vector *q_vector) { }
 #endif /* CONFIG_XDP_SOCKETS */
 #endif /* !_ICE_XSK_H_ */