ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors()
Author:     Larysa Zaremba <larysa.zaremba@intel.com>
AuthorDate: Mon, 3 Jun 2024 21:42:34 +0000 (14:42 -0700)
Commit:     Jakub Kicinski <kuba@kernel.org>
CommitDate: Thu, 6 Jun 2024 02:27:56 +0000 (19:27 -0700)
ice_pf_dcb_recfg() re-maps queues to vectors with
ice_vsi_map_rings_to_vectors(), which does not restore the previous
state for XDP queues. This leads to no AF_XDP traffic after rebuild.

Map XDP queues to vectors in ice_vsi_map_rings_to_vectors().
Also, move the code around so that XDP queues are mapped independently
only through .ndo_bpf().

Fixes: 6624e780a577 ("ice: split ice_vsi_setup into smaller functions")
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
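
As background for the hunks below: ice_vsi_map_rings_to_vectors() spreads
rings across interrupt vectors with a DIV_ROUND_UP-based round-robin, and
the new ice_map_xdp_rings() follows the same logic for XDP Tx rings. A
minimal standalone sketch of that distribution (plain userspace C, not
driver code; the ring and vector counts are made-up examples):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int num_xdp_txq = 10;	/* example: 10 XDP Tx rings */
		int num_q_vectors = 4;	/* example: 4 interrupt vectors */
		int rings_rem = num_xdp_txq;
		int v_idx;

		for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
			/* split remaining rings evenly over remaining
			 * vectors; earlier vectors absorb the remainder
			 */
			int per_v = DIV_ROUND_UP(rings_rem,
						 num_q_vectors - v_idx);
			int base = num_xdp_txq - rings_rem;

			printf("vector %d -> rings [%d..%d]\n",
			       v_idx, base, base + per_v - 1);
			rings_rem -= per_v;
		}
		return 0;
	}

With 10 rings and 4 vectors this prints 3/3/2/2 rings per vector, which is
exactly the split the driver loop computes.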
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index a5de6ef9c07ef6184eb5e3fd0888c318296a96d3..99a75a59078ef3e6c71c46696b694d00fd082ea1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -940,6 +940,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
                          enum ice_xdp_cfg cfg_type);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
             u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 687f6cb2b917afc55de7020c401c5095c6163825..5d396c1a7731482f725561a8eff709ecd3cc793e 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
                }
                rx_rings_rem -= rx_rings_per_v;
        }
+
+       if (ice_is_xdp_ena_vsi(vsi))
+               ice_map_xdp_rings(vsi);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index dd8b374823eeca6211a6f6acc47132a30d325b30..7629b0190578b3d4bf1fc8d54b54af570f1648d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2274,13 +2274,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                if (ret)
                        goto unroll_vector_base;
 
-               ice_vsi_map_rings_to_vectors(vsi);
-
-               /* Associate q_vector rings to napi */
-               ice_vsi_set_napi_queues(vsi);
-
-               vsi->stat_offsets_loaded = false;
-
                if (ice_is_xdp_ena_vsi(vsi)) {
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
@@ -2291,6 +2284,13 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                                goto unroll_vector_base;
                }
 
+               ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Associate q_vector rings to napi */
+               ice_vsi_set_napi_queues(vsi);
+
+               vsi->stat_offsets_loaded = false;
+
                /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
                if (vsi->type != ICE_VSI_CTRL)
                        /* Do not exit if configuring RSS had an issue, at
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2a270aacd24a9a6b38a0a83431d8050a606281aa..1b61ca3a6eb6e15353be17e6d7f72a27708bff8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2707,6 +2707,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
                bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+       struct ice_q_vector *q_vector;
+       struct ice_tx_ring *ring;
+
+       if (static_key_enabled(&ice_xdp_locking_key))
+               return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+       q_vector = vsi->rx_rings[qid]->q_vector;
+       ice_for_each_tx_ring(ring, q_vector->tx)
+               if (ice_ring_is_xdp(ring))
+                       return ring;
+
+       return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+       int xdp_rings_rem = vsi->num_xdp_txq;
+       int v_idx, q_idx;
+
+       /* follow the logic from ice_vsi_map_rings_to_vectors */
+       ice_for_each_q_vector(vsi, v_idx) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+               int xdp_rings_per_v, q_id, q_base;
+
+               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+                                              vsi->num_q_vectors - v_idx);
+               q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+                       xdp_ring->q_vector = q_vector;
+                       xdp_ring->next = q_vector->tx.tx_ring;
+                       q_vector->tx.tx_ring = xdp_ring;
+               }
+               xdp_rings_rem -= xdp_rings_per_v;
+       }
+
+       ice_for_each_rxq(vsi, q_idx) {
+               vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+                                                                      q_idx);
+               ice_tx_xsk_pool(vsi, q_idx);
+       }
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
@@ -2719,7 +2773,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
                          enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       int xdp_rings_rem = vsi->num_xdp_txq;
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg xdp_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
@@ -2732,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
                .mapping_mode = ICE_VSI_MAP_CONTIG
        };
        struct device *dev;
-       int i, v_idx;
-       int status;
+       int status, i;
 
        dev = ice_pf_to_dev(pf);
        vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2752,42 +2804,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
        if (ice_xdp_alloc_setup_rings(vsi))
                goto clear_xdp_rings;
 
-       /* follow the logic from ice_vsi_map_rings_to_vectors */
-       ice_for_each_q_vector(vsi, v_idx) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-               int xdp_rings_per_v, q_id, q_base;
-
-               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-                                              vsi->num_q_vectors - v_idx);
-               q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-                       xdp_ring->q_vector = q_vector;
-                       xdp_ring->next = q_vector->tx.tx_ring;
-                       q_vector->tx.tx_ring = xdp_ring;
-               }
-               xdp_rings_rem -= xdp_rings_per_v;
-       }
-
-       ice_for_each_rxq(vsi, i) {
-               if (static_key_enabled(&ice_xdp_locking_key)) {
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-               } else {
-                       struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-                       struct ice_tx_ring *ring;
-
-                       ice_for_each_tx_ring(ring, q_vector->tx) {
-                               if (ice_ring_is_xdp(ring)) {
-                                       vsi->rx_rings[i]->xdp_ring = ring;
-                                       break;
-                               }
-                       }
-               }
-               ice_tx_xsk_pool(vsi, i);
-       }
-
        /* omit the scheduler update if in reset path; XDP queues will be
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
@@ -2795,6 +2811,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
        if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
+       ice_map_xdp_rings(vsi);
+
        /* tell the Tx scheduler that right now we have
         * additional queues
         */
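
Similarly, a standalone model (not driver code) of how the new
ice_xdp_ring_from_qid() picks an XDP Tx ring for each Rx queue: with the
ice_xdp_locking_key static key enabled (fewer XDP rings than Rx queues)
rings are shared via a modulo and therefore need locking; otherwise each
Rx queue has a dedicated ring, which the driver finds by walking its
vector's Tx ring list and which is modeled here as a 1:1 mapping. All
names and counts are illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	static int xdp_ring_for_qid(int qid, int num_xdp_txq, bool locking)
	{
		if (locking)
			return qid % num_xdp_txq; /* shared, locked rings */
		return qid; /* stand-in for the per-vector ring lookup */
	}

	int main(void)
	{
		int qid;

		/* example: 8 Rx queues sharing 4 XDP Tx rings */
		for (qid = 0; qid < 8; qid++)
			printf("rxq %d -> xdp ring %d\n",
			       qid, xdp_ring_for_qid(qid, 4, true));
		return 0;
	}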