bpf_prog_put(old_prog);
 }
 
+/**
+ * ice_xdp_ring_from_qid - find the XDP Tx ring serving a given Rx queue
+ * @vsi: the VSI with XDP rings being configured
+ * @qid: index of the Rx queue to look up
+ *
+ * Return: pointer to the XDP Tx ring paired with Rx queue @qid, or NULL if
+ * the queue's interrupt vector has no XDP ring attached.
+ */
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+       struct ice_q_vector *q_vector;
+       struct ice_tx_ring *ring;
+
+       /* Shared-ring mode: there are fewer XDP rings than Rx queues, so
+        * rings are assigned round-robin. NOTE(review): ice_xdp_locking_key
+        * presumably gates locking on the shared Tx path — confirm at its
+        * definition.
+        */
+       if (static_key_enabled(&ice_xdp_locking_key))
+               return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+       /* Otherwise the XDP ring hangs off the same interrupt vector as the
+        * Rx queue; scan that vector's Tx ring list for it.
+        */
+       q_vector = vsi->rx_rings[qid]->q_vector;
+       ice_for_each_tx_ring(ring, q_vector->tx)
+               if (ice_ring_is_xdp(ring))
+                       return ring;
+
+       return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+       int xdp_rings_rem = vsi->num_xdp_txq;
+       int v_idx, q_idx;
+
+       /* follow the logic from ice_vsi_map_rings_to_vectors */
+       ice_for_each_q_vector(vsi, v_idx) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+               int xdp_rings_per_v, q_id, q_base;
+
+               /* spread the remaining XDP rings evenly across the remaining
+                * vectors; DIV_ROUND_UP front-loads any remainder so the
+                * per-vector assignment stays contiguous
+                */
+               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+                                              vsi->num_q_vectors - v_idx);
+               q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+                       /* prepend the ring to the vector's Tx ring list */
+                       xdp_ring->q_vector = q_vector;
+                       xdp_ring->next = q_vector->tx.tx_ring;
+                       q_vector->tx.tx_ring = xdp_ring;
+               }
+               xdp_rings_rem -= xdp_rings_per_v;
+       }
+
+       /* pair every Rx ring with its XDP Tx ring, then let
+        * ice_tx_xsk_pool() refresh the queue's XSK pool association
+        * (NOTE(review): exact pool semantics live in its definition)
+        */
+       ice_for_each_rxq(vsi, q_idx) {
+               vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+                                                                      q_idx);
+               ice_tx_xsk_pool(vsi, q_idx);
+       }
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
                          enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       int xdp_rings_rem = vsi->num_xdp_txq;
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg xdp_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
                .mapping_mode = ICE_VSI_MAP_CONTIG
        };
        struct device *dev;
-       int i, v_idx;
-       int status;
+       int status, i;
 
        dev = ice_pf_to_dev(pf);
        vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
        if (ice_xdp_alloc_setup_rings(vsi))
                goto clear_xdp_rings;
 
-       /* follow the logic from ice_vsi_map_rings_to_vectors */
-       ice_for_each_q_vector(vsi, v_idx) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-               int xdp_rings_per_v, q_id, q_base;
-
-               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-                                              vsi->num_q_vectors - v_idx);
-               q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-                       xdp_ring->q_vector = q_vector;
-                       xdp_ring->next = q_vector->tx.tx_ring;
-                       q_vector->tx.tx_ring = xdp_ring;
-               }
-               xdp_rings_rem -= xdp_rings_per_v;
-       }
-
-       ice_for_each_rxq(vsi, i) {
-               if (static_key_enabled(&ice_xdp_locking_key)) {
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-               } else {
-                       struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-                       struct ice_tx_ring *ring;
-
-                       ice_for_each_tx_ring(ring, q_vector->tx) {
-                               if (ice_ring_is_xdp(ring)) {
-                                       vsi->rx_rings[i]->xdp_ring = ring;
-                                       break;
-                               }
-                       }
-               }
-               ice_tx_xsk_pool(vsi, i);
-       }
-
        /* omit the scheduler update if in reset path; XDP queues will be
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
        if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
+       ice_map_xdp_rings(vsi);
+
        /* tell the Tx scheduler that right now we have
         * additional queues
         */