 ICE_VSI_STATE_NBITS             /* must be last */
 };
 
+struct ice_vsi_stats {
+       struct ice_ring_stats **tx_ring_stats;  /* Tx ring stats array */
+       struct ice_ring_stats **rx_ring_stats;  /* Rx ring stats array */
+};
+
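The pointer arrays are parked in the PF (pf->vsi_stats, added below), so the counters survive the teardown and reallocation of the rings themselves across a reset. A minimal lookup sketch under that layout; ice_get_tx_ring_stats_slot() is a hypothetical helper for illustration, not part of this patch:

        /* Hypothetical: return the persistent stats block for Tx queue @q of
         * @vsi, or NULL if none has been allocated yet.
         */
        static struct ice_ring_stats *
        ice_get_tx_ring_stats_slot(struct ice_pf *pf, struct ice_vsi *vsi,
                                   u16 q)
        {
                struct ice_vsi_stats *vsi_stat = pf->vsi_stats[vsi->idx];

                if (!vsi_stat || q >= vsi->alloc_txq)
                        return NULL;
                return vsi_stat->tx_ring_stats[q];
        }
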
 /* struct that defines a VSI, associated with a dev */
 struct ice_vsi {
        struct net_device *netdev;
        u16 ctrl_vsi_idx;               /* control VSI index in pf->vsi array */
 
        struct ice_vsi **vsi;           /* VSIs created by the driver */
+       struct ice_vsi_stats **vsi_stats;
        struct ice_sw *first_sw;        /* first switch created by firmware */
        u16 eswitch_mode;               /* current mode of eswitch */
        struct ice_vfs vfs;
 
 
        ice_for_each_alloc_txq(vsi, j) {
                tx_ring = READ_ONCE(vsi->tx_rings[j]);
-               if (tx_ring) {
-                       data[i++] = tx_ring->stats.pkts;
-                       data[i++] = tx_ring->stats.bytes;
+               if (tx_ring && tx_ring->ring_stats) {
+                       data[i++] = tx_ring->ring_stats->stats.pkts;
+                       data[i++] = tx_ring->ring_stats->stats.bytes;
                } else {
                        data[i++] = 0;
                        data[i++] = 0;
 
        ice_for_each_alloc_rxq(vsi, j) {
                rx_ring = READ_ONCE(vsi->rx_rings[j]);
-               if (rx_ring) {
-                       data[i++] = rx_ring->stats.pkts;
-                       data[i++] = rx_ring->stats.bytes;
+               if (rx_ring && rx_ring->ring_stats) {
+                       data[i++] = rx_ring->ring_stats->stats.pkts;
+                       data[i++] = rx_ring->ring_stats->stats.bytes;
                } else {
                        data[i++] = 0;
                        data[i++] = 0;
 
        return IRQ_HANDLED;
 }
 
+/**
+ * ice_vsi_alloc_stat_arrays - Allocate per-VSI arrays of ring-stats pointers
+ * @vsi: VSI pointer
+ *
+ * Returns 0 on success, -ENOENT if the PF-level stats array is missing, or
+ * -ENOMEM on allocation failure.
+ */
+static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
+{
+       struct ice_vsi_stats *vsi_stat;
+       struct ice_pf *pf = vsi->back;
+
+       if (vsi->type == ICE_VSI_CHNL)
+               return 0;
+       if (!pf->vsi_stats)
+               return -ENOENT;
+
+       vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
+       if (!vsi_stat)
+               return -ENOMEM;
+
+       vsi_stat->tx_ring_stats =
+               kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
+                       GFP_KERNEL);
+       if (!vsi_stat->tx_ring_stats)
+               goto err_alloc_tx;
+
+       vsi_stat->rx_ring_stats =
+               kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
+                       GFP_KERNEL);
+       if (!vsi_stat->rx_ring_stats)
+               goto err_alloc_rx;
+
+       pf->vsi_stats[vsi->idx] = vsi_stat;
+
+       return 0;
+
+err_alloc_rx:
+       kfree(vsi_stat->tx_ring_stats);
+err_alloc_tx:
+       kfree(vsi_stat);
+       pf->vsi_stats[vsi->idx] = NULL;
+       return -ENOMEM;
+}
+
 /**
  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
  * @pf: board private structure
 
        if (vsi->type == ICE_VSI_CTRL && vf)
                vf->ctrl_vsi_idx = vsi->idx;
+
+       /* allocate memory for Tx/Rx ring stat pointers */
+       if (ice_vsi_alloc_stat_arrays(vsi))
+               goto err_rings;
+
        goto unlock_pf;
 
 err_rings:
        return -ENOMEM;
 }
 
+/**
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+       struct ice_vsi_stats *vsi_stat;
+       struct ice_pf *pf = vsi->back;
+       int i;
+
+       if (vsi->type == ICE_VSI_CHNL)
+               return;
+       if (!pf->vsi_stats)
+               return;
+
+       vsi_stat = pf->vsi_stats[vsi->idx];
+       if (!vsi_stat)
+               return;
+
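+       /* a reader may still hold a ring_stats pointer fetched under RCU,
+        * so defer each free past a grace period with kfree_rcu()
+        */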
+       ice_for_each_alloc_txq(vsi, i) {
+               if (vsi_stat->tx_ring_stats[i]) {
+                       kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+                       WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+               }
+       }
+
+       ice_for_each_alloc_rxq(vsi, i) {
+               if (vsi_stat->rx_ring_stats[i]) {
+                       kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+                       WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+               }
+       }
+
+       kfree(vsi_stat->tx_ring_stats);
+       kfree(vsi_stat->rx_ring_stats);
+       kfree(vsi_stat);
+       pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI to allocate ring stats for
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+       struct ice_ring_stats **tx_ring_stats;
+       struct ice_ring_stats **rx_ring_stats;
+       struct ice_vsi_stats *vsi_stats;
+       struct ice_pf *pf = vsi->back;
+       u16 i;
+
+       vsi_stats = pf->vsi_stats[vsi->idx];
+       tx_ring_stats = vsi_stats->tx_ring_stats;
+       rx_ring_stats = vsi_stats->rx_ring_stats;
+
+       /* Allocate Tx ring stats */
+       ice_for_each_alloc_txq(vsi, i) {
+               struct ice_ring_stats *ring_stats;
+               struct ice_tx_ring *ring;
+
+               ring = vsi->tx_rings[i];
+               ring_stats = tx_ring_stats[i];
+
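+               /* a stats block that survived the last reset keeps its
+                * counts; allocate a fresh one only if the slot is empty
+                */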
+               if (!ring_stats) {
+                       ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+                       if (!ring_stats)
+                               goto err_out;
+
+                       WRITE_ONCE(tx_ring_stats[i], ring_stats);
+               }
+
+               ring->ring_stats = ring_stats;
+       }
+
+       /* Allocate Rx ring stats */
+       ice_for_each_alloc_rxq(vsi, i) {
+               struct ice_ring_stats *ring_stats;
+               struct ice_rx_ring *ring;
+
+               ring = vsi->rx_rings[i];
+               ring_stats = rx_ring_stats[i];
+
+               if (!ring_stats) {
+                       ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+                       if (!ring_stats)
+                               goto err_out;
+
+                       WRITE_ONCE(rx_ring_stats[i], ring_stats);
+               }
+
+               ring->ring_stats = ring_stats;
+       }
+
+       return 0;
+
+err_out:
+       ice_vsi_free_stats(vsi);
+       return -ENOMEM;
+}
+
 /**
  * ice_vsi_manage_rss_lut - disable/enable RSS
  * @vsi: the VSI being changed
                if (ret)
                        goto unroll_vector_base;
 
+               ret = ice_vsi_alloc_ring_stats(vsi);
+               if (ret)
+                       goto unroll_vector_base;
+
                ice_vsi_map_rings_to_vectors(vsi);
 
                /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
                if (ret)
                        goto unroll_vector_base;
 
+               ret = ice_vsi_alloc_ring_stats(vsi);
+               if (ret)
+                       goto unroll_vector_base;
+
                /* Do not exit if configuring RSS had an issue, at least
                 * receive traffic on first queue. Hence no need to capture
                 * return value
                ret = ice_vsi_alloc_rings(vsi);
                if (ret)
                        goto unroll_vsi_init;
+
+               ret = ice_vsi_alloc_ring_stats(vsi);
+               if (ret)
+                       goto unroll_vector_base;
+
                break;
        default:
                /* clean up the resources and exit */
 unroll_alloc_q_vector:
        ice_vsi_free_q_vectors(vsi);
 unroll_vsi_init:
+       ice_vsi_free_stats(vsi);
        ice_vsi_delete(vsi);
 unroll_get_qs:
        ice_vsi_put_qs(vsi);
            vsi->agg_node && vsi->agg_node->valid)
                vsi->agg_node->num_vsis--;
        ice_vsi_clear_rings(vsi);
-
+       ice_vsi_free_stats(vsi);
        ice_vsi_put_qs(vsi);
 
        /* retain SW VSI data structure since it is needed to unregister and
        }
 }
 
+/**
+ * ice_vsi_realloc_stat_arrays - Free stats of queues dropped by reallocation
+ * @vsi: VSI pointer
+ * @prev_txq: number of Tx queues before the ring reallocation
+ * @prev_rxq: number of Rx queues before the ring reallocation
+ */
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+{
+       struct ice_vsi_stats *vsi_stat;
+       struct ice_pf *pf = vsi->back;
+       int i;
+
+       if (!prev_txq || !prev_rxq)
+               return 0;
+       if (vsi->type == ICE_VSI_CHNL)
+               return 0;
+
+       vsi_stat = pf->vsi_stats[vsi->idx];
+
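+       /* only shrink here; any growth is handled when
+        * ice_vsi_alloc_ring_stats() runs for the new rings
+        */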
+       if (vsi->num_txq < prev_txq) {
+               for (i = vsi->num_txq; i < prev_txq; i++) {
+                       if (vsi_stat->tx_ring_stats[i]) {
+                               kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+                               WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+                       }
+               }
+       }
+
+       if (vsi->num_rxq < prev_rxq) {
+               for (i = vsi->num_rxq; i < prev_rxq; i++) {
+                       if (vsi_stat->rx_ring_stats[i]) {
+                               kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+                               WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+                       }
+               }
+       }
+
+       return 0;
+}
+
 /**
  * ice_vsi_rebuild - Rebuild VSI after reset
  * @vsi: VSI to be rebuilt
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        struct ice_coalesce_stored *coalesce;
+       int ret, i, prev_txq, prev_rxq;
        int prev_num_q_vectors = 0;
        enum ice_vsi_type vtype;
        struct ice_pf *pf;
-       int ret, i;
 
        if (!vsi)
                return -EINVAL;
 
        prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
+       prev_txq = vsi->num_txq;
+       prev_rxq = vsi->num_rxq;
+
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
        ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
        if (ret)
                if (ret)
                        goto err_vectors;
 
+               ret = ice_vsi_alloc_ring_stats(vsi);
+               if (ret)
+                       goto err_vectors;
+
                ice_vsi_map_rings_to_vectors(vsi);
 
                vsi->stat_offsets_loaded = false;
                if (ret)
                        goto err_vectors;
 
+               ret = ice_vsi_alloc_ring_stats(vsi);
+               if (ret)
+                       goto err_vectors;
+
                vsi->stat_offsets_loaded = false;
                break;
        case ICE_VSI_CHNL:
                        return ice_schedule_reset(pf, ICE_RESET_PFR);
                }
        }
+
+       if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
+               goto err_vectors;
+
        ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
        kfree(coalesce);
 
  */
 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
 {
-       u64_stats_update_begin(&tx_ring->syncp);
-       ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
-       u64_stats_update_end(&tx_ring->syncp);
+       u64_stats_update_begin(&tx_ring->ring_stats->syncp);
+       ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+       u64_stats_update_end(&tx_ring->ring_stats->syncp);
 }
 
 /**
  */
 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
 {
-       u64_stats_update_begin(&rx_ring->syncp);
-       ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
-       u64_stats_update_end(&rx_ring->syncp);
+       u64_stats_update_begin(&rx_ring->ring_stats->syncp);
+       ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+       u64_stats_update_end(&rx_ring->ring_stats->syncp);
 }
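
The begin/end calls above are the writer side of the u64_stats seqcount. The driver's ice_fetch_u64_stats_per_ring() helper, used by the readers in the hunks below, is assumed to follow the standard fetch/retry pattern; a sketch:

        static void
        ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
                                     struct ice_q_stats stats,
                                     u64 *pkts, u64 *bytes)
        {
                unsigned int start;

                do {
                        /* reread if a writer raced with this fetch */
                        start = u64_stats_fetch_begin(syncp);
                        *pkts = stats.pkts;
                        *bytes = stats.bytes;
                } while (u64_stats_fetch_retry(syncp, start));
        }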
 
 /**
 
 
        ice_for_each_txq(vsi, i) {
                struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+               struct ice_ring_stats *ring_stats;
 
                if (!tx_ring)
                        continue;
                if (ice_ring_ch_enabled(tx_ring))
                        continue;
 
+               ring_stats = tx_ring->ring_stats;
+               if (!ring_stats)
+                       continue;
+
                if (tx_ring->desc) {
                        /* If packet counter has not changed the queue is
                         * likely stalled, so force an interrupt for this
                         * prev_pkt would be negative if there was no
                         * pending work.
                         */
-                       packets = tx_ring->stats.pkts & INT_MAX;
-                       if (tx_ring->tx_stats.prev_pkt == packets) {
+                       packets = ring_stats->stats.pkts & INT_MAX;
+                       if (ring_stats->tx_stats.prev_pkt == packets) {
                                /* Trigger sw interrupt to revive the queue */
                                ice_trigger_sw_intr(hw, tx_ring->q_vector);
                                continue;
                         * to ice_get_tx_pending()
                         */
                        smp_rmb();
-                       tx_ring->tx_stats.prev_pkt =
+                       ring_stats->tx_stats.prev_pkt =
                            ice_get_tx_pending(tx_ring) ? packets : -1;
                }
        }
 
        ice_for_each_xdp_txq(vsi, i) {
                u16 xdp_q_idx = vsi->alloc_txq + i;
+               struct ice_ring_stats *ring_stats;
                struct ice_tx_ring *xdp_ring;
 
                xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
-
                if (!xdp_ring)
                        goto free_xdp_rings;
 
+               ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+               if (!ring_stats) {
+                       ice_free_tx_ring(xdp_ring);
+                       goto free_xdp_rings;
+               }
+
+               xdp_ring->ring_stats = ring_stats;
                xdp_ring->q_index = xdp_q_idx;
                xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
                xdp_ring->vsi = vsi;
        return 0;
 
 free_xdp_rings:
-       for (; i >= 0; i--)
-               if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+       for (; i >= 0; i--) {
+               if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
+                       kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+                       vsi->xdp_rings[i]->ring_stats = NULL;
                        ice_free_tx_ring(vsi->xdp_rings[i]);
+               }
+       }
        return -ENOMEM;
 }
 
                                synchronize_rcu();
                                ice_free_tx_ring(vsi->xdp_rings[i]);
                        }
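+                       /* the stats block has its own rcu_head; like the
+                        * ring, it must outlive in-flight RCU readers
+                        */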
+                       kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+                       vsi->xdp_rings[i]->ring_stats = NULL;
                        kfree_rcu(vsi->xdp_rings[i], rcu);
                        vsi->xdp_rings[i] = NULL;
                }
                goto err_init_pf_unroll;
        }
 
+       pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+                                    sizeof(*pf->vsi_stats), GFP_KERNEL);
+       if (!pf->vsi_stats) {
+               err = -ENOMEM;
+               goto err_init_vsi_unroll;
+       }
+
        err = ice_init_interrupt_scheme(pf);
        if (err) {
                dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
                err = -EIO;
-               goto err_init_vsi_unroll;
+               goto err_init_vsi_stats_unroll;
        }
 
        /* In case of MSIX we are going to setup the misc vector right here
        ice_free_irq_msix_misc(pf);
 err_init_interrupt_unroll:
        ice_clear_interrupt_scheme(pf);
+err_init_vsi_stats_unroll:
+       devm_kfree(dev, pf->vsi_stats);
+       pf->vsi_stats = NULL;
 err_init_vsi_unroll:
        devm_kfree(dev, pf->vsi);
 err_init_pf_unroll:
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[i]);
        }
+       devm_kfree(&pdev->dev, pf->vsi_stats);
+       pf->vsi_stats = NULL;
        ice_deinit_pf(pf);
        ice_devlink_destroy_regions(pf);
        ice_deinit_hw(&pf->hw);
                u64 pkts = 0, bytes = 0;
 
                ring = READ_ONCE(rings[i]);
-               if (!ring)
+               if (!ring || !ring->ring_stats)
                        continue;
-               ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+               ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
+                                            ring->ring_stats->stats, &pkts,
+                                            &bytes);
                vsi_stats->tx_packets += pkts;
                vsi_stats->tx_bytes += bytes;
-               vsi->tx_restart += ring->tx_stats.restart_q;
-               vsi->tx_busy += ring->tx_stats.tx_busy;
-               vsi->tx_linearize += ring->tx_stats.tx_linearize;
+               vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
+               vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
+               vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
        }
 }
 
        /* update Rx rings counters */
        ice_for_each_rxq(vsi, i) {
                struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+               struct ice_ring_stats *ring_stats;
 
-               ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+               ring_stats = ring->ring_stats;
+               if (!ring_stats)
+                       continue;
+               ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
+                                            ring_stats->stats, &pkts,
+                                            &bytes);
                vsi_stats->rx_packets += pkts;
                vsi_stats->rx_bytes += bytes;
-               vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
-               vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+               vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
+               vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
        }
 
        /* update XDP Tx rings counters */
 
        u64 pkts, bytes;
 
        tx_ring = np->vsi->tx_rings[vf_id];
-       ice_fetch_u64_stats_per_ring(&tx_ring->syncp, tx_ring->stats,
+       ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
+                                    tx_ring->ring_stats->stats,
                                     &pkts, &bytes);
        stats->rx_packets = pkts;
        stats->rx_bytes = bytes;
 
        rx_ring = np->vsi->rx_rings[vf_id];
-       ice_fetch_u64_stats_per_ring(&rx_ring->syncp, rx_ring->stats,
+       ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
+                                    rx_ring->ring_stats->stats,
                                     &pkts, &bytes);
        stats->tx_packets = pkts;
        stats->tx_bytes = bytes;
-       stats->tx_dropped = rx_ring->rx_stats.alloc_page_failed +
-                           rx_ring->rx_stats.alloc_buf_failed;
+       stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
+                           rx_ring->ring_stats->rx_stats.alloc_buf_failed;
 
        return 0;
 }
 
                if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
                    !test_bit(ICE_VSI_DOWN, vsi->state)) {
                        netif_tx_wake_queue(txring_txq(tx_ring));
-                       ++tx_ring->tx_stats.restart_q;
+                       ++tx_ring->ring_stats->tx_stats.restart_q;
                }
        }
 
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       tx_ring->tx_stats.prev_pkt = -1;
+       tx_ring->ring_stats->tx_stats.prev_pkt = -1;
        return 0;
 
 err:
        /* alloc new page for storage */
        page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
-               rx_ring->rx_stats.alloc_page_failed++;
+               rx_ring->ring_stats->rx_stats.alloc_page_failed++;
                return false;
        }
 
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_pages(page, ice_rx_pg_order(rx_ring));
-               rx_ring->rx_stats.alloc_page_failed++;
+               rx_ring->ring_stats->rx_stats.alloc_page_failed++;
                return false;
        }
 
        if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
                return false;
 
-       rx_ring->rx_stats.non_eop_descs++;
+       rx_ring->ring_stats->rx_stats.non_eop_descs++;
 
        return true;
 }
                }
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
-                       rx_ring->rx_stats.alloc_buf_failed++;
+                       rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
                        if (rx_buf)
                                rx_buf->pagecnt_bias++;
                        break;
                ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
        rx_ring->skb = skb;
 
-       ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
+       if (rx_ring->ring_stats)
+               ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
+                                        total_rx_bytes);
 
        /* guarantee a trip back through this routine if there was a failure */
        return failure ? budget : (int)total_rx_pkts;
                struct ice_tx_ring *tx_ring;
 
                ice_for_each_tx_ring(tx_ring, *rc) {
-                       packets += tx_ring->stats.pkts;
-                       bytes += tx_ring->stats.bytes;
+                       struct ice_ring_stats *ring_stats;
+
+                       ring_stats = tx_ring->ring_stats;
+                       if (!ring_stats)
+                               continue;
+                       packets += ring_stats->stats.pkts;
+                       bytes += ring_stats->stats.bytes;
                }
        } else {
                struct ice_rx_ring *rx_ring;
 
                ice_for_each_rx_ring(rx_ring, *rc) {
-                       packets += rx_ring->stats.pkts;
-                       bytes += rx_ring->stats.bytes;
+                       struct ice_ring_stats *ring_stats;
+
+                       ring_stats = rx_ring->ring_stats;
+                       if (!ring_stats)
+                               continue;
+                       packets += ring_stats->stats.pkts;
+                       bytes += ring_stats->stats.bytes;
                }
        }
 
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_tx_start_queue(txring_txq(tx_ring));
-       ++tx_ring->tx_stats.restart_q;
+       ++tx_ring->ring_stats->tx_stats.restart_q;
        return 0;
 }
 
                if (__skb_linearize(skb))
                        goto out_drop;
                count = ice_txd_use_count(skb->len);
-               tx_ring->tx_stats.tx_linearize++;
+               tx_ring->ring_stats->tx_stats.tx_linearize++;
        }
 
        /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
         */
        if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
                              ICE_DESCS_FOR_CTX_DESC)) {
-               tx_ring->tx_stats.tx_busy++;
+               tx_ring->ring_stats->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
 
        u64 alloc_buf_failed;
 };
 
+struct ice_ring_stats {
+       struct rcu_head rcu;    /* to avoid race on free */
+       struct ice_q_stats stats;
+       struct u64_stats_sync syncp;
+       union {
+               struct ice_txq_stats tx_stats;
+               struct ice_rxq_stats rx_stats;
+       };
+};
+
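Blocks of this type are published with WRITE_ONCE() and retired with kfree_rcu(), so a lock-free reader dereferences them inside an RCU read-side section. A sketch under those assumptions; ice_read_tx_pkts() is purely illustrative:

        static u64 ice_read_tx_pkts(struct ice_ring_stats **slot)
        {
                struct ice_ring_stats *rs;
                u64 pkts = 0;

                rcu_read_lock();
                rs = READ_ONCE(*slot);
                if (rs)
                        pkts = rs->stats.pkts; /* 32-bit would need syncp */
                rcu_read_unlock();
                return pkts;
        }
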
 enum ice_ring_state_t {
        ICE_TX_XPS_INIT_DONE,
        ICE_TX_NBITS,
        u16 rx_buf_len;
 
        /* stats structs */
-       struct ice_rxq_stats rx_stats;
-       struct ice_q_stats      stats;
-       struct u64_stats_sync syncp;
+       struct ice_ring_stats *ring_stats;
 
        struct rcu_head rcu;            /* to avoid race on free */
        /* CL4 - 3rd cacheline starts here */
        u16 count;                      /* Number of descriptors */
        u16 q_index;                    /* Queue number of ring */
        /* stats structs */
-       struct ice_txq_stats tx_stats;
+       struct ice_ring_stats *ring_stats;
        /* CL3 - 3rd cacheline starts here */
-       struct ice_q_stats      stats;
-       struct u64_stats_sync syncp;
        struct rcu_head rcu;            /* to avoid race on free */
        DECLARE_BITMAP(xps_state, ICE_TX_NBITS);        /* XPS Config State */
        struct ice_channel *ch;
 
                ice_clean_xdp_irq(xdp_ring);
 
        if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
-               xdp_ring->tx_stats.tx_busy++;
+               xdp_ring->ring_stats->tx_stats.tx_busy++;
                return ICE_XDP_CONSUMED;
        }
 
 
  */
 static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
 {
-       memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
-              sizeof(vsi->rx_rings[q_idx]->rx_stats));
-       memset(&vsi->tx_rings[q_idx]->stats, 0,
-              sizeof(vsi->tx_rings[q_idx]->stats));
+       struct ice_vsi_stats *vsi_stat;
+       struct ice_pf *pf;
+
+       pf = vsi->back;
+       if (!pf->vsi_stats)
+               return;
+
+       vsi_stat = pf->vsi_stats[vsi->idx];
+       if (!vsi_stat)
+               return;
+
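+       /* zero only the packet/byte counters; each block's rcu_head and
+        * syncp fields are left untouched
+        */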
+       memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+              sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+       memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+              sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
        if (ice_is_xdp_ena_vsi(vsi))
-               memset(&vsi->xdp_rings[q_idx]->stats, 0,
-                      sizeof(vsi->xdp_rings[q_idx]->stats));
+               memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+                      sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
 }
 
 /**
                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, xdp);
                if (!skb) {
-                       rx_ring->rx_stats.alloc_buf_failed++;
+                       rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
                        break;
                }