vsi->rx_rings[i].ring_active);
                        dev_info(&pf->pdev->dev,
                                 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-                                i, vsi->rx_rings[i].rx_stats.packets,
-                                vsi->rx_rings[i].rx_stats.bytes,
+                                i, vsi->rx_rings[i].stats.packets,
+                                vsi->rx_rings[i].stats.bytes,
                                 vsi->rx_rings[i].rx_stats.non_eop_descs);
                        dev_info(&pf->pdev->dev,
                                 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",

                                 vsi->tx_rings[i].ring_active);
                        dev_info(&pf->pdev->dev,
                                 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-                                i, vsi->tx_rings[i].tx_stats.packets,
-                                vsi->tx_rings[i].tx_stats.bytes,
+                                i, vsi->tx_rings[i].stats.packets,
+                                vsi->tx_rings[i].stats.bytes,
                                 vsi->tx_rings[i].tx_stats.restart_queue);
                        dev_info(&pf->pdev->dev,
                                 "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
 
                data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->tx_rings[j].tx_stats.packets;
-               data[i++] = vsi->tx_rings[j].tx_stats.bytes;
-       }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->rx_rings[j].rx_stats.packets;
-               data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+       for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+               data[i] = vsi->tx_rings[j].stats.packets;
+               data[i + 1] = vsi->tx_rings[j].stats.bytes;
+               data[i + 2] = vsi->rx_rings[j].stats.packets;
+               data[i + 3] = vsi->rx_rings[j].stats.bytes;
        }
        if (vsi == pf->vsi[pf->lan_vsi]) {
                for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {

                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
                        p += ETH_GSTRING_LEN;
-               }
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
 
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings)
                for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       memset(&vsi->rx_rings[i].stats, 0,
+                              sizeof(vsi->rx_rings[i].stats));
                        memset(&vsi->rx_rings[i].rx_stats, 0 ,
                               sizeof(vsi->rx_rings[i].rx_stats));
+                       memset(&vsi->tx_rings[i].stats, 0,
+                              sizeof(vsi->tx_rings[i].stats));
                        memset(&vsi->tx_rings[i].tx_stats, 0,
                               sizeof(vsi->tx_rings[i].tx_stats));
                }

                struct i40e_ring *p;
 
                p = &vsi->rx_rings[q];
-               rx_b += p->rx_stats.bytes;
-               rx_p += p->rx_stats.packets;
+               rx_b += p->stats.bytes;
+               rx_p += p->stats.packets;
                rx_buf += p->rx_stats.alloc_rx_buff_failed;
                rx_page += p->rx_stats.alloc_rx_page_failed;
 
                p = &vsi->tx_rings[q];
-               tx_b += p->tx_stats.bytes;
-               tx_p += p->tx_stats.packets;
+               tx_b += p->stats.bytes;
+               tx_p += p->stats.packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
        }
 
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
            tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
 
        i += tx_ring->count;
        tx_ring->next_to_clean = i;
-       tx_ring->tx_stats.bytes += total_bytes;
-       tx_ring->tx_stats.packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
        }
 
        rx_ring->next_to_clean = i;
-       rx_ring->rx_stats.packets += total_rx_packets;
-       rx_ring->rx_stats.bytes += total_rx_bytes;
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
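
The header hunks below split the per-ring counters: packets and bytes move into a new struct i40e_queue_stats that both ring types share, while the Tx-/Rx-specific counters stay in i40e_tx_queue_stats and i40e_rx_queue_stats inside the existing per-ring union. As a reading aid, here is a minimal standalone sketch of the resulting layout; everything outside the three stats structs (the trimmed ring struct, main(), and the counter updates) is an illustrative stand-in, not driver code.

/*
 * Minimal standalone sketch of the layout the header hunks below create.
 * The stats struct names and fields come from the patch; this trimmed
 * "ring" struct and main() are illustrative stand-ins, NOT driver code.
 * Build with any C11 compiler (anonymous union), e.g. gcc -std=gnu11.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;

struct i40e_queue_stats {               /* shared by Tx and Rx rings */
        u64 packets;
        u64 bytes;
};

struct i40e_tx_queue_stats {            /* Tx-only counters */
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
};

struct i40e_rx_queue_stats {            /* Rx-only counters */
        u64 non_eop_descs;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
};

struct ring {                           /* stand-in for struct i40e_ring */
        struct i40e_queue_stats stats;  /* generic byte/packet counters */
        union {                         /* type-specific counters */
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;
        };
};

int main(void)
{
        struct ring tx_ring;

        /* reset: clear both structs, mirroring the memset hunk earlier */
        memset(&tx_ring, 0, sizeof(tx_ring));

        /* clean path: generic counters now go through ->stats */
        tx_ring.stats.packets += 10;
        tx_ring.stats.bytes += 1500;

        /* hang check: Tx-specific state stays under ->tx_stats */
        tx_ring.tx_stats.tx_done_old = tx_ring.stats.packets;

        printf("packets=%llu bytes=%llu tx_done_old=%llu\n",
               (unsigned long long)tx_ring.stats.packets,
               (unsigned long long)tx_ring.stats.bytes,
               (unsigned long long)tx_ring.tx_stats.tx_done_old);
        return 0;
}

With this split, code that only cares about byte/packet totals (the ethtool stats, the debugfs dump, and the clean paths above) can use ring->stats for both Tx and Rx rings, while the type-specific fields keep their existing tx_stats/rx_stats names.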
 
 
        unsigned int page_offset;
 };
 
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
        u64 packets;
        u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
 };
 
 struct i40e_rx_queue_stats {
-       u64 packets;
-       u64 bytes;
        u64 non_eop_descs;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;

        bool ring_active;               /* is ring online or not */
 
        /* stats structs */
+       struct i40e_queue_stats stats;
        union {
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;