 #define IXGBEVF_QUEUE_STATS_LEN ( \
        (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+        ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
         (sizeof(struct ixgbevf_stats) / sizeof(u64)))
 #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
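
For reference, the length arithmetic above works out as follows: sizeof(struct ixgbevf_stats) / sizeof(u64) is the number of u64 counters exported per ring, which is two assuming the struct carries only a packets and a bytes field, so each Tx, XDP, and Rx queue contributes two entries. A minimal standalone sketch of the same computation; the queue counts and struct layout here are illustrative assumptions, not the driver's definitions:

#include <stdio.h>
#include <stdint.h>

/* assumed layout: the driver exports exactly packets and bytes per ring */
struct ixgbevf_stats {
	uint64_t packets;
	uint64_t bytes;
};

int main(void)
{
	/* hypothetical queue counts for illustration */
	unsigned int num_tx_queues = 4, num_xdp_queues = 4, num_rx_queues = 4;
	unsigned int stats_per_ring = sizeof(struct ixgbevf_stats) / sizeof(uint64_t);
	unsigned int queue_stats_len =
		(num_tx_queues + num_xdp_queues + num_rx_queues) * stats_per_ring;

	/* prints: 12 rings x 2 counters = 24 queue stats */
	printf("%u rings x %u counters = %u queue stats\n",
	       num_tx_queues + num_xdp_queues + num_rx_queues,
	       stats_per_ring, queue_stats_len);
	return 0;
}
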
                i += 2;
        }
 
+       /* populate XDP queue data */
+       for (j = 0; j < adapter->num_xdp_queues; j++) {
+               ring = adapter->xdp_ring[j];
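+               /* an XDP ring may be unallocated; emit zeros so the data
+                * layout stays aligned with the reported string table
+                */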
+               if (!ring) {
+                       data[i++] = 0;
+                       data[i++] = 0;
+                       continue;
+               }
+
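+               /* snapshot packets/bytes under the u64_stats seqcount,
+                * retrying if a writer updated the counters mid-read
+                */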
+               do {
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
+                       data[i] = ring->stats.packets;
+                       data[i + 1] = ring->stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+               i += 2;
+       }
+
        /* populate Rx queue data */
        for (j = 0; j < adapter->num_rx_queues; j++) {
                ring = adapter->rx_ring[j];
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
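+               /* keep string order in sync with the per-queue data
+                * order in ixgbevf_get_ethtool_stats()
+                */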
+               for (i = 0; i < adapter->num_xdp_queues; i++) {
+                       sprintf(p, "xdp_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "xdp_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;