strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pcie_perf_stats_desc[i].format);
 
-       for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
+       for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pcie_perf_stall_stats_desc[i].format);
+                      pcie_perf_stats_desc64[i].format);
+
+       for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pcie_perf_stall_stats_desc[i].format);
 
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                  pcie_perf_stats_desc, i);
 
+       for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
+               data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+                                                 pcie_perf_stats_desc64, i);
+
        for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
                data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                  pcie_perf_stall_stats_desc, i);
 
        MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
                 counter_set.pcie_perf_cntrs_grp_data_layout.c)
 
+#define PCIE_PERF_OFF64(c) \
+       MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+#define PCIE_PERF_GET64(pcie_stats, c) \
+       MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
+                  counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+
 struct mlx5e_pcie_stats {
        __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
 };
        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
 };
 
+static const struct counter_desc pcie_perf_stats_desc64[] = {
+       { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
 #define NUM_PCIE_PERF_COUNTERS(priv) \
        (ARRAY_SIZE(pcie_perf_stats_desc) * \
         MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+#define NUM_PCIE_PERF_COUNTERS64(priv) \
+       (ARRAY_SIZE(pcie_perf_stats_desc64) * \
+        MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
 #define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
        (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
         MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                                         NUM_PPORT_PRIO + \
                                         NUM_PPORT_ETH_EXT_COUNTERS(priv))
 #define NUM_PCIE_COUNTERS(priv)                (NUM_PCIE_PERF_COUNTERS(priv) + \
+                                        NUM_PCIE_PERF_COUNTERS64(priv) + \
                                         NUM_PCIE_PERF_STALL_COUNTERS(priv))
 #define NUM_RQ_STATS                   ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS                   ARRAY_SIZE(sq_stats_desc)
 
 
        u8         crc_error_tlp[0x20];
 
-       u8         reserved_at_140[0x40];
+       u8         tx_overflow_buffer_pkt_high[0x20];
+
+       u8         tx_overflow_buffer_pkt_low[0x20];
 
        u8         outbound_stalled_reads[0x20];
 
 struct mlx5_ifc_mcam_enhanced_features_bits {
        u8         reserved_at_0[0x7b];
        u8         pcie_outbound_stalled[0x1];
-       u8         reserved_at_7c[0x1];
+       u8         tx_overflow_buffer_pkt[0x1];
        u8         mtpps_enh_out_per_adj[0x1];
        u8         mtpps_fs[0x1];
        u8         pcie_performance_group[0x1];