[BNXT_RE_TX_CNP].name                = "tx_cnp_pkts",
        [BNXT_RE_RX_CNP].name                = "rx_cnp_pkts",
        [BNXT_RE_RX_ECN].name                = "rx_ecn_marked_pkts",
+       [BNXT_RE_PACING_RESCHED].name        = "pacing_reschedule",
+       [BNXT_RE_PACING_CMPL].name           = "pacing_complete",
+       [BNXT_RE_PACING_ALERT].name          = "pacing_alerts",
+       [BNXT_RE_DB_FIFO_REG].name           = "db_fifo_register",
 };
 
 static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
                        err_s->res_oos_drop_count;
 }
 
+/* bnxt_re_copy_db_pacing_stats() - Fill DB pacing counters into HW stats
+ * @rdev: rdma device instance
+ * @stats: rdma_hw_stats array to populate
+ *
+ * Copies the software-maintained doorbell pacing counters into @stats and
+ * snapshots the DB FIFO register by reading it from BAR0 at the pacing
+ * FIFO register offset.
+ */
+static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
+                                        struct rdma_hw_stats *stats)
+{
+       struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
+
+       stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
+       stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
+       stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
+       stats->value[BNXT_RE_DB_FIFO_REG] =
+               readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+}
+
 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
                            struct rdma_hw_stats *stats,
                            u32 port, int index)
                                goto done;
                        }
                }
+               if (rdev->pacing.dbr_pacing)
+                       bnxt_re_copy_db_pacing_stats(rdev, stats);
        }
 
 done:
 
        BNXT_RE_TX_CNP,
        BNXT_RE_RX_CNP,
        BNXT_RE_RX_ECN,
+       BNXT_RE_PACING_RESCHED,
+       BNXT_RE_PACING_CMPL,
+       BNXT_RE_PACING_ALERT,
+       BNXT_RE_DB_FIFO_REG,
        BNXT_RE_NUM_EXT_COUNTERS
 };
 
 #define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
 
+/* Software counters for doorbell (DB) pacing activity; reported to the
+ * rdma hw-stats interface by bnxt_re_copy_db_pacing_stats().
+ */
+struct bnxt_re_db_pacing_stats {
+       u64 resched;	/* pacing work rescheduled itself (timer restarted) */
+       u64 complete;	/* pacing finished; default pacing data restored */
+       u64 alerts;	/* pacing alerts that scheduled the pacing work */
+};
+
 struct bnxt_re_res_cntrs {
        atomic_t qp_count;
        atomic_t rc_qp_count;
 struct bnxt_re_stats {
        struct bnxt_re_rstat            rstat;
        struct bnxt_re_res_cntrs        res;
+       struct bnxt_re_db_pacing_stats  pacing;
 };
 
 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 
                pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
        schedule_delayed_work(&rdev->dbq_pacing_work,
                              msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+       rdev->stats.pacing.alerts++;
        mutex_unlock(&rdev->pacing.dbq_lock);
 }
 
        pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
        if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
                bnxt_re_set_default_pacing_data(rdev);
+               rdev->stats.pacing.complete++;
                goto dbq_unlock;
        }
 
 restart_timer:
        schedule_delayed_work(&rdev->dbq_pacing_work,
                              msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+       rdev->stats.pacing.resched++;
 dbq_unlock:
        rdev->pacing.do_pacing_save = pacing_data->do_pacing;
        mutex_unlock(&rdev->pacing.dbq_lock);