                ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
                if (!ctx->tqm_fp_rings_count)
                        ctx->tqm_fp_rings_count = bp->max_q;
+               else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+                       ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
 
-               tqm_rings = ctx->tqm_fp_rings_count + 1;
+               tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
                ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
                if (!ctx_pg) {
                        kfree(ctx);
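
A minimal sketch of the pattern this hunk introduces, using hypothetical names (MAX_SP_RINGS, MAX_FP_RINGS, alloc_tqm_table) rather than the driver's: a count reported by firmware is treated as untrusted and clamped to a named cap before it sizes an allocation, so an out-of-range report can neither inflate the table nor index past it later.

#include <stdlib.h>

#define MAX_SP_RINGS    1       /* slow-path rings, always present */
#define MAX_FP_RINGS    8       /* cap on firmware-reported fast-path rings */

struct ring_pg_info {
        void *pages;
};

/* clamp the firmware-reported count, then size the table from it */
static struct ring_pg_info *alloc_tqm_table(unsigned int fw_fp_rings,
                                            unsigned int default_fp_rings)
{
        unsigned int fp = fw_fp_rings;

        if (!fp)                        /* 0 means "use the default" */
                fp = default_fp_rings;
        else if (fp > MAX_FP_RINGS)     /* never trust more than the cap */
                fp = MAX_FP_RINGS;

        return calloc(fp + MAX_SP_RINGS, sizeof(struct ring_pg_info));
}

int main(void)
{
        /* an out-of-range report (e.g. 12) is clamped to 8 fp rings */
        struct ring_pg_info *tbl = alloc_tqm_table(12, MAX_FP_RINGS);

        if (!tbl)
                return 1;
        free(tbl);
        return 0;
}

Clamping at the point the value first enters the code means every later consumer, allocation and loop alike, sees an in-range count.
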
             pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
             pg_dir = &req.tqm_sp_page_dir,
             ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
-            i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+            i < BNXT_MAX_TQM_RINGS;
+            i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
                if (!(enables & ena))
                        continue;
 
 
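The loop hunk above replaces a hard-coded bound of 9 with a named constant. A self-contained sketch of the same idea, with illustrative names only: derive the iteration limit from the array itself (or from the constant that sizes it), so bound and storage cannot drift apart the way a magic number can.

#include <stdio.h>

#define MAX_SP_RINGS    1
#define MAX_FP_RINGS    8
#define MAX_RINGS       (MAX_SP_RINGS + MAX_FP_RINGS)

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        int tqm_entries[MAX_RINGS];
        unsigned int i, ena, enables = 0x7;     /* pretend bitmap of enabled rings */

        for (i = 0, ena = 1; i < ARRAY_SIZE(tqm_entries);
             i++, ena <<= 1) {
                if (!(enables & ena))
                        continue;
                tqm_entries[i] = 0;     /* configure only the enabled rings */
                printf("ring %u configured\n", i);
        }
        return 0;
}
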
        struct bnxt_ctx_pg_info **ctx_pg_tbl;
 };
 
+#define BNXT_MAX_TQM_SP_RINGS          1
+#define BNXT_MAX_TQM_FP_RINGS          8
+#define BNXT_MAX_TQM_RINGS             \
+       (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
 struct bnxt_ctx_mem_info {
        u32     qp_max_entries;
        u16     qp_min_qp1_entries;
        struct bnxt_ctx_pg_info stat_mem;
        struct bnxt_ctx_pg_info mrav_mem;
        struct bnxt_ctx_pg_info tim_mem;
-       struct bnxt_ctx_pg_info *tqm_mem[9];
+       struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
 };
 
 struct bnxt_fw_health {
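
The header hunk sizes the fixed tqm_mem[] table with the same constants that now bound the runtime count. Below is a hedged reconstruction of why that pairing matters, not the driver's actual code: without the clamp, a fixed-size table indexed by an unchecked, device-reported count can be written past its end.

#include <stdio.h>

#define MAX_SP_RINGS    1
#define MAX_FP_RINGS    8
#define MAX_RINGS       (MAX_SP_RINGS + MAX_FP_RINGS)

struct ctx {
        int tqm_mem[MAX_RINGS];         /* fixed-size, as in the header hunk */
};

static void fill_table(struct ctx *c, unsigned int fw_fp_count)
{
        unsigned int i, n = fw_fp_count;

        if (n > MAX_FP_RINGS)           /* the clamp the patch introduces */
                n = MAX_FP_RINGS;

        /* without the clamp, a report of, say, 16 would drive i past
         * tqm_mem[MAX_RINGS - 1]: an out-of-bounds write */
        for (i = 0; i < n + MAX_SP_RINGS; i++)
                c->tqm_mem[i] = 0;
}

int main(void)
{
        struct ctx c;

        fill_table(&c, 16);             /* clamped to 8 + 1 = 9 entries */
        printf("filled %d entries safely\n", MAX_RINGS);
        return 0;
}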