/* Number of pinned cpu breakpoints in a cpu */
        unsigned int    cpu_pinned;
        /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
+#ifdef hw_breakpoint_slots
+       unsigned int    tsk_pinned[hw_breakpoint_slots(0)];
+#else
        unsigned int    *tsk_pinned;
+#endif
        /* Number of non-pinned cpu/task breakpoints in a cpu */
        unsigned int    flexible; /* XXX: placeholder, see fetch_this_slot() */
 };
 
 static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
-static int nr_slots[TYPE_MAX] __ro_after_init;
 
 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
 {
 /* Serialize accesses to the above constraints */
 static DEFINE_MUTEX(nr_bp_mutex);
 
+#ifdef hw_breakpoint_slots
+/*
+ * Number of breakpoint slots is constant, and the same for all types.
+ */
+static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
+/* Slot count is a compile-time constant: nothing to cache, nothing to allocate. */
+static inline int hw_breakpoint_slots_cached(int type) { return hw_breakpoint_slots(type); }
+static inline int init_breakpoint_slots(void)          { return 0; }
+#else
+/*
+ * Dynamic number of breakpoint slots.
+ */
+static int __nr_bp_slots[TYPE_MAX] __ro_after_init;
+
+/* Return the slot count for @type cached at boot by init_breakpoint_slots(). */
+static inline int hw_breakpoint_slots_cached(int type)
+{
+       return __nr_bp_slots[type];
+}
+
+/*
+ * Cache the per-type slot counts and allocate each possible CPU's
+ * tsk_pinned histogram (one counter per slot, zero-initialized).
+ * Returns 0 on success or -ENOMEM, freeing partial allocations itself.
+ */
+static __init int init_breakpoint_slots(void)
+{
+       int i, cpu, err_cpu;
+
+       for (i = 0; i < TYPE_MAX; i++)
+               __nr_bp_slots[i] = hw_breakpoint_slots(i);
+
+       for_each_possible_cpu(cpu) {
+               for (i = 0; i < TYPE_MAX; i++) {
+                       struct bp_cpuinfo *info = get_bp_info(cpu, i);
+
+                       info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(int), GFP_KERNEL);
+                       if (!info->tsk_pinned)
+                               goto err;
+               }
+       }
+
+       return 0;
+err:
+       /*
+        * Unwind every CPU up to and including the one that failed;
+        * kfree(NULL) is a no-op for types not yet allocated on @cpu.
+        */
+       for_each_possible_cpu(err_cpu) {
+               for (i = 0; i < TYPE_MAX; i++)
+                       kfree(get_bp_info(err_cpu, i)->tsk_pinned);
+               if (err_cpu == cpu)
+                       break;
+       }
+
+       return -ENOMEM;
+}
+#endif
+
 __weak int hw_breakpoint_weight(struct perf_event *bp)
 {
        return 1;
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int i;
 
-       for (i = nr_slots[type] - 1; i >= 0; i--) {
+       for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }
        fetch_this_slot(&slots, weight);
 
        /* Flexible counters need to keep at least one slot */
-       if (slots.pinned + (!!slots.flexible) > nr_slots[type])
+       if (slots.pinned + (!!slots.flexible) > hw_breakpoint_slots_cached(type))
                return -ENOSPC;
 
        ret = arch_reserve_bp_slot(bp);
                        if (info->cpu_pinned)
                                return true;
 
-                       for (int slot = 0; slot < nr_slots[type]; ++slot) {
+                       for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
                                if (info->tsk_pinned[slot])
                                        return true;
                        }
 
int __init init_hw_breakpoint(void)
{
-       int cpu, err_cpu;
-       int i, ret;
-
-       for (i = 0; i < TYPE_MAX; i++)
-               nr_slots[i] = hw_breakpoint_slots(i);
-
-       for_each_possible_cpu(cpu) {
-               for (i = 0; i < TYPE_MAX; i++) {
-                       struct bp_cpuinfo *info = get_bp_info(cpu, i);
-
-                       info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
-                                                       GFP_KERNEL);
-                       if (!info->tsk_pinned) {
-                               ret = -ENOMEM;
-                               goto err;
-                       }
-               }
-       }
+       int ret;
 
        ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
        if (ret)
-               goto err;
+               return ret;
+
+       /*
+        * Slot caching/allocation now lives in init_breakpoint_slots(),
+        * which frees everything it allocated on failure — so the old
+        * err: unwind label is no longer needed here.
+        */
+       ret = init_breakpoint_slots();
+       if (ret)
+               return ret;
 
        constraints_initialized = true;
 
        perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);
 
        return register_die_notifier(&hw_breakpoint_exceptions_nb);
-
-err:
-       for_each_possible_cpu(err_cpu) {
-               for (i = 0; i < TYPE_MAX; i++)
-                       kfree(get_bp_info(err_cpu, i)->tsk_pinned);
-               if (err_cpu == cpu)
-                       break;
-       }
-
-       return ret;
}