#include <linux/slab.h>
 
 /*
- * Constraints data
+ * Data structure to track the total uses of N slots across tasks or CPUs;
+ * bp_slots_histogram::count[N] is the number of tasks (or CPUs) that have
+ * N+1 breakpoint slots assigned.
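+ * For example, count[1] == 3 means that three tasks (or CPUs) each have
+ * exactly 2 breakpoint slots assigned.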
  */
-struct bp_cpuinfo {
-       /* Number of pinned cpu breakpoints in a cpu */
-       unsigned int    cpu_pinned;
-       /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
+struct bp_slots_histogram {
 #ifdef hw_breakpoint_slots
-       atomic_t        tsk_pinned[hw_breakpoint_slots(0)];
+       atomic_t count[hw_breakpoint_slots(0)];
 #else
-       atomic_t        *tsk_pinned;
+       atomic_t *count;
 #endif
 };
 
+/*
+ * Per-CPU constraints data.
+ */
+struct bp_cpuinfo {
+       /* Number of pinned CPU breakpoints in a CPU. */
+       unsigned int                    cpu_pinned;
+       /* Histogram of pinned task breakpoints in a CPU. */
+       struct bp_slots_histogram       tsk_pinned;
+};
+
 static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
 
 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
        return __nr_bp_slots[type];
 }
 
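+/*
+ * Allocate the dynamically sized count[] array for @type (only needed when
+ * hw_breakpoint_slots is not a compile-time constant); returns true iff the
+ * allocation succeeded.
+ */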
+static __init bool
+bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
+{
+       hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
+       return hist->count;
+}
+
+static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
+{
+       kfree(hist->count);
+}
+
 static __init int init_breakpoint_slots(void)
 {
        int i, cpu, err_cpu;
                for (i = 0; i < TYPE_MAX; i++) {
                        struct bp_cpuinfo *info = get_bp_info(cpu, i);
 
-                       info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(atomic_t), GFP_KERNEL);
-                       if (!info->tsk_pinned)
+                       if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
                                goto err;
                }
        }
 err:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
-                       kfree(get_bp_info(err_cpu, i)->tsk_pinned);
+                       bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
                if (err_cpu == cpu)
                        break;
        }
 }
 #endif
 
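+/*
+ * Update the histogram when the number of used slots changes from @old to
+ * @old + @val: one count moves from bucket @old - 1 to bucket @old - 1 + @val.
+ * For example, @old == 2 and @val == 1 decrements count[1] and increments
+ * count[2]; @old == 0 with a positive @val only increments the new bucket.
+ */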
+static inline void
+bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
+{
+       const int old_idx = old - 1;
+       const int new_idx = old_idx + val;
+
+       if (old_idx >= 0)
+               WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);
+       if (new_idx >= 0)
+               WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);
+}
+
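+/*
+ * Return the maximum number of slots used by any single task (or CPU), i.e.
+ * the index of the highest non-empty bucket plus 1, or 0 if the histogram is
+ * empty. Callers must ensure there are no concurrent writers, e.g. by holding
+ * bp_cpuinfo_sem as a writer, so that the snapshot is stable.
+ */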
+static int
+bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
+{
+       for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
+               const int count = atomic_read(&hist->count[i]);
+
+               /* Catch unexpected writers; we want a stable snapshot. */
+               ASSERT_EXCLUSIVE_WRITER(hist->count[i]);
+               if (count > 0)
+                       return i + 1;
+               WARN(count < 0, "inconsistent breakpoint slots histogram");
+       }
+
+       return 0;
+}
+
 #ifndef hw_breakpoint_weight
 static inline int hw_breakpoint_weight(struct perf_event *bp)
 {
 }
 
 /*
- * Report the maximum number of pinned breakpoints a task
- * have in this cpu
+ * Return the maximum number of pinned breakpoints a task has in this CPU.
  */
 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
-       atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
-       int i;
+       struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;
 
        /*
         * At this point we want to have acquired the bp_cpuinfo_sem as a
         * toggle_bp_task_slot() to tsk_pinned, and we get a stable snapshot.
         */
        lockdep_assert_held_write(&bp_cpuinfo_sem);
-
-       for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
-               ASSERT_EXCLUSIVE_WRITER(tsk_pinned[i]); /* Catch unexpected writers. */
-               if (atomic_read(&tsk_pinned[i]) > 0)
-                       return i + 1;
-       }
-
-       return 0;
+       return bp_slots_histogram_max(tsk_pinned, type);
 }
 
 /*
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
                                enum bp_type_idx type, int weight)
 {
-       atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
-       int old_idx, new_idx;
+       struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;
 
        /*
         * If bp->hw.target, tsk_pinned is only modified, but not used
         * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
         */
        lockdep_assert_held_read(&bp_cpuinfo_sem);
-
-       old_idx = task_bp_pinned(cpu, bp, type) - 1;
-       new_idx = old_idx + weight;
-
-       if (old_idx >= 0)
-               atomic_dec(&tsk_pinned[old_idx]);
-       if (new_idx >= 0)
-               atomic_inc(&tsk_pinned[new_idx]);
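+       /*
+        * Shift this task's histogram bucket by @weight, starting from its
+        * current number of task-pinned breakpoints on this CPU.
+        */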
+       bp_slots_histogram_add(tsk_pinned, task_bp_pinned(cpu, bp, type), weight);
 }
 
 /*
                                return true;
 
                        for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
-                               if (atomic_read(&info->tsk_pinned[slot]))
+                               if (atomic_read(&info->tsk_pinned.count[slot]))
                                        return true;
                        }
                }