}
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
+/* Count the maximum stack usage reached, bucketed by size in bytes */
+static inline void kstack_histogram(unsigned long used_stack)
+{
+#ifdef CONFIG_VM_EVENT_COUNTERS
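+       /*
+        * Buckets grow in powers of two; buckets larger than THREAD_SIZE can
+        * never be hit, so the preprocessor guards compile them out.
+        */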
+       if (used_stack <= 1024)
+               count_vm_event(KSTACK_1K);
+#if THREAD_SIZE > 1024
+       else if (used_stack <= 2048)
+               count_vm_event(KSTACK_2K);
+#endif
+#if THREAD_SIZE > 2048
+       else if (used_stack <= 4096)
+               count_vm_event(KSTACK_4K);
+#endif
+#if THREAD_SIZE > 4096
+       else if (used_stack <= 8192)
+               count_vm_event(KSTACK_8K);
+#endif
+#if THREAD_SIZE > 8192
+       else if (used_stack <= 16384)
+               count_vm_event(KSTACK_16K);
+#endif
+#if THREAD_SIZE > 16384
+       else if (used_stack <= 32768)
+               count_vm_event(KSTACK_32K);
+#endif
+#if THREAD_SIZE > 32768
+       else if (used_stack <= 65536)
+               count_vm_event(KSTACK_64K);
+#endif
+#if THREAD_SIZE > 65536
+       else
+               count_vm_event(KSTACK_REST);
+#endif
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+}
+
 static void check_stack_usage(void)
 {
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
        unsigned long free;
 
        free = stack_not_used(current);
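+       /* stack_not_used() reports free bytes, so the difference is the depth reached */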
+       kstack_histogram(THREAD_SIZE - free);
 
        if (free >= lowest_to_date)
                return;