        __stack_depot_early_init_requested = true;
 }
 
+/* Allocates a hash table via memblock. Can only be used during early boot. */
 int __init stack_depot_early_init(void)
 {
        unsigned long entries = 0;
 
-       /* This is supposed to be called only once, from mm_init() */
+       /* This function must be called only once, from mm_init(). */
        if (WARN_ON(__stack_depot_early_init_passed))
                return 0;
-
        __stack_depot_early_init_passed = true;
 
+       /*
+        * If KASAN is enabled, use the maximum order: KASAN is frequently used
+        * in fuzzing scenarios, which leads to a large number of different
+        * stack traces being stored in stack depot.
+        */
        if (kasan_enabled() && !stack_hash_order)
                stack_hash_order = STACK_HASH_ORDER_MAX;
 
        if (!__stack_depot_early_init_requested || stack_depot_disabled)
                return 0;
 
+       /*
+        * If stack_hash_order is not set, leave entries as 0 to rely on the
+        * automatic calculations performed by alloc_large_system_hash.
+        */
        if (stack_hash_order)
-               entries = 1UL <<  stack_hash_order;
+               entries = 1UL << stack_hash_order;
+       pr_info("allocating hash table via alloc_large_system_hash\n");
        stack_table = alloc_large_system_hash("stackdepot",
                                                sizeof(struct stack_record *),
                                                entries,
                                                STACK_HASH_SCALE,
                                                HASH_EARLY | HASH_ZERO,
                                                NULL,
                                                &stack_hash_mask,
                                                1UL << STACK_HASH_ORDER_MIN,
                                                1UL << STACK_HASH_ORDER_MAX);
-
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
                return -ENOMEM;
        }

        return 0;
 }
 
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
 int stack_depot_init(void)
 {
        static DEFINE_MUTEX(stack_depot_init_mutex);
        mutex_lock(&stack_depot_init_mutex);
        if (!stack_depot_disabled && !stack_table) {
                unsigned long entries;
-               int scale = STACK_HASH_SCALE;
 
+               /*
+                * Similarly to stack_depot_early_init, use stack_hash_order
+                * if assigned, and rely on automatic scaling otherwise.
+                */
                if (stack_hash_order) {
                        entries = 1UL << stack_hash_order;
                } else {
+                       int scale = STACK_HASH_SCALE;
+
                        entries = nr_free_buffer_pages();
                        entries = roundup_pow_of_two(entries);
 
                        if (scale > PAGE_SHIFT)
                                entries >>= (scale - PAGE_SHIFT);
                        else
                                entries <<= (PAGE_SHIFT - scale);
                }

                if (entries < 1UL << STACK_HASH_ORDER_MIN)
                        entries = 1UL << STACK_HASH_ORDER_MIN;
                if (entries > 1UL << STACK_HASH_ORDER_MAX)
                        entries = 1UL << STACK_HASH_ORDER_MAX;
 
-               pr_info("allocating hash table of %lu entries with kvcalloc\n",
+               pr_info("allocating hash table of %lu entries via kvcalloc\n",
                                entries);
                stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
                if (!stack_table) {