* If the provided stack trace comes from the interrupt context, only the part
  * up to the interrupt entry is saved.
  *
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
  *          alloc_pages() cannot be used from the current context. Currently
  *          this is the case for contexts where neither %GFP_ATOMIC nor
  *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
  */
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
                                            unsigned int nr_entries,
-                                           gfp_t gfp_flags,
+                                           gfp_t alloc_flags,
                                            depot_flags_t depot_flags);
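
A minimal caller-side sketch (not part of the patch; save_current_stack() is a
hypothetical helper, and the stack_trace_save() API from <linux/stacktrace.h>
is assumed): set STACK_DEPOT_FLAG_CAN_ALLOC only when the current context
allows alloc_pages(), and leave it clear in NMI or under a raw_spin_lock.

	#include <linux/kernel.h>
	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t save_current_stack(bool may_alloc)
	{
		unsigned long entries[64];
		unsigned int nr_entries;
		depot_flags_t depot_flags = 0;

		/* Capture the current stack trace. */
		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

		/* Allow the depot to allocate a new pool only where that is legal. */
		if (may_alloc)
			depot_flags |= STACK_DEPOT_FLAG_CAN_ALLOC;

		return stack_depot_save_flags(entries, nr_entries, GFP_NOWAIT,
					      depot_flags);
	}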
 
 /**
  * Return: Handle of the stack trace stored in depot, 0 on failure
  */
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
-                                     unsigned int nr_entries, gfp_t gfp_flags);
+                                     unsigned int nr_entries, gfp_t alloc_flags);
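
In current kernels stack_depot_save() is effectively the _flags() variant with
STACK_DEPOT_FLAG_CAN_ALLOC set, so it must only be called where alloc_pages()
is usable. A short usage sketch (not from the patch; it assumes the
stack_trace_save() helper from <linux/stacktrace.h>):

	unsigned long entries[32];
	unsigned int nr_entries;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
	/* A zero handle means the trace could not be stored. */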
 
 /**
  * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
 
                        prealloc = page_address(page);
        }
 
-       raw_spin_lock_irqsave(&pool_lock, flags);
+       if (in_nmi()) {
+               /* We can never allocate in NMI context. */
+               WARN_ON_ONCE(can_alloc);
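+               /*
+                * pool_lock may already be held by the context this NMI
+                * interrupted, so taking it unconditionally could deadlock.
+                */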
+               /* Best effort; bail if we fail to take the lock. */
+               if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                       goto exit;
+       } else {
+               raw_spin_lock_irqsave(&pool_lock, flags);
+       }
        printk_deferred_enter();
 
        /* Try to find again, to avoid concurrently inserting duplicates. */