The KUnit test suite is very likely to fail when using a deferrable timer
    since it currently causes very unpredictable sample intervals.
 
+By default KFENCE will only sample 1 heap allocation within each sample
+interval. *Burst mode* allows sampling successive heap allocations, where the
+kernel boot parameter ``kfence.burst`` can be set to a non-zero value which
+denotes the *additional* successive allocations within a sample interval;
+setting ``kfence.burst=N`` means that ``1 + N`` successive allocations are
+attempted through KFENCE for each sample interval.
+
 The KFENCE memory pool is of fixed size, and if the pool is exhausted, no
 further KFENCE allocations occur. With ``CONFIG_KFENCE_NUM_OBJECTS`` (default
 255), the number of available guarded objects can be controlled. Each object
 
 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
 
+/* Allocation burst count: number of excess KFENCE allocations per sample. */
+static unsigned int kfence_burst __read_mostly;
+module_param_named(burst, kfence_burst, uint, 0644);
+
 /* If true, use a deferrable timer. */
 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
 module_param_named(deferrable, kfence_deferrable, bool, 0444);
        if (!READ_ONCE(kfence_enabled))
                return;
 
-       atomic_set(&kfence_allocation_gate, 0);
+       atomic_set(&kfence_allocation_gate, -kfence_burst);
 #ifdef CONFIG_KFENCE_STATIC_KEYS
        /* Enable static key, and await allocation to happen. */
        static_branch_enable(&kfence_allocation_key);
 
-       wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
+       wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);
 
        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
        unsigned long stack_entries[KFENCE_STACK_DEPTH];
        size_t num_stack_entries;
        u32 alloc_stack_hash;
+       int allocation_gate;
 
        /*
         * Perform size check before switching kfence_allocation_gate, so that
        if (s->flags & SLAB_SKIP_KFENCE)
                return NULL;
 
-       if (atomic_inc_return(&kfence_allocation_gate) > 1)
+       allocation_gate = atomic_inc_return(&kfence_allocation_gate);
+       if (allocation_gate > 1)
                return NULL;
 #ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
         * waitqueue_active() is fully ordered after the update of
         * kfence_allocation_gate per atomic_inc_return().
         */
-       if (waitqueue_active(&allocation_wait)) {
+       if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
                /*
                 * Calling wake_up() here may deadlock when allocations happen
                 * from within timer code. Use an irq_work to defer it.