        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
+       KFENCE_COUNTER_SKIP_INCOMPAT,
+       KFENCE_COUNTER_SKIP_CAPACITY,
        KFENCE_COUNTER_COUNT,
 };
 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
        [KFENCE_COUNTER_FREES]          = "total frees",
        [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
        [KFENCE_COUNTER_BUGS]           = "total bugs",
+       [KFENCE_COUNTER_SKIP_INCOMPAT]  = "skipped allocations (incompatible)",
+       [KFENCE_COUNTER_SKIP_CAPACITY]  = "skipped allocations (capacity)",
 };
 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
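For orientation: counter_names pairs index-for-index with the counters array (hence the static_assert) and is printed by KFENCE's debugfs stats file, so the two new entries become visible without further plumbing. A minimal sketch of that reader, following the shape of stats_show() in mm/kfence/core.c (the exact body may differ across kernel versions):

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        /* Emit one "name: value" line per counter, new skip counters included. */
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i],
                           atomic_long_read(&counters[i]));

        return 0;
}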
 
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
-       if (!meta)
+       if (!meta) {
+               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
                return NULL;
+       }
 
        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
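For context on the capacity hunk above: kfence_guarded_alloc() tries to pop a free guard object off kfence_freelist just before the changed lines; if the pool is fully in use, meta stays NULL, which is exactly the case the new KFENCE_COUNTER_SKIP_CAPACITY counter records. A sketch of that surrounding logic, abridged to the shape of the upstream function (details may vary across kernel versions):

        struct kfence_metadata *meta = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                /* Pop one unused guard object off the freelist. */
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);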
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
         */
-       if (size > PAGE_SIZE)
+       if (size > PAGE_SIZE) {
+               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
+       }
 
        /*
         * Skip allocations from non-default zones, including DMA. We cannot
         * guarantee that pages in the KFENCE pool will have the requested
         * properties (e.g. reside in DMAable memory).
         */
        if ((flags & GFP_ZONEMASK) ||
-           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
+               atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
+       }
 
        if (atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;
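With both counters wired up, reading /sys/kernel/debug/kfence/stats should now include two extra lines of the form:

        skipped allocations (incompatible): <n>
        skipped allocations (capacity): <n>

making it possible to distinguish allocations skipped because the request is incompatible with KFENCE (larger than PAGE_SIZE, or constrained to a DMA zone) from those skipped because the guarded-object pool was fully occupied.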