return false;
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+       if (kasan_enabled())
+               __kasan_slab_free_mempool(ptr, ip);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
                                       void *object, gfp_t flags);
 static __always_inline void * __must_check kasan_slab_alloc(
        return (void *)object;
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip);
-static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-       if (kasan_enabled())
-               __kasan_poison_kfree(ptr, ip);
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip);
 static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
 {
 {
        return false;
 }
+static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags)
 {
 {
        return (void *)object;
 }
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
 
        return ____kasan_slab_free(cache, object, ip, true);
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+       struct page *page;
+
+       page = virt_to_head_page(ptr);
+
+       /*
+        * Even though this function is only called for kmem_cache_alloc and
+        * kmalloc backed mempool allocations, those allocations can still be
+        * !PageSlab() when the size provided to kmalloc is larger than
+        * KMALLOC_MAX_CACHE_SIZE, and kmalloc falls back onto page_alloc.
+        */
+       if (unlikely(!PageSlab(page))) {
+               if (ptr != page_address(page)) {
+                       kasan_report_invalid_free(ptr, ip);
+                       return;
+               }
+               poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+       } else {
+               ____kasan_slab_free(page->slab_cache, ptr, ip, false);
+       }
+}
+
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
        kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
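
For context on the !PageSlab() branch in __kasan_slab_free_mempool() above: a kmalloc-backed mempool whose element size exceeds the largest kmalloc slab cache gets its elements straight from the page allocator. The sketch below is not part of the patch; mempool_create_kmalloc_pool(), mempool_alloc() and mempool_free() are the existing mempool API, while example_pool, example_init(), example_use() and the 64 KiB element size are made up purely to show when the poison_range() path is taken (on a typical SLUB config, 64 KiB is well above KMALLOC_MAX_CACHE_SIZE).

/* Hypothetical illustration, not part of the patch. */
#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/errno.h>

static mempool_t *example_pool;

static int example_init(void)
{
	/*
	 * 64 KiB is larger than any kmalloc slab cache on a typical SLUB
	 * config, so mempool_kmalloc() returns page_alloc-backed memory
	 * and the pool's elements are !PageSlab().
	 */
	example_pool = mempool_create_kmalloc_pool(2, SZ_64K);
	return example_pool ? 0 : -ENOMEM;
}

static void example_use(void)
{
	void *elem = mempool_alloc(example_pool, GFP_KERNEL);

	if (!elem)
		return;
	/* ... use elem ... */

	/*
	 * If the reserve is not full, mempool_free() keeps the element and
	 * kasan_poison_element() poisons it via kasan_slab_free_mempool(),
	 * which takes the poison_range() branch for this pool.
	 */
	mempool_free(elem, example_pool);
}

For slab-backed elements the function instead falls through to ____kasan_slab_free() with quarantine disabled, presumably because the object is only parked in the pool's reserve rather than actually returned to the allocator.
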
                                                flags, true);
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-       struct page *page;
-
-       page = virt_to_head_page(ptr);
-
-       if (unlikely(!PageSlab(page))) {
-               if (ptr != page_address(page)) {
-                       kasan_report_invalid_free(ptr, ip);
-                       return;
-               }
-               poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-       } else {
-               ____kasan_slab_free(page->slab_cache, ptr, ip, false);
-       }
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
        if (ptr != page_address(virt_to_head_page(ptr)))
 
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-               kasan_poison_kfree(element, _RET_IP_);
+               kasan_slab_free_mempool(element, _RET_IP_);
        else if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
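
The other callback pairs handled by kasan_poison_element() map onto the renamed hook the same way. As a rough sketch (again not part of the patch: example_obj, example_cache, example_slab_pool and example_slab_pool_init() are hypothetical, while KMEM_CACHE() and mempool_create_slab_pool() are the existing kernel helpers), a slab-backed pool takes the PageSlab() branch and ends up in ____kasan_slab_free():

/* Hypothetical illustration, not part of the patch. */
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj {
	int data[16];
};

static struct kmem_cache *example_cache;
static mempool_t *example_slab_pool;

static int example_slab_pool_init(void)
{
	example_cache = KMEM_CACHE(example_obj, 0);
	if (!example_cache)
		return -ENOMEM;

	/* pool->alloc is mempool_alloc_slab, so the first branch above matches. */
	example_slab_pool = mempool_create_slab_pool(4, example_cache);
	if (!example_slab_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}

Elements returned to the reserve are poisoned through kasan_slab_free_mempool() and unpoisoned again when mempool_alloc() hands them out; page-backed pools (mempool_alloc_pages) keep using kasan_free_pages() as before.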