--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ ... @@
 }
 
 bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-                                               unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        if (kasan_enabled())
-               return __kasan_slab_free(s, object, ip);
+               return __kasan_slab_free(s, object, _RET_IP_);
        return false;
 }
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
 {
        if (kasan_enabled())
-               __kasan_slab_free_mempool(ptr, ip);
+               __kasan_slab_free_mempool(ptr, _RET_IP_);
 }
 
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ ... @@
 }
 
 void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+static __always_inline void kasan_kfree_large(void *ptr)
 {
        if (kasan_enabled())
-               __kasan_kfree_large(ptr, ip);
+               __kasan_kfree_large(ptr, _RET_IP_);
 }
 
 bool kasan_save_enable_multi_shot(void);
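
The three hunks above apply a single transformation: the ip parameter
moves out of the inline wrappers' signatures, and the wrappers evaluate
_RET_IP_ themselves. This is safe because the wrappers are
__always_inline: _RET_IP_ expands (in include/linux/kernel.h) to
(unsigned long)__builtin_return_address(0), which after inlining is
evaluated in the caller's own frame and therefore yields the same
address the call sites used to pass explicitly. A minimal user-space
sketch of that property (illustrative names, not kernel code):

    #include <stdio.h>

    /* Mirrors the kernel's definition in include/linux/kernel.h. */
    #define _RET_IP_ (unsigned long)__builtin_return_address(0)

    static void record_free(unsigned long ip)
    {
            printf("freed from %#lx\n", ip);
    }

    /*
     * Because the wrapper is guaranteed to be inlined into its caller,
     * __builtin_return_address(0) is evaluated in the caller's frame:
     * the recorded ip is the caller's return address, exactly as if
     * the caller had passed _RET_IP_ itself.
     */
    static inline __attribute__((always_inline)) void free_hook(void)
    {
            record_free(_RET_IP_);
    }

    int main(void)
    {
            free_hook();    /* records main()'s return address */
            return 0;
    }

The hunks that follow make the matching change to the empty
!CONFIG_KASAN stubs, keeping both configurations in sync.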
@@ ... @@
 {
        return (void *)object;
 }
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
-                                  unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags)
 {
@@ ... @@
 {
        return (void *)object;
 }
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
 
 #endif /* CONFIG_KASAN */
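
That completes the include/linux/kasan.h side. The remaining hunks
update the three call sites in mm/mempool.c, mm/slab.c and mm/slub.c,
each of which simply drops the now-redundant _RET_IP_ argument.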
 
 
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ ... @@
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-               kasan_slab_free_mempool(element, _RET_IP_);
+               kasan_slab_free_mempool(element);
        else if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
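
Note that kasan_poison_element() is itself __always_inline, as is the
new kasan_slab_free_mempool() wrapper, so the whole chain still
collapses into the original caller's frame: _RET_IP_ should resolve to
the same address as before the patch, leaving KASAN's recorded free
addresses unchanged.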
 
--- a/mm/slab.c
+++ b/mm/slab.c
@@ ... @@
                memset(objp, 0, cachep->object_size);
 
        /* Put the object into the quarantine, don't touch it for now. */
-       if (kasan_slab_free(cachep, objp, _RET_IP_))
+       if (kasan_slab_free(cachep, objp))
                return;
 
        /* Use KCSAN to help debug racy use-after-free. */
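
In mm/slab.c the wrapper's bool return value is what lets KASAN defer
the actual free: when kasan_slab_free() returns true, the object has
been placed in the quarantine and ___cache_free() must not release it
yet. A hedged sketch of that contract (hypothetical names, not the
kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    static void *quarantine[16];
    static int quarantine_len;

    /* Returns true when it takes ownership of obj by detaining it. */
    static bool quarantine_put(void *obj)
    {
            if (quarantine_len == 16)
                    return false;   /* full: caller frees immediately */
            quarantine[quarantine_len++] = obj;
            return true;            /* detained: caller must bail out */
    }

    static void cache_free(void *obj)
    {
            /* Don't touch a quarantined object; delaying its reuse
             * keeps use-after-free accesses hitting poisoned memory
             * instead of a recycled allocation. */
            if (quarantine_put(obj))
                    return;
            printf("actually freeing %p\n", obj);
    }

    int main(void)
    {
            static char obj[32];
            cache_free(obj);        /* goes into the quarantine */
            return 0;
    }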
 
--- a/mm/slub.c
+++ b/mm/slub.c
@@ ... @@
 static __always_inline void kfree_hook(void *x)
 {
        kmemleak_free(x);
-       kasan_kfree_large(x, _RET_IP_);
+       kasan_kfree_large(x);
 }
 
 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
@@ ... @@
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
        /* KASAN might put x into memory quarantine, delaying its reuse */
-       return kasan_slab_free(s, x, _RET_IP_);
+       return kasan_slab_free(s, x);
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
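
In SLUB the same verdict propagates one level further: the body of
slab_free_freelist_hook() (not shown here) walks the detached freelist
and keeps only the objects for which slab_free_hook() returned false,
so the reuse of quarantined objects is delayed until KASAN releases
them from the quarantine.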