return false;
 
        if (kasan_stack_collection_enabled())
-               kasan_set_free_info(cache, object, tag);
+               kasan_save_free_info(cache, object, tag);
 
        return kasan_quarantine_put(cache, object);
 }
        }
 }
 
-static void set_alloc_info(struct kmem_cache *cache, void *object,
+static void save_alloc_info(struct kmem_cache *cache, void *object,
                                gfp_t flags, bool is_kmalloc)
 {
        struct kasan_alloc_meta *alloc_meta;
 
        /* Save alloc info (if possible) for non-kmalloc() allocations. */
        if (kasan_stack_collection_enabled())
-               set_alloc_info(cache, (void *)object, flags, false);
+               save_alloc_info(cache, (void *)object, flags, false);
 
        return tagged_object;
 }
         * This also rewrites the alloc info when called from kasan_krealloc().
         */
        if (kasan_stack_collection_enabled())
-               set_alloc_info(cache, (void *)object, flags, true);
+               save_alloc_info(cache, (void *)object, flags, true);
 
        /* Keep the tag that was set by kasan_slab_alloc(). */
        return (void *)object;
 
        return __kasan_record_aux_stack(addr, false);
 }
 
-void kasan_set_free_info(struct kmem_cache *cache,
+void kasan_save_free_info(struct kmem_cache *cache,
                                void *object, u8 tag)
 {
        struct kasan_free_meta *free_meta;
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
-void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag);
+void kasan_save_free_info(struct kmem_cache *cache, void *object, u8 tag);
 struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
                                void *object, u8 tag);