__kasan_poison_slab(slab);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                                        void *object)
 {
        if (kasan_enabled())
-               __kasan_unpoison_object_data(cache, object);
+               __kasan_unpoison_new_object(cache, object);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
                                                        void *object)
 {
        if (kasan_enabled())
-               __kasan_poison_object_data(cache, object);
+               __kasan_poison_new_object(cache, object);
 }
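
The intended call pattern is the one the slab allocators below switch to: briefly unpoison a freshly created object so its constructor can write to it, then repoison it until it is actually allocated. A minimal sketch of that pairing, assuming a simplified caller (the helper name and signature are illustrative, not kernel code):

static void run_ctor_on_new_object(struct kmem_cache *cache, void *object)
{
	/* The object sits in a freshly poisoned slab: lift the poison... */
	kasan_unpoison_new_object(cache, object);
	/* ...let the constructor initialize the object's memory... */
	cache->ctor(object);
	/* ...and repoison it until the allocator hands it out for real. */
	kasan_poison_new_object(cache, object);
}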
 
 void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
        return false;
 }
 static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                        void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
                                        void *object) {}
 static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
                                const void *object)
 
                     KASAN_SLAB_REDZONE, false);
 }
 
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
 {
        kasan_unpoison(object, cache->object_size, false);
 }
 
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
 {
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_SLAB_REDZONE, false);
 
 
        /*
         * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_poison_object_data) pass tagged
+        * some of the callers (e.g. kasan_poison_new_object) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);
 
        /*
         * Perform shadow offset calculation based on untagged address, as
-        * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+        * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);
 
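The tag-resetting step matters because the shadow byte's address is derived arithmetically from the object address; in the software tag-based mode the pointer's top byte holds a tag that would otherwise be shifted straight into the computed shadow offset. A sketch of the mapping, roughly following the generic kasan_mem_to_shadow() helper (the constants are architecture-dependent and shown only for illustration):

static inline void *mem_to_shadow_sketch(const void *addr)
{
	/* One shadow byte tracks KASAN_GRANULE_SIZE (8) bytes of memory. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

Resetting the tag first (kasan_reset_tag() restores the canonical top byte) makes the shift land inside the shadow region regardless of which tag the caller's pointer carries.
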
                 * They must also be threaded.
                 */
                if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
-                       kasan_unpoison_object_data(cachep,
-                                                  objp + obj_offset(cachep));
+                       kasan_unpoison_new_object(cachep, objp + obj_offset(cachep));
                        cachep->ctor(objp + obj_offset(cachep));
-                       kasan_poison_object_data(
-                               cachep, objp + obj_offset(cachep));
+                       kasan_poison_new_object(cachep, objp + obj_offset(cachep));
                }
 
                if (cachep->flags & SLAB_RED_ZONE) {
 
                /* constructor could break poison info */
                if (DEBUG == 0 && cachep->ctor) {
-                       kasan_unpoison_object_data(cachep, objp);
+                       kasan_unpoison_new_object(cachep, objp);
                        cachep->ctor(objp);
-                       kasan_poison_object_data(cachep, objp);
+                       kasan_poison_new_object(cachep, objp);
                }
 
                if (!shuffled)
 
        setup_object_debug(s, object);
        object = kasan_init_slab_obj(s, object);
        if (unlikely(s->ctor)) {
-               kasan_unpoison_object_data(s, object);
+               kasan_unpoison_new_object(s, object);
                s->ctor(object);
-               kasan_poison_object_data(s, object);
+               kasan_poison_new_object(s, object);
        }
        return object;
 }
 
        }
 
        skb = nc->skb_cache[--nc->skb_count];
-       kasan_unpoison_object_data(skbuff_cache, skb);
+       kasan_unpoison_new_object(skbuff_cache, skb);
 
        return skb;
 }
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        u32 i;
 
-       kasan_poison_object_data(skbuff_cache, skb);
+       kasan_poison_new_object(skbuff_cache, skb);
        nc->skb_cache[nc->skb_count++] = skb;
 
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-                       kasan_unpoison_object_data(skbuff_cache,
-                                                  nc->skb_cache[i]);
+                       kasan_unpoison_new_object(skbuff_cache,
+                                                 nc->skb_cache[i]);
 
                kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
                                     nc->skb_cache + NAPI_SKB_CACHE_HALF);