return 0;
 }
 
+/* Supports checking bulk free of a constructed freelist */
 static noinline struct kmem_cache_node *free_debug_processing(
-       struct kmem_cache *s, struct page *page, void *object,
+       struct kmem_cache *s, struct page *page,
+       void *head, void *tail, int bulk_cnt,
        unsigned long addr, unsigned long *flags)
 {
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+       void *object = head;
+       int cnt = 0;
 
        spin_lock_irqsave(&n->list_lock, *flags);
        slab_lock(page);
        if (!check_slab(s, page))
                goto fail;
 
+next_object:
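+       /* Debug-check each object on the constructed freelist in turn */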
+       cnt++;
+
        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
+       /*
+        * Freepointer is not overwritten by init_object(): SLAB_POISON
+        * moved it outside the object.
+        */
        init_object(s, object, SLUB_RED_INACTIVE);
+
+       /* Reached end of constructed freelist yet? */
+       if (object != tail) {
+               object = get_freepointer(s, object);
+               goto next_object;
+       }
 out:
+       if (cnt != bulk_cnt)
+               slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+                        bulk_cnt, cnt);
+
        slab_unlock(page);
        /*
         * Keep node_lock to preserve integrity
        struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline struct kmem_cache_node *free_debug_processing(
-       struct kmem_cache *s, struct page *page, void *object,
+       struct kmem_cache *s, struct page *page,
+       void *head, void *tail, int bulk_cnt,
        unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
        kasan_slab_free(s, x);
 }
 
+static inline void slab_free_freelist_hook(struct kmem_cache *s,
+                                          void *head, void *tail)
+{
+/*
+ * Compiler cannot detect this function can be removed if slab_free_hook()
+ * evaluates to nothing.  Thus, catch all relevant config debug options here.
+ */
+#if defined(CONFIG_KMEMCHECK) ||               \
+       defined(CONFIG_LOCKDEP) ||              \
+       defined(CONFIG_DEBUG_KMEMLEAK) ||       \
+       defined(CONFIG_DEBUG_OBJECTS_FREE) ||   \
+       defined(CONFIG_KASAN)
+
+       void *object = head;
+       void *tail_obj = tail ? : head;
+
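+       /* Run the per-object free hook for every object on the freelist */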
+       do {
+               slab_free_hook(s, object);
+       } while ((object != tail_obj) &&
+                (object = get_freepointer(s, object)));
+#endif
+}
+
 static void setup_object(struct kmem_cache *s, struct page *page,
                                void *object)
 {
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                       void *x, unsigned long addr)
+                       void *head, void *tail, int cnt,
+                       unsigned long addr)
 {
        void *prior;
-       void **object = (void *)x;
        int was_frozen;
        struct page new;
        unsigned long counters;
        stat(s, FREE_SLOWPATH);
 
        if (kmem_cache_debug(s) &&
-               !(n = free_debug_processing(s, page, x, addr, &flags)))
+           !(n = free_debug_processing(s, page, head, tail, cnt,
+                                       addr, &flags)))
                return;
 
        do {
                }
                prior = page->freelist;
                counters = page->counters;
-               set_freepointer(s, object, prior);
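+               /* Splice the freed list in front of the old page freelist */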
+               set_freepointer(s, tail, prior);
                new.counters = counters;
                was_frozen = new.frozen;
-               new.inuse--;
+               new.inuse -= cnt;
                if ((!new.inuse || !prior) && !was_frozen) {
 
                        if (kmem_cache_has_cpu_partial(s) && !prior) {
 
        } while (!cmpxchg_double_slab(s, page,
                prior, counters,
-               object, new.counters,
+               head, new.counters,
                "__slab_free"));
 
        if (likely(!n)) {
  *
  * If fastpath is not possible then fall back to __slab_free where we deal
  * with all sorts of special processing.
+ *
+ * Bulk free of a freelist with several objects (all belonging to the
+ * same page) is possible by specifying the head and tail pointers plus
+ * the object count (cnt). A bulk free is indicated by the tail pointer
+ * being set.
  */
-static __always_inline void slab_free(struct kmem_cache *s,
-                       struct page *page, void *x, unsigned long addr)
+static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+                                     void *head, void *tail, int cnt,
+                                     unsigned long addr)
 {
-       void **object = (void *)x;
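+       /* A NULL tail means a single object is being freed: head is the tail */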
+       void *tail_obj = tail ? : head;
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       slab_free_hook(s, x);
+       slab_free_freelist_hook(s, head, tail);
 
 redo:
        /*
        barrier();
 
        if (likely(page == c->page)) {
-               set_freepointer(s, object, c->freelist);
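+               /* Link the tail of the freed list to the current cpu freelist */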
+               set_freepointer(s, tail_obj, c->freelist);
 
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                c->freelist, tid,
-                               object, next_tid(tid)))) {
+                               head, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_free", s, tid);
                        goto redo;
                }
                stat(s, FREE_FASTPATH);
        } else
-               __slab_free(s, page, x, addr);
+               __slab_free(s, page, head, tail_obj, cnt, addr);
 
 }
 
        s = cache_from_obj(s, x);
        if (!s)
                return;
-       slab_free(s, virt_to_head_page(x), x, _RET_IP_);
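+       /* Single object free: no tail and a count of one */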
+       slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
        trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
                        c->tid = next_tid(c->tid);
                        local_irq_enable();
                        /* Slowpath: overhead locked cmpxchg_double_slab */
-                       __slab_free(s, page, object, _RET_IP_);
+                       __slab_free(s, page, object, object, 1, _RET_IP_);
                        local_irq_disable();
                        c = this_cpu_ptr(s->cpu_slab);
                }
                __free_kmem_pages(page, compound_order(page));
                return;
        }
-       slab_free(page->slab_cache, page, object, _RET_IP_);
+       slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);