        return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep)
+int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 {
        int ret = 0;
        int node;
 {
        int i;
        struct kmem_cache_node *n;
-       int rc = __kmem_cache_shrink(cachep);
+       int rc = __kmem_cache_shrink(cachep, false);
 
        if (rc)
                return rc;
 
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *);
+int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
 
 {
        int idx;
        struct memcg_cache_array *arr;
-       struct kmem_cache *s;
+       struct kmem_cache *s, *c;
 
        idx = memcg_cache_id(memcg);
 
+       get_online_cpus();
+       get_online_mems();
+
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list) {
                if (!is_root_cache(s))
                        continue;

                arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
                                                lockdep_is_held(&slab_mutex));
+               c = arr->entries[idx];
+               if (!c)
+                       continue;
+
+               __kmem_cache_shrink(c, true);
                arr->entries[idx] = NULL;
        }
        mutex_unlock(&slab_mutex);
+
+       put_online_mems();
+       put_online_cpus();
 }
 
 void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 
        get_online_cpus();
        get_online_mems();
-       ret = __kmem_cache_shrink(cachep);
+       ret = __kmem_cache_shrink(cachep, false);
        put_online_mems();
        put_online_cpus();
        return ret;
 
        return 0;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d)
+int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
        return 0;
 }
 
        int pages;
        int pobjects;
 
+       preempt_disable();
        do {
                pages = 0;
                pobjects = 0;
 
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
                                                                != oldpage);
+       if (unlikely(!s->cpu_partial)) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+               local_irq_restore(flags);
+       }
+       preempt_enable();
 #endif
 }
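
The two additions to put_cpu_partial() above pair with the deactivation path further down: s->cpu_partial is read with no lock held, so a cache being shut down has to be able to publish cpu_partial = 0 and then rely on every concurrent free noticing the zero and draining its per-cpu partial list at once instead of stashing more pages. The toy program below shows only that publish-then-notice shape, in plain C11 atomics and pthreads; every name in it is invented for illustration and none of it is kernel code.

/* Illustrative userspace analogue only; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cache_budget = 4;     /* stands in for s->cpu_partial */

static void *free_path(void *arg)
{
        int stashed = 0;

        (void)arg;
        for (int i = 0; i < 1000000; i++) {
                if (atomic_load(&cache_budget) == 0) {
                        stashed = 0;    /* budget revoked: "flush" at once */
                        break;
                }
                stashed++;              /* otherwise keep batching frees */
        }
        printf("objects still stashed: %d\n", stashed);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, free_path, NULL);
        atomic_store(&cache_budget, 0); /* "deactivate": publish the change */
        pthread_join(t, NULL);
        return 0;
}

As the new comments note, the kernel gets the equivalent guarantee from kick_all_cpus_sync() combined with the preempt_disable()/preempt_enable() pair added above, not from atomics.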
 
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 {
        int node;
        int i;
        unsigned long flags;
        int ret = 0;
 
+       if (deactivate) {
+               /*
+                * Disable empty slabs caching. Used to avoid pinning offline
+                * memory cgroups by kmem pages that can be freed.
+                */
+               s->cpu_partial = 0;
+               s->min_partial = 0;
+
+               /*
+                * s->cpu_partial is checked locklessly (see put_cpu_partial),
+                * so we have to make sure the change is visible.
+                */
+               kick_all_cpus_sync();
+       }
+
        flush_all(s);
        for_each_kmem_cache_node(s, node, n) {
-               if (!n->nr_partial)
-                       continue;
-
                INIT_LIST_HEAD(&discard);
                for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
                        INIT_LIST_HEAD(promote + i);
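
For readers who have not seen the function these hunks land in: the promote[] and discard lists initialized above drive a bucketing pass, roughly as follows. Each partial slab is keyed by how many of its objects are still free, completely free slabs are collected on discard, and fuller slabs are promoted toward the head of the partial list; the comment fragment quoted above the signature change describes the effect of that ordering. The sketch below is a standalone, array-based restatement of that idea; the struct, the sample numbers and the constant's value are chosen here for illustration, and only the SHRINK_PROMOTE_MAX name is taken from the hunk.

/* Standalone sketch of the promote-by-free-count pass; not kernel code. */
#include <stdio.h>

#define SHRINK_PROMOTE_MAX 32           /* value picked for the toy */
#define NSLABS 6

struct toy_slab { int objects; int inuse; };

int main(void)
{
        struct toy_slab slabs[NSLABS] = {
                { 32, 31 }, { 32, 3 }, { 32, 0 }, { 32, 30 }, { 32, 0 }, { 32, 17 },
        };
        int promote[SHRINK_PROMOTE_MAX][NSLABS];        /* bucket by free count */
        int count[SHRINK_PROMOTE_MAX] = { 0 };

        for (int i = 0; i < NSLABS; i++) {
                int free = slabs[i].objects - slabs[i].inuse;

                if (free == slabs[i].objects)
                        printf("slab %d: completely free -> discard\n", i);
                else if (free > 0 && free <= SHRINK_PROMOTE_MAX)
                        promote[free - 1][count[free - 1]++] = i;
                /* full slabs never sit on the partial list; slabs with more
                 * free objects than the cutoff stay where they are */
        }

        /* Walk the buckets from fewest free objects (fullest slabs) upward,
         * i.e. the order the slabs would occupy from the list head. */
        printf("new partial-list order (head first):\n");
        for (int free = 1; free <= SHRINK_PROMOTE_MAX; free++)
                for (int j = 0; j < count[free - 1]; j++)
                        printf("  slab %d (free objects: %d)\n",
                               promote[free - 1][j], free);
        return 0;
}

Read together with the deactivate branch above, this one-time sweep plus the zeroed min_partial is what keeps a dead cache from holding on to empty slabs, which is the point of the new comment about not pinning offline memory cgroups.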
 
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
-               __kmem_cache_shrink(s);
+               __kmem_cache_shrink(s, false);
        mutex_unlock(&slab_mutex);
 
        return 0;