}
#ifdef CONFIG_SLUB_CPU_PARTIAL
-static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
+static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
{
struct kmem_cache_node *n = NULL, *n2 = NULL;
- struct page *page, *discard_page = NULL;
+ struct slab *slab, *unusable = NULL;
unsigned long flags = 0;
- while (partial_page) {
- struct page new;
- struct page old;
+ while (partial_slab) {
+ struct slab new;
+ struct slab old;
- page = partial_page;
- partial_page = page->next;
+ slab = partial_slab;
+ partial_slab = slab->next;
- n2 = get_node(s, page_to_nid(page));
+ n2 = get_node(s, slab_nid(slab));
if (n != n2) {
if (n)
spin_unlock_irqrestore(&n->list_lock, flags);
n = n2;
spin_lock_irqsave(&n->list_lock, flags);
}
do {
- old.freelist = page->freelist;
- old.counters = page->counters;
+ old.freelist = slab->freelist;
+ old.counters = slab->counters;
VM_BUG_ON(!old.frozen);
new.counters = old.counters;
new.freelist = old.freelist;
new.frozen = 0;
- } while (!__cmpxchg_double_slab(s, page,
+ } while (!__cmpxchg_double_slab(s, slab_page(slab),
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab"));
if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
- page->next = discard_page;
- discard_page = page;
+ slab->next = unusable;
+ unusable = slab;
} else {
- add_partial(n, page, DEACTIVATE_TO_TAIL);
+ add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
}
if (n)
spin_unlock_irqrestore(&n->list_lock, flags);
- while (discard_page) {
- page = discard_page;
- discard_page = discard_page->next;
+ while (unusable) {
+ slab = unusable;
+ unusable = unusable->next;
stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
+ discard_slab(s, slab_page(slab));
stat(s, FREE_SLAB);
}
}
*/
static void unfreeze_partials(struct kmem_cache *s)
{
- struct page *partial_page;
+ struct slab *partial_slab;
unsigned long flags;
local_lock_irqsave(&s->cpu_slab->lock, flags);
- partial_page = slab_page(this_cpu_read(s->cpu_slab->partial));
+ partial_slab = this_cpu_read(s->cpu_slab->partial);
this_cpu_write(s->cpu_slab->partial, NULL);
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- if (partial_page)
- __unfreeze_partials(s, partial_page);
+ if (partial_slab)
+ __unfreeze_partials(s, partial_slab);
}
static void unfreeze_partials_cpu(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
- struct page *partial_page;
+ struct slab *partial_slab;
- partial_page = slab_page(slub_percpu_partial(c));
+ partial_slab = slub_percpu_partial(c);
c->partial = NULL;
- if (partial_page)
- __unfreeze_partials(s, partial_page);
+ if (partial_slab)
+ __unfreeze_partials(s, partial_slab);
}
/*
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
if (slab_to_unfreeze) {
- __unfreeze_partials(s, slab_page(slab_to_unfreeze));
+ __unfreeze_partials(s, slab_to_unfreeze);
stat(s, CPU_PARTIAL_DRAIN);
}
}