* Management of partially allocated slabs.
*/
static inline void
-__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
{
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
- list_add_tail(&page->slab_list, &n->partial);
+ list_add_tail(&slab->slab_list, &n->partial);
else
- list_add(&page->slab_list, &n->partial);
+ list_add(&slab->slab_list, &n->partial);
}
static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+ struct slab *slab, int tail)
{
lockdep_assert_held(&n->list_lock);
- __add_partial(n, page, tail);
+ __add_partial(n, slab, tail);
}
static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+ struct slab *slab)
{
lockdep_assert_held(&n->list_lock);
- list_del(&page->slab_list);
+ list_del(&slab->slab_list);
n->nr_partial--;
}
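The lockdep assertions above encode the calling convention for these helpers: add_partial() and remove_partial() are meant for regular paths where the caller already holds the per-node list_lock, while __add_partial() is reserved for code that cannot race, such as the node initialization further down in this patch. The fragment below is a minimal sketch of that convention under the post-conversion signatures, using only names visible in this hunk; the function example_move_to_partial is hypothetical and not part of the patch.

	/*
	 * Sketch only: the locking expected by add_partial() after the
	 * struct slab conversion.  Paths with no concurrent access (e.g.
	 * early node init) call __add_partial() without the lock instead.
	 */
	static void example_move_to_partial(struct kmem_cache_node *n,
					    struct slab *slab)
	{
		unsigned long flags;

		/* Taking list_lock satisfies lockdep_assert_held(). */
		spin_lock_irqsave(&n->list_lock, flags);
		add_partial(n, slab, DEACTIVATE_TO_TAIL);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}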
"acquire_slab"))
return NULL;
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
WARN_ON(!freelist);
return freelist;
}
if (l != m) {
if (l == M_PARTIAL)
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
else if (l == M_FULL)
remove_full(s, n, slab_page(slab));
if (m == M_PARTIAL)
- add_partial(n, slab_page(slab), tail);
+ add_partial(n, slab, tail);
else if (m == M_FULL)
add_full(s, n, slab_page(slab));
}
slab->next = unusable;
unusable = slab;
} else {
- add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
}
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
remove_full(s, n, slab_page(slab));
- add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
/*
* Slab on the partial list.
*/
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
stat(s, FREE_REMOVE_PARTIAL);
} else {
/* Slab must be on the full list */
* No locks need to be taken here as it has just been
* initialized and there is no concurrent access.
*/
- __add_partial(n, slab_page(slab), DEACTIVATE_TO_HEAD);
+ __add_partial(n, slab, DEACTIVATE_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
#endif
/*
- * The larger the object size is, the more pages we want on the partial
+ * The larger the object size is, the more slabs we want on the partial
* list to avoid pounding the page allocator excessively.
*/
set_min_partial(s, ilog2(s->size) / 2);
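To put the heuristic above in concrete terms, ilog2(s->size) / 2 grows very slowly with object size, so even caches with large objects keep only a modest number of partial slabs per node. The standalone userspace snippet below is an illustration only, not part of the patch; ilog2_u is a stand-in for the kernel's ilog2(), and the excerpt does not show any additional clamping that set_min_partial() may apply.

	/* Illustration only: raw ilog2(size)/2 values fed to set_min_partial(). */
	#include <stdio.h>

	static unsigned int ilog2_u(unsigned long v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long sizes[] = { 32, 256, 4096, 65536 };

		for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("object size %6lu -> min_partial %u\n",
			       sizes[i], ilog2_u(sizes[i]) / 2);
		return 0;
	}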
spin_lock_irq(&n->list_lock);
list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
if (!slab->inuse) {
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
list_add(&slab->slab_list, &discard);
} else {
list_slab_objects(s, slab,