mm/slub: Convert __kmem_cache_do_shrink() to struct slab
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 1 Oct 2021 19:14:32 +0000 (15:14 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 4 Oct 2021 13:17:58 +0000 (09:17 -0400)
Convert the local variables in __kmem_cache_do_shrink() from struct page to struct slab.  This adds a little type safety.
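
The shrink path only needs the bookkeeping that struct slab exposes directly.  A rough sketch of the fields this patch reads through the new type (illustrative only; the real definition carries more state and packs inuse/objects differently):

        /*
         * Illustrative sketch, not the authoritative declaration: only
         * the fields __kmem_cache_do_shrink() touches are shown.
         */
        struct slab {
                struct list_head slab_list;     /* linkage on the per-node partial list */
                unsigned int inuse;             /* objects currently allocated */
                unsigned int objects;           /* total objects in this slab */
                /* ... */
        };

discard_slab() still takes a struct page here, hence the slab_page(slab) conversion at its call site below.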

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/slub.c

index f760accb0febc9fc955bfdb5a3eaaf14d37c90c8..ea7f8d9716e0a1a673576674ae8694d5a3836c80 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4560,8 +4560,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
        int node;
        int i;
        struct kmem_cache_node *n;
-       struct page *page;
-       struct page *t;
+       struct slab *slab, *t;
        struct list_head discard;
        struct list_head promote[SHRINK_PROMOTE_MAX];
        unsigned long flags;
@@ -4578,22 +4577,22 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
                 * Build lists of slabs to discard or promote.
                 *
                 * Note that concurrent frees may occur while we hold the
-                * list_lock. page->inuse here is the upper limit.
+                * list_lock. slab->inuse here is the upper limit.
                 */
-               list_for_each_entry_safe(page, t, &n->partial, slab_list) {
-                       int free = page->objects - page->inuse;
+               list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
+                       int free = slab->objects - slab->inuse;
 
-                       /* Do not reread page->inuse */
+                       /* Do not reread slab->inuse */
                        barrier();
 
                        /* We do not keep full slabs on the list */
                        BUG_ON(free <= 0);
 
-                       if (free == page->objects) {
-                               list_move(&page->slab_list, &discard);
+                       if (free == slab->objects) {
+                               list_move(&slab->slab_list, &discard);
                                n->nr_partial--;
                        } else if (free <= SHRINK_PROMOTE_MAX)
-                               list_move(&page->slab_list, promote + free - 1);
+                               list_move(&slab->slab_list, promote + free - 1);
                }
 
                /*
@@ -4606,8 +4605,8 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
                spin_unlock_irqrestore(&n->list_lock, flags);
 
                /* Release empty slabs */
-               list_for_each_entry_safe(page, t, &discard, slab_list)
-                       discard_slab(s, page);
+               list_for_each_entry_safe(slab, t, &discard, slab_list)
+                       discard_slab(s, slab_page(slab));
 
                if (slabs_node(s, node))
                        ret = 1;