mm/slub: Convert free_partial() to use struct slab
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Fri, 1 Oct 2021 19:17:27 +0000 (15:17 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 4 Oct 2021 13:17:58 +0000 (09:17 -0400)
Add a little type safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
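
The patch below keeps iterating the node's partial list, but with the loop cursor typed as struct slab * instead of struct page *, converting back with slab_page() only where the callee still takes a struct page. As a rough illustration of the type-safety benefit, here is a minimal userspace sketch (not kernel code): the struct page and struct slab layouts are simplified stand-ins, and to_page() is a hypothetical helper standing in for the kernel's slab_page() conversion.

/*
 * Sketch of the idea: walk a list with the slab type so the compiler
 * rejects accidental page-only accesses, converting to struct page
 * only at call sites that still expect one.
 */
#include <stdio.h>

struct page {                   /* simplified stand-in for struct page */
	unsigned int inuse;
	struct page *next;
};

struct slab {                   /* typed overlay with the same layout */
	unsigned int inuse;
	struct slab *next;
};

/* Hypothetical stand-in for slab_page(): same memory, different type. */
static struct page *to_page(struct slab *slab)
{
	return (struct page *)slab;
}

static void discard_slab(struct page *page)
{
	printf("discarding empty slab (%u objects in use)\n", page->inuse);
}

int main(void)
{
	struct slab slabs[2] = {
		{ .inuse = 0, .next = &slabs[1] },
		{ .inuse = 3, .next = NULL },
	};

	/* Mirror free_partial(): empty slabs are discarded, the rest reported. */
	for (struct slab *slab = &slabs[0]; slab; slab = slab->next) {
		if (!slab->inuse)
			discard_slab(to_page(slab));
		else
			printf("%u objects remaining in this slab\n", slab->inuse);
	}
	return 0;
}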
mm/slub.c

index ea7f8d9716e0a1a673576674ae8694d5a3836c80..875f3f6c1ae685178805aa411d8394ec3dce1815 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4241,23 +4241,23 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
        LIST_HEAD(discard);
-       struct page *page, *h;
+       struct slab *slab, *h;
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
-       list_for_each_entry_safe(page, h, &n->partial, slab_list) {
-               if (!page->inuse) {
-                       remove_partial(n, page);
-                       list_add(&page->slab_list, &discard);
+       list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
+               if (!slab->inuse) {
+                       remove_partial(n, slab_page(slab));
+                       list_add(&slab->slab_list, &discard);
                } else {
-                       list_slab_objects(s, page,
+                       list_slab_objects(s, slab_page(slab),
                          "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
 
-       list_for_each_entry_safe(page, h, &discard, slab_list)
-               discard_slab(s, page);
+       list_for_each_entry_safe(slab, h, &discard, slab_list)
+               discard_slab(s, slab_page(slab));
 }
 
 bool __kmem_cache_empty(struct kmem_cache *s)