mm/slub: Convert slab freeing to struct slab
author	Matthew Wilcox (Oracle) <willy@infradead.org>
	Fri, 1 Oct 2021 21:45:54 +0000 (17:45 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
	Mon, 4 Oct 2021 13:17:59 +0000 (09:17 -0400)
Improve type safety by passing a slab pointer through discard_slab()
to free_slab() and __free_slab().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
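
As an illustration of the type-safety gain, here is a minimal freestanding sketch (hypothetical *_sketch names, not code from this patch): once the freeing helper takes a struct slab *, handing it a struct page * is flagged at compile time instead of silently reinterpreting the pointer.

struct kmem_cache;
struct page;	/* generic page type */
struct slab;	/* dedicated slab type introduced by this series */

/* the freeing helper now takes the dedicated type */
static void __free_slab_sketch(struct kmem_cache *s, struct slab *slab)
{
	(void)s; (void)slab;	/* body elided */
}

static void ok(struct kmem_cache *s, struct slab *slab)
{
	__free_slab_sketch(s, slab);	/* fine: types match */
}

static void broken(struct kmem_cache *s, struct page *page)
{
	__free_slab_sketch(s, page);	/* -Wincompatible-pointer-types:
					 * the misuse is caught at compile
					 * time rather than at runtime */
}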
mm/slub.c

index e3c8893f9bd50a0c77488d10ad3151b8813b41ad..75a411d6b76e62115a6c47fba7033281fd85b256 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1964,49 +1964,48 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-       int order = compound_order(page);
+       struct page *page = slab_page(slab);
+       int order = slab_order(slab);
        int pages = 1 << order;
 
        if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
                void *p;
 
-               slab_pad_check(s, page);
-               for_each_object(p, s, page_address(page),
-                                               page->objects)
-                       check_object(s, page, p, SLUB_RED_INACTIVE);
+               slab_pad_check(s, slab_page(slab));
+               for_each_object(p, s, slab_address(slab), slab->objects)
+                       check_object(s, slab_page(slab), p, SLUB_RED_INACTIVE);
        }
 
-       __ClearPageSlabPfmemalloc(page);
+       __slab_clear_pfmemalloc(slab);
        __ClearPageSlab(page);
-       /* In union with page->mapping where page allocator expects NULL */
-       page->slab_cache = NULL;
+       page->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       unaccount_slab_page(page, order, s);
+       unaccount_slab(slab, order, s);
        __free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
-       struct page *page = container_of(h, struct page, rcu_head);
+       struct slab *slab = container_of(h, struct slab, rcu_head);
 
-       __free_slab(page->slab_cache, page);
+       __free_slab(slab->slab_cache, slab);
 }
 
-static void free_slab(struct kmem_cache *s, struct page *page)
+static void free_slab(struct kmem_cache *s, struct slab *slab)
 {
        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
-               call_rcu(&page->rcu_head, rcu_free_slab);
+               call_rcu(&slab->rcu_head, rcu_free_slab);
        } else
-               __free_slab(s, page);
+               __free_slab(s, slab);
 }
 
-static void discard_slab(struct kmem_cache *s, struct page *page)
+static void discard_slab(struct kmem_cache *s, struct slab *slab)
 {
-       dec_slabs_node(s, page_to_nid(page), page->objects);
-       free_slab(s, page);
+       dec_slabs_node(s, slab_nid(slab), slab->objects);
+       free_slab(s, slab);
 }
 
 /*
@@ -2431,7 +2430,7 @@ redo:
                stat(s, DEACTIVATE_FULL);
        else if (m == M_FREE) {
                stat(s, DEACTIVATE_EMPTY);
-               discard_slab(s, slab_page(slab));
+               discard_slab(s, slab);
                stat(s, FREE_SLAB);
        }
 }
@@ -2492,7 +2491,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
                unusable = unusable->next;
 
                stat(s, DEACTIVATE_EMPTY);
-               discard_slab(s, slab_page(slab));
+               discard_slab(s, slab);
                stat(s, FREE_SLAB);
        }
 }
@@ -3387,7 +3386,7 @@ slab_empty:
 
        spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);
-       discard_slab(s, slab_page(slab));
+       discard_slab(s, slab);
 }
 
 /*
@@ -4257,7 +4256,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        spin_unlock_irq(&n->list_lock);
 
        list_for_each_entry_safe(slab, h, &discard, slab_list)
-               discard_slab(s, slab_page(slab));
+               discard_slab(s, slab);
 }
 
 bool __kmem_cache_empty(struct kmem_cache *s)
@@ -4606,7 +4605,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
 
                /* Release empty slabs */
                list_for_each_entry_safe(slab, t, &discard, slab_list)
-                       discard_slab(s, slab_page(slab));
+                       discard_slab(s, slab);
 
                if (slabs_node(s, node))
                        ret = 1;
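
The rcu_free_slab() change above leans on the standard container_of() pattern: call_rcu() is handed only the rcu_head embedded in struct slab, and the callback recovers the enclosing structure from that member pointer. A freestanding sketch of the recovery step (hypothetical *_example names, not kernel code):

#include <stddef.h>

struct rcu_head_example {
	void (*func)(struct rcu_head_example *h);
};

struct slab_example {
	unsigned int objects;
	struct rcu_head_example rcu_head;	/* embedded, as in struct slab */
};

/* recover a pointer to the enclosing structure from a member pointer */
#define container_of_example(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void rcu_free_example(struct rcu_head_example *h)
{
	struct slab_example *slab =
		container_of_example(h, struct slab_example, rcu_head);
	(void)slab;	/* the whole slab_example is now available to free */
}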