mm/slub: Convert early_kmem_cache_node_alloc() to use struct slab
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Fri, 1 Oct 2021 18:40:23 +0000 (14:40 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Mon, 4 Oct 2021 12:33:50 +0000 (08:33 -0400)
Add a little type safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
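
The type safety comes from struct slab being a distinct C type: a helper such as
slab_nid() only accepts a struct slab *, so handing it an arbitrary struct page *
is rejected by the compiler (incompatible pointer type) instead of being silently
accepted the way page_to_nid() accepts any page. A minimal standalone sketch of
the idea (illustrative only, not the definitions used in this series):

struct page { int nid; void *freelist; };	/* stand-in for the real struct page */
struct slab { int nid; void *freelist; };	/* same layout, but a distinct type  */

static inline int slab_nid(const struct slab *slab)
{
	return slab->nid;
}

static inline int page_to_nid(const struct page *page)
{
	return page->nid;
}

/*
 * page_to_nid() accepts any struct page, including non-slab pages;
 * slab_nid() on a struct page * does not compile cleanly, which is
 * the point of the conversion.
 */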
mm/slub.c

index 555c46cbae1f021f875fd834ee800563b0732181..41c4ccd67d951eb6df8ce52a40241f367f030514 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3891,38 +3891,38 @@ static struct kmem_cache *kmem_cache_node;
  */
 static void early_kmem_cache_node_alloc(int node)
 {
-       struct page *page;
+       struct slab *slab;
        struct kmem_cache_node *n;
 
        BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
-       page = slab_page(new_slab(kmem_cache_node, GFP_NOWAIT, node));
+       slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
 
-       BUG_ON(!page);
-       if (page_to_nid(page) != node) {
+       BUG_ON(!slab);
+       if (slab_nid(slab) != node) {
                pr_err("SLUB: Unable to allocate memory from node %d\n", node);
                pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
        }
 
-       n = page->freelist;
+       n = slab->freelist;
        BUG_ON(!n);
 #ifdef CONFIG_SLUB_DEBUG
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
 #endif
        n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
-       page->freelist = get_freepointer(kmem_cache_node, n);
-       page->inuse = 1;
-       page->frozen = 0;
+       slab->freelist = get_freepointer(kmem_cache_node, n);
+       slab->inuse = 1;
+       slab->frozen = 0;
        kmem_cache_node->node[node] = n;
        init_kmem_cache_node(n);
-       inc_slabs_node(kmem_cache_node, node, page->objects);
+       inc_slabs_node(kmem_cache_node, node, slab->objects);
 
        /*
         * No locks need to be taken here as it has just been
         * initialized and there is no concurrent access.
         */
-       __add_partial(n, page, DEACTIVATE_TO_HEAD);
+       __add_partial(n, slab_page(slab), DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
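
__add_partial() still takes a struct page at this point in the series, so the
final call site bridges back with slab_page(). A sketch of what such a
conversion helper amounts to (assumed here for illustration; the in-tree helper
goes through the slab/folio machinery rather than a bare cast):

static inline struct page *slab_page(struct slab *slab)
{
	/*
	 * struct slab overlays the same memory as the underlying
	 * struct page, so the conversion is a type change, not a copy.
	 * Sketch only.
	 */
	return (struct page *)slab;
}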