From: Matthew Wilcox (Oracle)
Date: Fri, 1 Oct 2021 19:28:57 +0000 (-0400)
Subject: mm/slub: Convert slab_alloc_node() to use a struct slab
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=0bf069652b44b52620dcfbcab67330d7b623c78a;p=users%2Fwilly%2Flinux.git

mm/slub: Convert slab_alloc_node() to use a struct slab

Adds a little type safety.

Signed-off-by: Matthew Wilcox (Oracle)
---

diff --git a/mm/slub.c b/mm/slub.c
index 29703bba0a7f..fd04aa96602c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3112,7 +3112,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 {
 	void *object;
 	struct kmem_cache_cpu *c;
-	struct page *page;
+	struct slab *slab;
 	unsigned long tid;
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
@@ -3144,9 +3144,9 @@ redo:
 	/*
 	 * Irqless object alloc/free algorithm used here depends on sequence
 	 * of fetching cpu_slab's data. tid should be fetched before anything
-	 * on c to guarantee that object and page associated with previous tid
+	 * on c to guarantee that object and slab associated with previous tid
 	 * won't be used with current tid. If we fetch tid first, object and
-	 * page could be one associated with next tid and our alloc/free
+	 * slab could be one associated with next tid and our alloc/free
 	 * request will be failed. In this case, we will retry. So, no problem.
 	 */
 	barrier();
@@ -3159,7 +3159,7 @@ redo:
 	 */

 	object = c->freelist;
-	page = slab_page(c->slab);
+	slab = c->slab;
 	/*
 	 * We cannot use the lockless fastpath on PREEMPT_RT because if a
 	 * slowpath has taken the local_lock_irqsave(), it is not protected
@@ -3168,7 +3168,7 @@ redo:
 	 * there is a suitable cpu freelist.
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
-	    unlikely(!object || !page || !node_match(page, node))) {
+	    unlikely(!object || !slab || !node_match(slab_page(slab), node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
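
As a footnote on the "little type safety" this buys: the sketch below is a
minimal, compilable illustration, not the kernel's real definitions (the
struct slab, slab_page() and node_match() here are simplified stand-ins).
Because struct slab and struct page are distinct C types, handing the slab
pointer to a page-expecting helper without going through slab_page() becomes
a compile-time error rather than silent type confusion:

#include <stdio.h>

/* Stand-in layouts; the real structs overlay the same memmap entry. */
struct page { int nid; /* NUMA node id, stand-in for real fields */ };
struct slab { int nid; /* mirrors struct page's layout */ };

/* Explicit, greppable conversion from a slab back to its struct page. */
static struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;	/* same memory, different type */
}

/* Simplified stand-in for mm/slub.c's node_match(). */
static int node_match(struct page *page, int node)
{
	return page->nid == node;
}

int main(void)
{
	struct slab s = { .nid = 0 };

	/* OK: the type conversion is visible at the call site. */
	printf("%d\n", node_match(slab_page(&s), 0));

	/* node_match(&s, 0); -- would now fail to compile. */
	return 0;
}

Since the two structs describe the same memory, slab_page() is a pure type
conversion; keeping c->slab as a struct slab for as long as possible and
converting only at the node_match() call site, as the hunk at line 3168 does,
keeps that conversion explicit and easy to audit.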