mm/slub: Convert acquire_slab() to take a struct slab
author    Matthew Wilcox (Oracle) <willy@infradead.org>    Fri, 1 Oct 2021 21:32:11 +0000 (17:32 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>    Mon, 4 Oct 2021 13:17:59 +0000 (09:17 -0400)
Improves type safety.
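
For illustration, here is a minimal, self-contained userspace sketch of the
benefit: once acquire_slab() takes a struct slab *, handing it an arbitrary
struct page * is flagged by the compiler rather than accepted silently. The
struct definitions, the slab_page() stand-in and acquire_slab_sketch() below
are simplified assumptions made up for this example, not the kernel's real
types or code.

	#include <stdio.h>

	/* Simplified stand-ins for the real kernel structures. */
	struct page {
		unsigned long flags;
	};

	struct slab {
		void *freelist;
		unsigned int inuse;
		unsigned int objects;
	};

	/* Stand-in for slab_page(): convert only where a page is still needed. */
	static struct page *slab_page(struct slab *slab)
	{
		/* Placeholder cast for the sketch; in the kernel the two types
		 * overlay the same memory. */
		return (struct page *)slab;
	}

	/* After the conversion: the parameter type documents and enforces intent. */
	static void *acquire_slab_sketch(struct slab *slab, int mode)
	{
		void *freelist = slab->freelist;

		if (mode)
			slab->inuse = slab->objects;	/* take all objects */
		return freelist;
	}

	int main(void)
	{
		struct slab s = { .freelist = &s, .inuse = 0, .objects = 8 };
		struct page *p = slab_page(&s);

		acquire_slab_sketch(&s, 1);	/* ok: a real slab */
		/* acquire_slab_sketch(p, 1);	   incompatible pointer type */
		printf("inuse after acquire: %u (page at %p)\n",
		       s.inuse, (void *)p);
		return 0;
	}

Built as ordinary C (e.g. gcc -Wall sketch.c), uncommenting the second call
produces an incompatible-pointer-type diagnostic; that is the class of mistake
the struct slab parameter lets the compiler catch.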

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/slub.c

index 5330d0b02f13d97c2ba63ee0c63ae7db95a427e1..3468f2b2fe3a9dd981eb4198f1764904782194ae 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2044,12 +2044,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * Returns a list of objects or NULL if it fails.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
-               struct kmem_cache_node *n, struct page *page,
+               struct kmem_cache_node *n, struct slab *slab,
                int mode, int *objects)
 {
        void *freelist;
        unsigned long counters;
-       struct page new;
+       struct slab new;
 
        lockdep_assert_held(&n->list_lock);
 
@@ -2058,12 +2058,12 @@ static inline void *acquire_slab(struct kmem_cache *s,
         * The old freelist is the list of objects for the
         * per cpu allocation list.
         */
-       freelist = page->freelist;
-       counters = page->counters;
+       freelist = slab->freelist;
+       counters = slab->counters;
        new.counters = counters;
        *objects = new.objects - new.inuse;
        if (mode) {
-               new.inuse = page->objects;
+               new.inuse = slab->objects;
                new.freelist = NULL;
        } else {
                new.freelist = freelist;
@@ -2072,13 +2072,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
        VM_BUG_ON(new.frozen);
        new.frozen = 1;
 
-       if (!__cmpxchg_double_slab(s, page,
+       if (!__cmpxchg_double_slab(s, slab_page(slab),
                        freelist, counters,
                        new.freelist, new.counters,
                        "acquire_slab"))
                return NULL;
 
-       remove_partial(n, page);
+       remove_partial(n, slab_page(slab));
        WARN_ON(!freelist);
        return freelist;
 }
@@ -2119,7 +2119,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                if (!pfmemalloc_match(slab_page(slab), gfpflags))
                        continue;
 
-               t = acquire_slab(s, n, slab_page(slab), object == NULL, &objects);
+               t = acquire_slab(s, n, slab, object == NULL, &objects);
                if (!t)
                        break;