mm/slub: Convert full slab management to struct slab
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Fri, 1 Oct 2021 22:09:46 +0000 (18:09 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
		Mon, 4 Oct 2021 13:18:00 +0000 (09:18 -0400)
Pass struct slab to add_full() and remove_full().  Improves type
safety.
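
For illustration only, a minimal standalone sketch of the type-safety
argument (the structs and the userspace main() below are stand-ins, not
the kernel definitions): giving slabs their own struct type turns
"passed some random struct page where a slab was expected" into a
compile-time diagnostic rather than a runtime surprise.

	/* Hedged sketch, not kernel code: stand-in types only. */
	#include <stdio.h>

	struct page { unsigned long flags; };	/* stand-in for struct page */
	struct slab { struct slab *next; };	/* stand-in for struct slab */

	/* Only memory already known to be a slab can be linked here. */
	static void add_full(struct slab *slab, struct slab *full_head)
	{
		slab->next = full_head;
	}

	int main(void)
	{
		struct slab s = { 0 }, head = { 0 };
		struct page p = { 0 };

		add_full(&s, &head);	/* ok: the argument really is a slab */
		/* add_full(&p, &head);    incompatible pointer type,
		 *			   rejected under -Werror */
		(void)p;		/* only used in the commented-out call */
		printf("linked: %p\n", (void *)s.next);
		return 0;
	}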

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/slub.c

index 6d81e54e61dfaea81c017a289bedf47903d6ecd2..32a1bd4c8a8898e4ba38111e3b95db926d3ac8eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1185,22 +1185,22 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache *s,
-       struct kmem_cache_node *n, struct page *page)
+       struct kmem_cache_node *n, struct slab *slab)
 {
        if (!(s->flags & SLAB_STORE_USER))
                return;
 
        lockdep_assert_held(&n->list_lock);
-       list_add(&page->slab_list, &n->full);
+       list_add(&slab->slab_list, &n->full);
 }
 
-static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
 {
        if (!(s->flags & SLAB_STORE_USER))
                return;
 
        lockdep_assert_held(&n->list_lock);
-       list_del(&page->slab_list);
+       list_del(&slab->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1616,9 +1616,9 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
-                                       struct page *page) {}
+                                       struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
-                                       struct page *page) {}
+                                       struct slab *slab) {}
 slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
 {
@@ -2402,12 +2402,12 @@ redo:
                if (l == M_PARTIAL)
                        remove_partial(n, slab);
                else if (l == M_FULL)
-                       remove_full(s, n, slab_page(slab));
+                       remove_full(s, n, slab);
 
                if (m == M_PARTIAL)
                        add_partial(n, slab, tail);
                else if (m == M_FULL)
-                       add_full(s, n, slab_page(slab));
+                       add_full(s, n, slab);
        }
 
        l = m;
@@ -3361,7 +3361,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
         * then add it.
         */
        if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
-               remove_full(s, n, slab_page(slab));
+               remove_full(s, n, slab);
                add_partial(n, slab, DEACTIVATE_TO_TAIL);
                stat(s, FREE_ADD_PARTIAL);
        }
@@ -3377,7 +3377,7 @@ slab_empty:
                stat(s, FREE_REMOVE_PARTIAL);
        } else {
                /* Slab must be on the full list */
-               remove_full(s, n, slab_page(slab));
+               remove_full(s, n, slab);
        }
 
        spin_unlock_irqrestore(&n->list_lock, flags);