www.infradead.org Git - users/willy/linux.git/commitdiff
mm/slub: Convert validate_slab() to take a struct slab
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 1 Oct 2021 19:07:45 +0000 (15:07 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 4 Oct 2021 13:17:58 +0000 (09:17 -0400)
Also convert validate_slab_node to use a struct slab.  Adds a little
typesafety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/slub.c

index fdf3dbd4665f0e9df6762e7f46149837a6d9511f..5e10a9cc6939230a9bc7916d3b90e5c87d60d6c1 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4956,42 +4956,42 @@ static int count_total(struct page *page)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page,
+static void validate_slab(struct kmem_cache *s, struct slab *slab,
                          unsigned long *obj_map)
 {
        void *p;
-       void *addr = page_address(page);
+       void *addr = slab_address(slab);
        unsigned long flags;
 
-       slab_lock(page, &flags);
+       slab_lock(slab_page(slab), &flags);
 
-       if (!check_slab(s, page) || !on_freelist(s, page, NULL))
+       if (!check_slab(s, slab_page(slab)) || !on_freelist(s, slab_page(slab), NULL))
                goto unlock;
 
        /* Now we know that a valid freelist exists */
-       __fill_map(obj_map, s, page);
-       for_each_object(p, s, addr, page->objects) {
+       __fill_map(obj_map, s, slab_page(slab));
+       for_each_object(p, s, addr, slab->objects) {
                u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
                         SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
-               if (!check_object(s, page, p, val))
+               if (!check_object(s, slab_page(slab), p, val))
                        break;
        }
 unlock:
-       slab_unlock(page, &flags);
+       slab_unlock(slab_page(slab), &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
                struct kmem_cache_node *n, unsigned long *obj_map)
 {
        unsigned long count = 0;
-       struct page *page;
+       struct slab *slab;
        unsigned long flags;
 
        spin_lock_irqsave(&n->list_lock, flags);
 
-       list_for_each_entry(page, &n->partial, slab_list) {
-               validate_slab(s, page, obj_map);
+       list_for_each_entry(slab, &n->partial, slab_list) {
+               validate_slab(s, slab, obj_map);
                count++;
        }
        if (count != n->nr_partial) {
@@ -5003,8 +5003,8 @@ static int validate_slab_node(struct kmem_cache *s,
        if (!(s->flags & SLAB_STORE_USER))
                goto out;
 
-       list_for_each_entry(page, &n->full, slab_list) {
-               validate_slab(s, page, obj_map);
+       list_for_each_entry(slab, &n->full, slab_list) {
+               validate_slab(s, slab, obj_map);
                count++;
        }
        if (count != atomic_long_read(&n->nr_slabs)) {