www.infradead.org Git - users/willy/linux.git/commitdiff
mm/slub: Convert process_slab() to take a struct slab
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 1 Oct 2021 15:33:52 +0000 (11:33 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 4 Oct 2021 12:33:50 +0000 (08:33 -0400)
Add some type safety by passing a struct slab instead of a struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/slub.c

index b34ca1ff3e1c07fc497f1ceb94f5f6a96313a2b6..f5aadbccdab4793dc5f8e2668b8ff5e64da31295 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5169,15 +5169,15 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-               struct page *page, enum track_item alloc,
+               struct slab *slab, enum track_item alloc,
                unsigned long *obj_map)
 {
-       void *addr = page_address(page);
+       void *addr = slab_address(slab);
        void *p;
 
-       __fill_map(obj_map, s, page);
+       __fill_map(obj_map, s, slab_page(slab));
 
-       for_each_object(p, s, addr, page->objects)
+       for_each_object(p, s, addr, slab->objects)
                if (!test_bit(__obj_to_index(s, addr, p), obj_map))
                        add_location(t, s, get_track(s, p, alloc));
 }
@@ -6124,16 +6124,16 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 
        for_each_kmem_cache_node(s, node, n) {
                unsigned long flags;
-               struct page *page;
+               struct slab *slab;
 
                if (!atomic_long_read(&n->nr_slabs))
                        continue;
 
                spin_lock_irqsave(&n->list_lock, flags);
-               list_for_each_entry(page, &n->partial, slab_list)
-                       process_slab(t, s, page, alloc, obj_map);
-               list_for_each_entry(page, &n->full, slab_list)
-                       process_slab(t, s, page, alloc, obj_map);
+               list_for_each_entry(slab, &n->partial, slab_list)
+                       process_slab(t, s, slab, alloc, obj_map);
+               list_for_each_entry(slab, &n->full, slab_list)
+                       process_slab(t, s, slab, alloc, obj_map);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }