www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm, slub: allocate private object map for debugfs listings
author Vlastimil Babka <vbabka@suse.cz>
Mon, 23 Aug 2021 23:58:54 +0000 (09:58 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 25 Aug 2021 23:33:24 +0000 (09:33 +1000)
Slub has a static spinlock protected bitmap for marking which objects are
on freelist when it wants to list them, for situations where dynamically
allocating such map can lead to recursion or locking issues, and on-stack
bitmap would be too large.

The handlers of debugfs files alloc_traces and free_traces also currently
use this shared bitmap, but their syscall context makes it straightforward
to allocate a private map before entering locked sections, so switch these
processing paths to use a private bitmap.

Link: https://lkml.kernel.org/r/20210805152000.12817-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
mm/slub.c

index f6063ec97a554ec31a4255ab513de8d6529e1c8c..fb603fdf58cbcb2a6828b1674eb2aeff3bb25acc 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -454,6 +454,18 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
 
+static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
+                      struct page *page)
+{
+       void *addr = page_address(page);
+       void *p;
+
+       bitmap_zero(obj_map, page->objects);
+
+       for (p = page->freelist; p; p = get_freepointer(s, p))
+               set_bit(__obj_to_index(s, addr, p), obj_map);
+}
+
 #if IS_ENABLED(CONFIG_KUNIT)
 static bool slab_add_kunit_errors(void)
 {
@@ -483,17 +495,11 @@ static inline bool slab_add_kunit_errors(void) { return false; }
 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
        __acquires(&object_map_lock)
 {
-       void *p;
-       void *addr = page_address(page);
-
        VM_BUG_ON(!irqs_disabled());
 
        spin_lock(&object_map_lock);
 
-       bitmap_zero(object_map, page->objects);
-
-       for (p = page->freelist; p; p = get_freepointer(s, p))
-               set_bit(__obj_to_index(s, addr, p), object_map);
+       __fill_map(object_map, s, page);
 
        return object_map;
 }
@@ -4879,17 +4885,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-               struct page *page, enum track_item alloc)
+               struct page *page, enum track_item alloc,
+               unsigned long *obj_map)
 {
        void *addr = page_address(page);
        void *p;
-       unsigned long *map;
 
-       map = get_map(s, page);
+       __fill_map(obj_map, s, page);
+
        for_each_object(p, s, addr, page->objects)
-               if (!test_bit(__obj_to_index(s, addr, p), map))
+               if (!test_bit(__obj_to_index(s, addr, p), obj_map))
                        add_location(t, s, get_track(s, p, alloc));
-       put_map(map);
 }
 #endif  /* CONFIG_DEBUG_FS   */
 #endif /* CONFIG_SLUB_DEBUG */
@@ -5816,14 +5822,21 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
        struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
                                                sizeof(struct loc_track));
        struct kmem_cache *s = file_inode(filep)->i_private;
+       unsigned long *obj_map;
+
+       obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+       if (!obj_map)
+               return -ENOMEM;
 
        if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
                alloc = TRACK_ALLOC;
        else
                alloc = TRACK_FREE;
 
-       if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
+       if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
+               bitmap_free(obj_map);
                return -ENOMEM;
+       }
 
        for_each_kmem_cache_node(s, node, n) {
                unsigned long flags;
@@ -5834,12 +5847,13 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 
                spin_lock_irqsave(&n->list_lock, flags);
                list_for_each_entry(page, &n->partial, slab_list)
-                       process_slab(t, s, page, alloc);
+                       process_slab(t, s, page, alloc, obj_map);
                list_for_each_entry(page, &n->full, slab_list)
-                       process_slab(t, s, page, alloc);
+                       process_slab(t, s, page, alloc, obj_map);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
 
+       bitmap_free(obj_map);
        return 0;
 }