From: Vlastimil Babka
Date: Mon, 23 Aug 2021 23:59:01 +0000 (+1000)
Subject: mm, slub: fix memory and cpu hotplug related lock ordering issues
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=cc2ef29c3d30a886348e79941ef093a463157473;p=users%2Fjedix%2Flinux-maple.git

mm, slub: fix memory and cpu hotplug related lock ordering issues

Qian Cai reported [1] a lockdep splat on memory offline.

[   91.374541] WARNING: possible circular locking dependency detected
[   91.381411] 5.14.0-rc5-next-20210809+ #84 Not tainted
[   91.387149] ------------------------------------------------------
[   91.394016] lsbug/1523 is trying to acquire lock:
[   91.399406] ffff800018e76530 (flush_lock){+.+.}-{3:3}, at: flush_all+0x50/0x1c8
[   91.407425] but task is already holding lock:
[   91.414638] ffff800018e48468 (slab_mutex){+.+.}-{3:3}, at: slab_memory_callback+0x44/0x280
[   91.423603] which lock already depends on the new lock.

To fix it, we need to change the order in flush_all() so that
cpus_read_lock() is taken first and mutex_lock(&flush_lock) second.

Also, when called from slab_mem_going_offline_callback() we are already
under cpus_read_lock() and cannot take it again, so create a
flush_all_cpus_locked() variant and decouple flushing from the actual
shrinking for this call path.

Additionally, Mike Galbraith reported [2] a wrong ordering of
cpus_read_lock() and slab_mutex in the kmem_cache_destroy() path and
proposed a fix to reverse it.

This patch is a fixup for the mmotm patch
mm-slub-move-flush_cpu_slab-invocations-__free_slab-invocations-out-of-irq-context.patch

[1] https://lore.kernel.org/lkml/0b36128c-3e12-77df-85fe-a153a714569b@quicinc.com/
[2] https://lore.kernel.org/lkml/2eb3cf340716c40f03a0a342ab40219b3d1de195.camel@gmx.de/

Link: https://lkml.kernel.org/r/50fe26ba-450b-af57-506d-438f67cfbce3@suse.cz
Reported-by: Qian Cai
Reported-by: Mike Galbraith
Signed-off-by: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Stephen Rothwell
---
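Not part of the patch: the dependency cycle lockdep warns about above is
the classic two-lock (ABBA) inversion, which can be shown in miniature in
userspace. In the sketch below, lock_a, lock_b, path_one() and path_two()
are hypothetical placeholders, standing in only loosely for the
slab_mutex vs. flush_lock/cpu_hotplug_lock ordering; none of this is
kernel code.

/*
 * Illustrative sketch only, not kernel code: two threads acquire two
 * locks in opposite orders, so each ends up waiting on the lock the
 * other one holds. Build with: cc -pthread abba.c
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *path_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);	/* A... */
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&lock_b);	/* ...then B */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *path_two(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);	/* B... */
	sleep(1);
	pthread_mutex_lock(&lock_a);	/* ...then A: both threads block forever */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, path_one, NULL);
	pthread_create(&t2, NULL, path_two, NULL);
	pthread_join(t1, NULL);		/* typically never returns */
	pthread_join(t2, NULL);
	return 0;
}

Lockdep flags this class of cycle even before an actual hang occurs,
which is what the splat quoted above is doing.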
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1c673c323baf..ec2bb0beed75 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -502,6 +502,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
+	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
 	s->refcount--;
@@ -516,6 +517,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	}
 out_unlock:
 	mutex_unlock(&slab_mutex);
+	cpus_read_unlock();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
diff --git a/mm/slub.c b/mm/slub.c
index 0da7074f4c9a..5795e423483b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2554,13 +2554,13 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 static DEFINE_MUTEX(flush_lock);
 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
 
-static void flush_all(struct kmem_cache *s)
+static void flush_all_cpus_locked(struct kmem_cache *s)
 {
 	struct slub_flush_work *sfw;
 	unsigned int cpu;
 
+	lockdep_assert_cpus_held();
 	mutex_lock(&flush_lock);
-	cpus_read_lock();
 
 	for_each_online_cpu(cpu) {
 		sfw = &per_cpu(slub_flush, cpu);
@@ -2581,10 +2581,16 @@ static void flush_all(struct kmem_cache *s)
 		flush_work(&sfw->work);
 	}
 
-	cpus_read_unlock();
 	mutex_unlock(&flush_lock);
 }
 
+static void flush_all(struct kmem_cache *s)
+{
+	cpus_read_lock();
+	flush_all_cpus_locked(s);
+	cpus_read_unlock();
+}
+
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -4127,7 +4133,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 	int node;
 	struct kmem_cache_node *n;
 
-	flush_all(s);
+	flush_all_cpus_locked(s);
 	/* Attempt to free all objects */
 	for_each_kmem_cache_node(s, node, n) {
 		free_partial(s, n);
@@ -4403,7 +4409,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -4415,7 +4421,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	unsigned long flags;
 	int ret = 0;
 
-	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4465,13 +4470,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 	return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+	flush_all(s);
+	return __kmem_cache_do_shrink(s);
+}
+
static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
 	mutex_lock(&slab_mutex);
-	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s);
+	list_for_each_entry(s, &slab_caches, list) {
+		flush_all_cpus_locked(s);
+		__kmem_cache_do_shrink(s);
+	}
 	mutex_unlock(&slab_mutex);
 
 	return 0;
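Not part of the patch: the shape of the fix generalizes to a common
convention, one global lock order (outer cpu_hotplug_lock, then
flush_lock), plus a "_locked" variant for callers that already hold the
outer lock. A minimal userspace sketch of that convention follows;
outer_lock, inner_lock and the function bodies are hypothetical
stand-ins, with pthread mutexes in place of the kernel primitives (and
no userspace equivalent of lockdep_assert_cpus_held()).

/*
 * Illustrative sketch only, not kernel code. outer_lock stands in for
 * cpu_hotplug_lock (cpus_read_lock()), inner_lock for flush_lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like flush_all_cpus_locked(): the caller must already hold outer_lock. */
static void flush_all_locked(void)
{
	pthread_mutex_lock(&inner_lock);
	puts("flushing: outer held by caller, inner taken here");
	pthread_mutex_unlock(&inner_lock);
}

/* Like flush_all(): takes both locks itself, always outer before inner. */
static void flush_all(void)
{
	pthread_mutex_lock(&outer_lock);
	flush_all_locked();
	pthread_mutex_unlock(&outer_lock);
}

/*
 * Like slab_mem_going_offline_callback(): runs in a context where the
 * framework already holds outer_lock, so it must use the _locked
 * variant rather than trying to take outer_lock again.
 */
static void going_offline_callback(void)
{
	flush_all_locked();
}

int main(void)
{
	flush_all();

	/* Simulate the hotplug path: the framework takes outer_lock first. */
	pthread_mutex_lock(&outer_lock);
	going_offline_callback();
	pthread_mutex_unlock(&outer_lock);

	return 0;
}

Since every path now reaches inner_lock with outer_lock already held,
the opposite-order acquisition that produced the splat can no longer
occur.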