mm/kmsan: fix kmsan kmalloc hook when no stack depots are allocated yet
author     Aleksei Nikiforov <aleksei.nikiforov@linux.ibm.com>
           Tue, 30 Sep 2025 11:56:01 +0000 (13:56 +0200)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 15 Oct 2025 04:28:40 +0000 (21:28 -0700)
If no stack depot is allocated yet, KMSAN called from kmalloc cannot
allocate one, because the __GFP_RECLAIM flags are masked out. KMSAN
then fails to record the origin, which may result in issues going
unreported.
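
For reference (paraphrasing the definitions in include/linux/gfp_types.h,
which may differ slightly between kernel versions), __GFP_RECLAIM covers
both reclaim modifiers, so masking it forces stack_depot_save() into a
non-sleeping allocation that can fail when no depot pool is available
yet:

  /* Paraphrased gfp definitions, not copied verbatim. */
  #define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
  /* GFP_KERNEL normally allows both direct and kswapd reclaim. */
  #define GFP_KERNEL    (__GFP_RECLAIM | __GFP_IO | __GFP_FS)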

Reusing the flags from kmalloc without modifying them should be safe
for KMSAN. For example, the following chain of calls is possible:

  test_uninit_kmalloc -> kmalloc -> __kmalloc_cache_noprof ->
  slab_alloc_node -> slab_post_alloc_hook ->
  kmsan_slab_alloc -> kmsan_internal_poison_memory
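
A simplified sketch of how the kmalloc() gfp mask travels down that
chain (bodies paraphrased from mm/kmsan/hooks.c and mm/kmsan/core.c,
not copied verbatim; error checks and special cases are omitted):

  /*
   * Alloc path: the caller's flags are forwarded unmodified, so
   * stack_depot_save() may allocate a new depot pool with the same
   * reclaim permissions as the original kmalloc().
   */
  void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
  {
          kmsan_enter_runtime();
          /* Mark the fresh object uninitialized and record its origin. */
          kmsan_internal_poison_memory(object, s->object_size, flags,
                                       KMSAN_POISON_CHECK);
          kmsan_leave_runtime();
  }

  void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
                                    unsigned int poison_flags)
  {
          depot_stack_handle_t handle;

          /* Recording the origin may need a new stack depot pool, and
           * that allocation honours the flags passed in here. */
          handle = kmsan_save_stack_with_flags(flags, /*extra_bits=*/0);
          kmsan_internal_set_shadow_origin(address, size, -1, handle,
                                           poison_flags & KMSAN_POISON_CHECK);
  }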

The __GFP_RECLAIM flags should only be masked when KMSAN is called in
a context where no allocation flags are available.
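
As a paraphrased illustration of that flag-less case (the helper name
below is hypothetical, not kernel code): the hooks fall back to
GFP_KERNEL with reclaim masked, so stack_depot_save() never sleeps; a
zero handle just means the stack could not be recorded, and the memory
is still poisoned, only without an origin.

  /* Hypothetical helper sketching the flag-less (e.g. free path) case. */
  static depot_stack_handle_t record_origin_no_gfp_context(unsigned long *entries,
                                                           unsigned int nr_entries)
  {
          /* Don't sleep: mask both direct and kswapd reclaim. */
          return stack_depot_save(entries, nr_entries,
                                  GFP_KERNEL & ~__GFP_RECLAIM);
  }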

With this change, all KMSAN tests start working reliably.

Link: https://lkml.kernel.org/r/20250930115600.709776-2-aleksei.nikiforov@linux.ibm.com
Signed-off-by: Aleksei Nikiforov <aleksei.nikiforov@linux.ibm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kmsan/core.c
mm/kmsan/hooks.c
mm/kmsan/shadow.c

diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index bd612e5aa7b492bea26f3901ac5d1e202cbfb0db..90f427b95a213ebc3925b2511ef277939d5f980a 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -72,9 +72,6 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 
        nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
 
-       /* Don't sleep. */
-       flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);
-
        handle = stack_depot_save(entries, nr_entries, flags);
        return stack_depot_set_extra_bits(handle, extra);
 }
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 2cee59d89c808f7a6b169856bca9b2ff5b0dc252..8f22d1f229813f0fc393affee826fd2e414533cc 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -84,7 +84,8 @@ void kmsan_slab_free(struct kmem_cache *s, void *object)
        if (s->ctor)
                return;
        kmsan_enter_runtime();
-       kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
+       kmsan_internal_poison_memory(object, s->object_size,
+                                    GFP_KERNEL & ~(__GFP_RECLAIM),
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
 }
@@ -114,7 +115,8 @@ void kmsan_kfree_large(const void *ptr)
        kmsan_enter_runtime();
        page = virt_to_head_page((void *)ptr);
        KMSAN_WARN_ON(ptr != page_address(page));
-       kmsan_internal_poison_memory((void *)ptr, page_size(page), GFP_KERNEL,
+       kmsan_internal_poison_memory((void *)ptr, page_size(page),
+                                    GFP_KERNEL & ~(__GFP_RECLAIM),
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
 }
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index 3cd733663100d0845c2e5e02f6fe2b48584d695d..e7f554a31bb446df3742cb3423dd6020fe192545 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -208,7 +208,7 @@ void kmsan_free_page(struct page *page, unsigned int order)
                return;
        kmsan_enter_runtime();
        kmsan_internal_poison_memory(page_address(page), page_size(page),
-                                    GFP_KERNEL,
+                                    GFP_KERNEL & ~(__GFP_RECLAIM),
                                     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
        kmsan_leave_runtime();
 }