mm/memmap: prevent double scanning of memmap by kmemleak
author Guo Weikang <guoweikang.kernel@gmail.com>
Mon, 6 Jan 2025 02:11:25 +0000 (10:11 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Sun, 26 Jan 2025 04:22:30 +0000 (20:22 -0800)
kmemleak explicitly scans the mem_map through the valid struct page
objects.  However, memmap_alloc() was also adding this memory to the gray
object list, causing it to be scanned twice.  Stop adding the memory
allocated by memmap_alloc() to the kmemleak scan list, and add a comment to
clarify the behavior.
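For context, the two paths that previously both covered the memory map can be sketched as
follows.  This is a condensed paraphrase, not verbatim kernel code: kmemleak_scan() walks
every online pfn and scans each valid struct page directly, while the memblock allocation
made by memmap_alloc() was additionally registered with kmemleak and scanned again as a
gray object.

/* Condensed sketch of kmemleak's explicit mem_map scan (paraphrased): */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
	struct page *page = pfn_to_online_page(pfn);

	if (!page || page_count(page) == 0)
		continue;			/* skip holes and unused pages */
	scan_block(page, page + 1, NULL);	/* scan this struct page */
}

/*
 * Previously, memmap_alloc() passed MEMBLOCK_ALLOC_ACCESSIBLE, so the same
 * memory was also registered as a kmemleak gray object and scanned a second
 * time; MEMBLOCK_ALLOC_NOLEAKTRACE skips that registration.
 */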

Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/
Link: https://lkml.kernel.org/r/20250106021126.1678334-1-guoweikang.kernel@gmail.com
Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/memblock.h
mm/mm_init.c
mm/sparse-vmemmap.c

index 673d5cae7c8134031cfc9a85b8aff8bc209d4d3d..d48b56c1e558ea18d0e4266ac2ea0622feaa6354 100644 (file)
@@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 /* Flags for memblock allocation APIs */
 #define MEMBLOCK_ALLOC_ANYWHERE        (~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE      0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE.
+ */
 #define MEMBLOCK_ALLOC_NOLEAKTRACE     1
 
 /* We are using top down, so it is safe to use 0 here */
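MEMBLOCK_ALLOC_NOLEAKTRACE doubles as a sentinel upper limit: when choosing a range,
memblock treats it the same way as MEMBLOCK_ALLOC_ACCESSIBLE (hence "implies"), but it
additionally suppresses kmemleak registration of the new block.  Roughly how the allocator
honors the flag, condensed from memblock_alloc_range_nid() in mm/memblock.c (exact code
varies by kernel version):

/* Inside memblock_alloc_range_nid(), after a suitable range has been found: */
if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
	/*
	 * Normally the new block is handed to kmemleak so it is treated
	 * as a gray object and scanned for pointers; NOLEAKTRACE callers
	 * opt out of this registration.
	 */
	kmemleak_alloc_phys(found, size, 0);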
index 24b68b425afb1bac00caa9ceb7b8385d49ddb01c..2630cc30147e052af5374464224dcc3887273bfd 100644 (file)
@@ -1585,13 +1585,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 {
        void *ptr;
 
+       /*
+        * Kmemleak will explicitly scan mem_map by traversing all valid
+        * `struct page` objects, so memblock does not need to be added to the scan list.
+        */
        if (exact_nid)
                ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
-                                                  MEMBLOCK_ALLOC_ACCESSIBLE,
+                                                  MEMBLOCK_ALLOC_NOLEAKTRACE,
                                                   nid);
        else
                ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
-                                                MEMBLOCK_ALLOC_ACCESSIBLE,
+                                                MEMBLOCK_ALLOC_NOLEAKTRACE,
                                                 nid);
 
        if (ptr && size > 0)
index cec67c5f37d884a248032de8878de3ba5f3925a3..3287ebadd167d39ef144229a69bffb9c3dbc65d3 100644 (file)
@@ -31,6 +31,8 @@
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 
+#include "internal.h"
+
 /*
  * Allocate a block of memory to be used to back the virtual memory map
  * or to back the page tables that are used to create the mapping.
@@ -42,8 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long align,
                                unsigned long goal)
 {
-       return memblock_alloc_try_nid_raw(size, align, goal,
-                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
+       return memmap_alloc(size, align, goal, node, false);
 }
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
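With this hunk, __earlyonly_bootmem_alloc() routes through memmap_alloc(), so vmemmap
backing memory also skips kmemleak registration and is only covered by the explicit
struct page scan.  The new #include "internal.h" pulls in the helper's declaration; based
on the call sites above, it is expected to look roughly like this in mm/internal.h (a
sketch inferred from the patch, not quoted from it):

/* Declared in mm/internal.h so both mm_init.c and sparse-vmemmap.c can use it. */
void *memmap_alloc(phys_addr_t size, phys_addr_t align,
		   phys_addr_t min_addr, int nid, bool exact_nid);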