alloc_tag: avoid execmem_vmap() when !MMU
author    Suren Baghdasaryan <surenb@google.com>
          Thu, 31 Oct 2024 23:36:11 +0000 (16:36 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 1 Nov 2024 04:29:20 +0000 (21:29 -0700)
With CONFIG_MMU=n, __get_vm_area_node() is not available.  Add a CONFIG_MMU
dependency for memory allocation tagging, since it uses __get_vm_area_node()
via execmem_vmap().
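
As a minimal illustration of why the dependency is sufficient (not part of
this patch; reserve_module_tag_area() and tag_area are hypothetical names),
code built only under MEM_ALLOC_PROFILING can call execmem_vmap() without
its own #ifdef CONFIG_MMU guard:

    #include <linux/errno.h>
    #include <linux/execmem.h>
    #include <linux/vmalloc.h>	/* struct vm_struct */

    /* Hypothetical holder for the mapped module-tag area. */
    static struct vm_struct *tag_area;

    static int reserve_module_tag_area(size_t size)
    {
    	/*
    	 * No #ifdef CONFIG_MMU needed here: MEM_ALLOC_PROFILING now
    	 * depends on MMU, so this code is only built in configurations
    	 * where execmem_vmap() is available.
    	 */
    	tag_area = execmem_vmap(size);
    	return tag_area ? 0 : -ENOMEM;
    }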

Link: https://lkml.kernel.org/r/20241031233611.3833002-1-surenb@google.com
Fixes: 57bc3834fb6f ("alloc_tag: populate memory for module tags as needed")
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202410250808.dQGyYjlk-lkp@intel.com/
Closes: https://lore.kernel.org/oe-lkp/202410251525.9f85854d-oliver.sang@intel.com
Closes: https://lore.kernel.org/oe-kbuild-all/202410261016.IO7C6Cml-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202410270919.LebQlmxD-lkp@intel.com/
Suggested-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Petr Pavlu <petr.pavlu@suse.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xiongwei Song <xiongwei.song@windriver.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/execmem.h
lib/Kconfig.debug
mm/execmem.c

diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index 5a5e2917f8706e1b1c32f3f3c3bdcb9ed05c65d2..64130ae19690a903eeb106f8592ddb073fe66046 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -139,6 +139,7 @@ void *execmem_alloc(enum execmem_type type, size_t size);
  */
 void execmem_free(void *ptr);
 
+#ifdef CONFIG_MMU
 /**
  * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
  * @size: size of the virtual mapping in bytes
@@ -148,6 +149,7 @@ void execmem_free(void *ptr);
  * Return: the area descriptor on success or %NULL on failure.
  */
 struct vm_struct *execmem_vmap(size_t size);
+#endif
 
 /**
  * execmem_update_copy - copy an update to executable memory
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7312ae7c3cc57b9d7e12286b4218206811d9705b..6798bbbcbd321f961999aded7671799be827369a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -993,6 +993,7 @@ config CODE_TAGGING
 config MEM_ALLOC_PROFILING
        bool "Enable memory allocation profiling"
        default n
+       depends on MMU
        depends on PROC_FS
        depends on !DEBUG_FORCE_WEAK_PER_CPU
        select CODE_TAGGING
diff --git a/mm/execmem.c b/mm/execmem.c
index 5c0f9f2d6f83219e40bdb75e858cbff1119ee733..317b6a8d35be070a0a426eb1eba8d947e8b7e5f5 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -64,6 +64,22 @@ static void *execmem_vmalloc(struct execmem_range *range, size_t size,
 
        return p;
 }
+
+struct vm_struct *execmem_vmap(size_t size)
+{
+       struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
+       struct vm_struct *area;
+
+       area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
+                                 range->start, range->end, NUMA_NO_NODE,
+                                 GFP_KERNEL, __builtin_return_address(0));
+       if (!area && range->fallback_start)
+               area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
+                                         range->fallback_start, range->fallback_end,
+                                         NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));
+
+       return area;
+}
 #else
 static void *execmem_vmalloc(struct execmem_range *range, size_t size,
                             pgprot_t pgprot, unsigned long vm_flags)
@@ -368,22 +384,6 @@ void execmem_free(void *ptr)
                vfree(ptr);
 }
 
-struct vm_struct *execmem_vmap(size_t size)
-{
-       struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
-       struct vm_struct *area;
-
-       area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
-                                 range->start, range->end, NUMA_NO_NODE,
-                                 GFP_KERNEL, __builtin_return_address(0));
-       if (!area && range->fallback_start)
-               area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
-                                         range->fallback_start, range->fallback_end,
-                                         NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));
-
-       return area;
-}
-
 void *execmem_update_copy(void *dst, const void *src, size_t size)
 {
        return text_poke_copy(dst, src, size);