kasan: cleanup of kasan_enabled() checks
author Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Thu, 9 Oct 2025 15:54:03 +0000 (20:54 +0500)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 22 Oct 2025 01:51:32 +0000 (18:51 -0700)
Deduplicate kasan_enabled() checks that the callers already perform.

* Altered functions:

check_page_allocation
Delete the check because the callers already perform it in their
__wrappers in include/linux/kasan.h (see the sketch below the list):
__kasan_kfree_large
__kasan_mempool_poison_pages
__kasan_mempool_poison_object
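
For reference, the existing wrapper pattern looks roughly like this (a
sketch based on include/linux/kasan.h, one wrapper shown, signatures
abridged); the kasan_enabled() static-key test already happens before the
out-of-line helper is reached:

    /* sketch of an existing __wrapper in include/linux/kasan.h */
    static __always_inline void kasan_kfree_large(void *ptr)
    {
            if (kasan_enabled())
                    __kasan_kfree_large(ptr, _RET_IP_);
    }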

kasan_populate_vmalloc, kasan_release_vmalloc
Add __wrappers in include/linux/kasan.h, since these functions are called
externally from mm/vmalloc.c (an illustrative call site is sketched below).
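
For illustration only (not part of this patch; addr, size, gfp_mask and
ret are placeholders), an external call site keeps using the same API,
and the kasan_enabled() test now happens in the new inline wrapper before
the call crosses into mm/kasan/shadow.c:

    /* hypothetical external call site, unchanged by this patch */
    ret = kasan_populate_vmalloc(addr, size, gfp_mask);
    if (ret)
            return ret;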

__kasan_unpoison_vmalloc, __kasan_poison_vmalloc
Delete the checks because kasan_enabled() is already checked in the
respective __wrappers in include/linux/kasan.h (sketched below).
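
The guarding wrappers look roughly like this (a sketch based on
include/linux/kasan.h; the unpoison wrapper is analogous but returns
(void *)start when KASAN is disabled):

    /* sketch of the existing __wrapper in include/linux/kasan.h */
    static __always_inline void kasan_poison_vmalloc(const void *start,
                                                     unsigned long size)
    {
            if (kasan_enabled())
                    __kasan_poison_vmalloc(start, size);
    }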

release_free_meta -- Delete the check because it is already performed
higher up the call path. See the call chains:

__kasan_slab_free -- has the check already
__kasan_mempool_poison_object -- has the check already
  poison_slab_object
    kasan_save_free_info
      release_free_meta
        kasan_enabled() -- Delete here

Link: https://lkml.kernel.org/r/20251009155403.1379150-3-snovitoll@gmail.com
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/kasan.h
mm/kasan/common.c
mm/kasan/generic.c
mm/kasan/shadow.c

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d12e1a5f5a9af80c012f01f3fe908cce39bdb925..f335c1d7b61d30b0a3853aac47d376a2e0d4f6ff 100644
@@ -571,11 +571,27 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+                                        unsigned long size, gfp_t gfp_mask)
+{
+       if (kasan_enabled())
+               return __kasan_populate_vmalloc(addr, size, gfp_mask);
+       return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end,
                           unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+                          unsigned long free_region_start,
+                          unsigned long free_region_end,
+                          unsigned long flags)
+{
+       if (kasan_enabled())
+               return __kasan_release_vmalloc(start, end, free_region_start,
+                                        free_region_end, flags);
+}
 
 #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359feaf9d8fb4e92374ee9f9569c41d3574..22e5d67ff064e93724dec09f82542bd3d27afa5c 100644
@@ -305,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-       if (!kasan_enabled())
-               return false;
-
        if (ptr != page_address(virt_to_head_page(ptr))) {
                kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
                return true;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 516b49accc4ff89968f31d8923bc21377490bfdf..2b8e73f5f6a70ab8f79fb7bebdd68487366704a7 100644
@@ -506,9 +506,6 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
-       if (!kasan_enabled())
-               return;
-
        /* Check if free meta is valid. */
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
                return;
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index a30d84bfdd52bdfbba0ca2232d42d1c13b1c8599..29a751a8a08d9713c421e62923a5adff8197d4d7 100644
@@ -354,7 +354,7 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask
        return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
        unsigned long nr_pages, nr_total = PFN_UP(end - start);
        struct vmalloc_populate_data data;
@@ -395,14 +395,11 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_
        return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
        unsigned long shadow_start, shadow_end;
        int ret;
 
-       if (!kasan_enabled())
-               return 0;
-
        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;
 
@@ -424,7 +421,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mas
        shadow_start = PAGE_ALIGN_DOWN(shadow_start);
        shadow_end = PAGE_ALIGN(shadow_end);
 
-       ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+       ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
        if (ret)
                return ret;
 
@@ -566,7 +563,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
  * pages entirely covered by the free region, we will not run in to any
  * trouble - any simultaneous allocations will be for disjoint regions.
  */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end,
                           unsigned long flags)
@@ -575,9 +572,6 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        unsigned long region_start, region_end;
        unsigned long size;
 
-       if (!kasan_enabled())
-               return;
-
        region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
        region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
@@ -626,9 +620,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
         * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
         */
 
-       if (!kasan_enabled())
-               return (void *)start;
-
        if (!is_vmalloc_or_module_addr(start))
                return (void *)start;
 
@@ -651,9 +642,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-       if (!kasan_enabled())
-               return;
-
        if (!is_vmalloc_or_module_addr(start))
                return;