#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+					 unsigned long size, gfp_t gfp_mask)
+{
+	if (kasan_enabled())
+		return __kasan_populate_vmalloc(addr, size, gfp_mask);
+	return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
 			   unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+					 unsigned long free_region_start,
+					 unsigned long free_region_end,
+					 unsigned long flags)
+{
+	if (kasan_enabled())
+		__kasan_release_vmalloc(start, end, free_region_start,
+					free_region_end, flags);
+}
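Review note: this follows the usual KASAN entry-point pattern; call sites keep invoking kasan_populate_vmalloc()/kasan_release_vmalloc() unconditionally, and the inline wrapper reduces a KASAN-disabled boot to a single kasan_enabled() test. A minimal sketch of the effect at a hypothetical call site (example_map_area() is illustrative, not part of this patch):

/* Hypothetical caller on the vmalloc side; the name and body are
 * illustrative. With the wrapper inlined here, a KASAN-disabled boot
 * pays only the kasan_enabled() check, not a call into mm/kasan/. */
static int example_map_area(unsigned long addr, unsigned long size)
{
	int ret;

	ret = kasan_populate_vmalloc(addr, size, GFP_KERNEL);
	if (ret)
		return ret;	/* shadow population failed */

	/* ... proceed to map the actual pages ... */
	return 0;
}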
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
return 0;
}
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
{
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
 	return ret;
}
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
{
 	unsigned long shadow_start, shadow_end;
 	int ret;
-	if (!kasan_enabled())
-		return 0;
-
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+	ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
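For context on what gets populated here: the elided lines above derive shadow_start/shadow_end from the KASAN shadow mapping. For generic KASAN that translation looks roughly like the sketch below (modeled on kasan_mem_to_shadow() in include/linux/kasan.h; the offset is arch-specific):

/* Sketch: one shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of
 * memory (8 for generic KASAN); KASAN_SHADOW_OFFSET is per-arch. */
static inline void *example_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}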
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
 			   unsigned long flags)
 	unsigned long region_start, region_end;
 	unsigned long size;
-	if (!kasan_enabled())
-		return;
-
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
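A worked example of the inward rounding above, as a standalone sketch. The constants mirror generic KASAN with 4K pages and an 8-byte granule (so one shadow page covers 8 << 12 = 32K of address space); they are illustrative only, the real definitions live in mm/kasan/kasan.h:

#include <stdio.h>

#define PAGE_SHIFT			12
#define KASAN_GRANULE_SIZE		8UL
#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long start = 0x9000, end = 0x19000;	/* a 64K region */

	/* Round inward: only shadow pages whose covered memory lies
	 * entirely inside [start, end) may be released. */
	unsigned long region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	unsigned long region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	/* Prints [0x10000, 0x18000): the partially covered shadow
	 * pages at both edges are kept, so concurrent allocations in
	 * adjacent regions stay safe. */
	printf("release shadow for [%#lx, %#lx)\n", region_start, region_end);
	return 0;
}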
 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
 	 */
-	if (!kasan_enabled())
-		return (void *)start;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
-	if (!kasan_enabled())
-		return;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return;
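For reference, all of these wrappers assume kasan_enabled() is cheap at the call site. In recent kernels it is backed by a static key; a sketch of that dependency (modeled on include/linux/kasan-enabled.h; the exact form varies by kernel version and KASAN mode):

/* Sketch of the kasan_enabled() dependency: a static branch that is
 * patched at boot, so the disabled case is a single no-op/jump. */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}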