* @page_shift:        page_shift passed to vmap_range_noflush().
  *
  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
- * vmalloc metadata address range.
+ * vmalloc metadata address range. Returns 0 on success; callers must check
+ * for a non-zero return value.
  */
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                   pgprot_t prot, struct page **pages,
-                                   unsigned int page_shift);
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                  pgprot_t prot, struct page **pages,
+                                  unsigned int page_shift);
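
A minimal caller-side sketch of the new contract (a hypothetical wrapper for
illustration only; the real in-tree caller is the vmap_pages_range_noflush()
hunk at the end of this patch):

	/*
	 * Hypothetical caller: give up early if KMSAN cannot allocate or map
	 * its shadow/origin metadata, rather than mapping data pages that
	 * have no metadata behind them.
	 */
	static int map_with_kmsan(unsigned long start, unsigned long end,
				  pgprot_t prot, struct page **pages,
				  unsigned int page_shift)
	{
		int err = kmsan_vmap_pages_range_noflush(start, end, prot,
							 pages, page_shift);

		if (err)
			return err;	/* e.g. -ENOMEM from the metadata kcalloc() */

		/* ... map the data pages themselves ... */
		return 0;
	}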
 
 /**
  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
 {
 }
 
-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
-                                                 unsigned long end,
-                                                 pgprot_t prot,
-                                                 struct page **pages,
-                                                 unsigned int page_shift)
+static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
+                                                unsigned long end,
+                                                pgprot_t prot,
+                                                struct page **pages,
+                                                unsigned int page_shift)
 {
+       return 0;
 }
 
 static inline void kmsan_vunmap_range_noflush(unsigned long start,
 
        kmsan_leave_runtime();
 }
 
-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
-                                   pgprot_t prot, struct page **pages,
-                                   unsigned int page_shift)
+int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+                                  pgprot_t prot, struct page **pages,
+                                  unsigned int page_shift)
 {
        unsigned long shadow_start, origin_start, shadow_end, origin_end;
        struct page **s_pages, **o_pages;
-       int nr, mapped;
+       int nr, mapped, err = 0;
 
        if (!kmsan_enabled)
-               return;
+               return 0;
 
        shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
        shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
        if (!shadow_start)
-               return;
+               return 0;
 
        nr = (end - start) / PAGE_SIZE;
        s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
        o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
-       if (!s_pages || !o_pages)
+       if (!s_pages || !o_pages) {
+               err = -ENOMEM;
                goto ret;
+       }
        for (int i = 0; i < nr; i++) {
                s_pages[i] = shadow_page_for(pages[i]);
                o_pages[i] = origin_page_for(pages[i]);
        kmsan_enter_runtime();
        mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
                                            s_pages, page_shift);
-       KMSAN_WARN_ON(mapped);
+       if (mapped) {
+               err = mapped;
+               goto ret;
+       }
        mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
                                            o_pages, page_shift);
-       KMSAN_WARN_ON(mapped);
+       if (mapped) {
+               err = mapped;
+               goto ret;
+       }
        kmsan_leave_runtime();
        flush_tlb_kernel_range(shadow_start, shadow_end);
        flush_tlb_kernel_range(origin_start, origin_end);
 ret:
        kfree(s_pages);
        kfree(o_pages);
+       return err;
 }
 
 /* Allocate metadata for pages allocated at boot time. */
 
 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
-       kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+       int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+                                                page_shift);
+
+       if (ret)
+               return ret;
        return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 }