select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
 
 TEXT_OFFSET := 0x00080000
 endif
 
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
+# in 32-bit arithmetic
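+# For example, with CONFIG_ARM64_VA_BITS=39 the upper 32 bits come out as
+# 0xffffff80 + 0x10 - 0x20000000 = 0xdfffff90, i.e.
+# KASAN_SHADOW_OFFSET = 0xdfffff9000000000.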
+KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
+                       (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+                       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
+                       - (1 << (64 - 32 - 3)) )) )
+
 export TEXT_OFFSET GZFLAGS
 
 core-y         += arch/arm64/kernel/ arch/arm64/mm/
 
--- /dev/null
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <asm/memory.h>
+
+/*
+ * KASAN_SHADOW_START: beginning of the kernel virtual address space.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of the kernel virtual
+ * address space.
+ */
+#define KASAN_SHADOW_START      (VA_START)
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+
+/*
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *     shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * (1 << 61) bytes of shadow, i.e. the range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END], cover all 64 bits of the
+ * virtual address space, so KASAN_SHADOW_OFFSET must satisfy:
+ *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ */
+#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
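+
+/*
+ * For example, with VA_BITS == 39 (KASAN_SHADOW_OFFSET ==
+ * 0xdfffff9000000000), the first kernel address VA_START ==
+ * 0xffffff8000000000 maps to (0xffffff8000000000 >> 3) +
+ * 0xdfffff9000000000 == 0xffffff8000000000, i.e. KASAN_SHADOW_START.
+ */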
+
+void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
+#endif
 
  *     fixed mappings and modules
  */
 #define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+
+#ifndef CONFIG_KASAN
 #define VMALLOC_START          (VA_START)
+#else
+#include <asm/kasan.h>
+#define VMALLOC_START          (KASAN_SHADOW_END + SZ_64K)
+#endif
+
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
 
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCHR
 extern void *memchr(const void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
+extern void *__memset(void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *, const void *, size_t);
 
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * Files that are not instrumented (e.g. mm/slub.c) should use the
+ * non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
 #endif
 
 CFLAGS_efi-stub.o      := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_armv8_deprecated.o := -I$(src)
 
+KASAN_SANITIZE_efi-stub.o      := n
+
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
 CFLAGS_REMOVE_return_address.o = -pg
 
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcmp);
 
 
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
        str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
        mov     x29, #0
+#ifdef CONFIG_KASAN
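+       // Set up the early KASAN shadow before any instrumented C code runs.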
+       bl      kasan_early_init
+#endif
        b       start_kernel
 ENDPROC(__mmap_switched)
 
 
 __efistub_strncmp              = __pi_strncmp;
 __efistub___flush_dcache_area  = __pi___flush_dcache_area;
 
+#ifdef CONFIG_KASAN
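+/*
+ * The EFI stub is built without KASAN instrumentation
+ * (KASAN_SANITIZE_efi-stub.o := n), so asm/string.h redirects its mem*
+ * calls to __memcpy/__memmove/__memset; alias those to the
+ * position-independent routines.
+ */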
+__efistub___memcpy             = __pi_memcpy;
+__efistub___memmove            = __pi_memmove;
+__efistub___memset             = __pi_memset;
+#endif
+
 __efistub__text                        = _text;
 __efistub__end                 = _end;
 __efistub__edata               = _edata;
 
 #include <linux/bitops.h>
 #include <linux/elf.h>
 #include <linux/gfp.h>
+#include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 
 void *module_alloc(unsigned long size)
 {
-       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                                   GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
-                                   NUMA_NO_NODE, __builtin_return_address(0));
+       void *p;
+
+       p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+                               NUMA_NO_NODE, __builtin_return_address(0));
+
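+       /*
+        * Allocate KASAN shadow for the newly allocated module area; if
+        * that fails, undo the vmalloc allocation as well.
+        */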
+       if (p && (kasan_module_alloc(p, size) < 0)) {
+               vfree(p);
+               return NULL;
+       }
+
+       return p;
 }
 
 enum aarch64_reloc_op {
 
 #include <asm/elf.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 
        paging_init();
        relocate_initrd();
+
+       kasan_init();
+
        request_standard_resources();
 
        early_ioremap_reset();
 
        stp \ptr, \regB, [\regC], \val
        .endm
 
+       .weak memcpy
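+/*
+ * memcpy is declared weak so that KASAN can override it with an
+ * instrumented C version; __memcpy always refers to this plain
+ * routine. memmove and memset use the same pattern.
+ */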
+ENTRY(__memcpy)
 ENTRY(memcpy)
 #include "copy_template.S"
        ret
 ENDPIPROC(memcpy)
+ENDPROC(__memcpy)
 
 D_l    .req    x13
 D_h    .req    x14
 
+       .weak memmove
+ENTRY(__memmove)
 ENTRY(memmove)
        cmp     dstin, src
-       b.lo    memcpy
+       b.lo    __memcpy
        add     tmp1, src, count
        cmp     dstin, tmp1
-       b.hs    memcpy          /* No overlap.  */
+       b.hs    __memcpy                /* No overlap.  */
 
        add     dst, dstin, count
        add     src, src, count
        b.ne    .Ltail63
        ret
 ENDPIPROC(memmove)
+ENDPROC(__memmove)
 
 tmp3w          .req    w9
 tmp3           .req    x9
 
+       .weak memset
+ENTRY(__memset)
 ENTRY(memset)
        mov     dst, dstin      /* Preserve return value.  */
        and     A_lw, val, #255
        b.ne    .Ltail_maybe_long
        ret
 ENDPIPROC(memset)
+ENDPROC(__memset)
 
                                   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP)     += dump.o
+
+obj-$(CONFIG_KASAN)            += kasan_init.o
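+# The code that sets up the shadow must not itself be instrumented.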
+KASAN_SANITIZE_kasan_init.o    := n
 
--- /dev/null
+/*
+ * This file contains kasan initialization code for ARM64.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/start_kernel.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+
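+/*
+ * The early shadow maps every shadow page to the single kasan_zero_page,
+ * so every shadow lookup reads zero ("no poison") until kasan_init()
+ * installs the real shadow.
+ */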
+static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
+                                       unsigned long end)
+{
+       pte_t *pte;
+       unsigned long next;
+
+       if (pmd_none(*pmd))
+               pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               next = addr + PAGE_SIZE;
+               set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+                                       PAGE_KERNEL));
+       } while (pte++, addr = next, addr != end && pte_none(*pte));
+}
+
+static void __init kasan_early_pmd_populate(pud_t *pud,
+                                       unsigned long addr,
+                                       unsigned long end)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       if (pud_none(*pud))
+               pud_populate(&init_mm, pud, kasan_zero_pmd);
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               kasan_early_pte_populate(pmd, addr, next);
+       } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+}
+
+static void __init kasan_early_pud_populate(pgd_t *pgd,
+                                       unsigned long addr,
+                                       unsigned long end)
+{
+       pud_t *pud;
+       unsigned long next;
+
+       if (pgd_none(*pgd))
+               pgd_populate(&init_mm, pgd, kasan_zero_pud);
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               kasan_early_pmd_populate(pud, addr, next);
+       } while (pud++, addr = next, addr != end && pud_none(*pud));
+}
+
+static void __init kasan_map_early_shadow(void)
+{
+       unsigned long addr = KASAN_SHADOW_START;
+       unsigned long end = KASAN_SHADOW_END;
+       unsigned long next;
+       pgd_t *pgd;
+
+       pgd = pgd_offset_k(addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               kasan_early_pud_populate(pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+void __init kasan_early_init(void)
+{
+       BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+       kasan_map_early_shadow();
+}
+
+static void __init clear_pgds(unsigned long start,
+                       unsigned long end)
+{
+       /*
+        * Remove references to the kasan page tables from
+        * swapper_pg_dir. pgd_clear() can't be used here because
+        * it is a nop on 2- and 3-level page table setups.
+        */
+       for (; start < end; start += PGDIR_SIZE)
+               set_pgd(pgd_offset_k(start), __pgd(0));
+}
+
+static void __init cpu_set_ttbr1(unsigned long ttbr1)
+{
+       asm(
+       "       msr     ttbr1_el1, %0\n"
+       "       isb"
+       :
+       : "r" (ttbr1));
+}
+
+void __init kasan_init(void)
+{
+       struct memblock_region *reg;
+
+       /*
+        * We are going to perform proper setup of shadow memory.
+        * First we must unmap the early shadow (the clear_pgds() call
+        * below). However, instrumented code cannot run without shadow
+        * memory, so tmp_pg_dir is used to keep the early shadow mapped
+        * until the full shadow setup is finished.
+        */
+       memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+       cpu_set_ttbr1(__pa(tmp_pg_dir));
+       flush_tlb_all();
+
+       clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
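+       /*
+        * Back the shadow of [VA_START, MODULES_VADDR), i.e. everything
+        * below the modules area, with the zero page. Real shadow is
+        * allocated below for the linear mapping and, via
+        * kasan_module_alloc(), for modules.
+        */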
+       kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+
+       for_each_memblock(memory, reg) {
+               void *start = (void *)__phys_to_virt(reg->base);
+               void *end = (void *)__phys_to_virt(reg->base + reg->size);
+
+               if (start >= end)
+                       break;
+
+               /*
+                * end + 1 here is intentional: we check several shadow
+                * bytes in advance to slightly speed up the fast path.
+                * In some rare cases this could cross the boundary of
+                * the mapped shadow, so just map a bit more here.
+                */
+               vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
+                               (unsigned long)kasan_mem_to_shadow(end) + 1,
+                               pfn_to_nid(virt_to_pfn(start)));
+       }
+
+       memset(kasan_zero_page, 0, PAGE_SIZE);
+       cpu_set_ttbr1(__pa(swapper_pg_dir));
+       flush_tlb_all();
+
+       /* At this point kasan is fully initialized. Enable error messages */
+       init_task.kasan_depth = 0;
+       pr_info("KernelAddressSanitizer initialized\n");
+}
 
 #
 # Makefile for linux kernel
 #
+
+#
+# ARM64 maps EFI runtime services at userspace addresses, which have
+# no KASAN shadow. Dereferencing these addresses in efi_call_virt()
+# would therefore crash if this code were instrumented.
+#
+KASAN_SANITIZE_runtime-wrappers.o      := n
+
 obj-$(CONFIG_EFI)                      += efi.o vars.o reboot.o
 obj-$(CONFIG_EFI_VARS)                 += efivars.o
 obj-$(CONFIG_EFI_ESRT)                 += esrt.o
 
        call_threshold := 0
 endif
 
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
 CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
-               -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
+               -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \
                --param asan-stack=1 --param asan-globals=1 \
                --param asan-instrumentation-with-call-threshold=$(call_threshold))