#include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/kfence.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>
 
 #define NO_BLOCK_MAPPINGS      BIT(0)
 #define NO_CONT_MAPPINGS       BIT(1)
 }
 early_param("crashkernel", enable_crash_mem_map);
 
+#ifdef CONFIG_KFENCE
+
+bool kfence_early_init __ro_after_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
+{
+       int val;
+
+       if (get_option(&arg, &val))
+               kfence_early_init = !!val;
+       return 0;
+}
+early_param("kfence.sample_interval", parse_kfence_early_init);
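
For illustration (boot-time behaviour implied by the handler above, not part of the patch), the "kfence.sample_interval" value seen at early-param time decides whether the early pool is reserved at all:

    kfence.sample_interval=500   # kfence_early_init ends up true: the pool is
                                 # reserved from memblock in map_mem() below
    kfence.sample_interval=0     # kfence_early_init ends up false: no early pool;
                                 # enabling KFENCE later requires a page-granular
                                 # linear map (see can_set_direct_map() below)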
+
+static phys_addr_t __init arm64_kfence_alloc_pool(void)
+{
+       phys_addr_t kfence_pool;
+
+       if (!kfence_early_init)
+               return 0;
+
+       kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+       if (!kfence_pool) {
+               pr_err("failed to allocate kfence pool\n");
+               kfence_early_init = false;
+               return 0;
+       }
+
+       /*
+        * Mark the pool NOMAP for now so the rest of map_mem() skips it; it is
+        * mapped at page granularity by arm64_kfence_map_pool() afterwards.
+        */
+       memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
+       return kfence_pool;
+}
+
+static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+{
+       if (!kfence_pool)
+               return;
+
+       /* KFENCE pool needs page-level mapping. */
+       __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+                       pgprot_tagged(PAGE_KERNEL),
+                       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+       memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
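+       /* Publish the pool so the KFENCE core will not allocate its own. */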
+       __kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+
+#endif /* CONFIG_KFENCE */
+
 static void __init map_mem(pgd_t *pgdp)
 {
        static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
        phys_addr_t kernel_start = __pa_symbol(_stext);
        phys_addr_t kernel_end = __pa_symbol(__init_begin);
        phys_addr_t start, end;
+       phys_addr_t early_kfence_pool;
        int flags = NO_EXEC_MAPPINGS;
        u64 i;
 
         */
        BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
+       early_kfence_pool = arm64_kfence_alloc_pool();
+
        if (can_set_direct_map())
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
                }
        }
 #endif
+
+       arm64_kfence_map_pool(early_kfence_pool, pgdp);
 }
 
 void mark_rodata_ro(void)
 
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
+#include <asm/kfence.h>
 
 struct page_change_data {
        pgprot_t set_mask;
 bool can_set_direct_map(void)
 {
        /*
-        * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+        * rodata_full and DEBUG_PAGEALLOC require linear map to be
         * mapped at page granularity, so that it is possible to
         * protect/unprotect single pages.
+        *
+        * KFENCE pool requires page-granular mapping if initialized late.
         */
        return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-               IS_ENABLED(CONFIG_KFENCE);
+               arm64_kfence_can_set_direct_map();
 }
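
can_set_direct_map() lives in arch/arm64/mm/pageattr.c; the arm64_kfence_can_set_direct_map() helper it now calls is provided by <asm/kfence.h>, which is outside this excerpt. A minimal sketch of that helper, assuming kfence_early_init is declared extern in the header:

    /* Sketch only: assumes <asm/kfence.h> exports kfence_early_init. */
    #ifdef CONFIG_KFENCE
    extern bool kfence_early_init;
    static inline bool arm64_kfence_can_set_direct_map(void)
    {
            /* A late-initialised pool needs a page-granular linear map. */
            return !kfence_early_init;
    }
    #else
    static inline bool arm64_kfence_can_set_direct_map(void)
    {
            return false;
    }
    #endif

With an early pool, only the pool itself needs page-level mappings (done in arm64_kfence_map_pool() above), so the rest of the linear map can keep block and contiguous mappings.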
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)