#include <asm/pgtable-bits.h>
 
-#ifndef __ASSEMBLY__
+#ifndef CONFIG_MMU
+#define KERNEL_LINK_ADDR       PAGE_OFFSET
+#else
 
-/* Page Upper Directory not used in RISC-V */
-#include <asm-generic/pgtable-nopud.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <linux/mm_types.h>
+#define ADDRESS_SPACE_END      (UL(-1))
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_64BIT
+/* Leave 2GB for the kernel and BPF at the end of the address space */
+#define KERNEL_LINK_ADDR       (ADDRESS_SPACE_END - SZ_2G + 1)
+#else
+#define KERNEL_LINK_ADDR       PAGE_OFFSET
+#endif
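+/*
+ * e.g. on 64-bit: 0xffffffffffffffff - SZ_2G + 1 = 0xffffffff80000000,
+ * i.e. the kernel image is linked into the top 2GB of the address space.
+ */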
 
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
 #define BPF_JIT_REGION_SIZE    (SZ_128M)
+#ifdef CONFIG_64BIT
+/* KASLR should leave at least 128MB for BPF after the kernel */
+#define BPF_JIT_REGION_START   PFN_ALIGN((unsigned long)&_end)
+#define BPF_JIT_REGION_END     (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
+#else
 #define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
 #define BPF_JIT_REGION_END     (VMALLOC_END)
+#endif
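+/*
+ * i.e. on 64-bit the JIT region is [PFN_ALIGN(_end), PFN_ALIGN(_end) + 128MB),
+ * right after the kernel image, so JITed code stays within reach of kernel
+ * symbols.
+ */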
+
+/* Modules always live before the kernel */
+#ifdef CONFIG_64BIT
+#define MODULES_VADDR  (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END    (PFN_ALIGN((unsigned long)&_start))
+#endif
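+/*
+ * i.e. modules occupy the (at most) 2GB window that ends at the start of the
+ * kernel image, keeping them within the +/-2GB range of PC-relative
+ * addressing from the kernel text.
+ */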
 
 /*
  * Roughly size the vmemmap space to be large enough to fit enough
 #define FIXADDR_SIZE     PGDIR_SIZE
 #endif
 #define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
 #endif
 
+#ifndef __ASSEMBLY__
+
+/* Page Upper Directory not used in RISC-V */
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
 
 #define kern_addr_valid(addr)   (1) /* FIXME */
 
+extern char _start[];
 extern void *dtb_early_va;
 extern uintptr_t dtb_early_pa;
 void setup_bootmem(void);
 
 
 #include "../kernel/head.h"
 
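+/* Virtual base of the kernel mapping; equal to PAGE_OFFSET on 32-bit */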
+unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
+EXPORT_SYMBOL(kernel_virt_addr);
+
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
                  (unsigned long)VMALLOC_END);
        print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
                  (unsigned long)high_memory);
+#ifdef CONFIG_64BIT
+       print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
+                 (unsigned long)ADDRESS_SPACE_END);
+#endif
 }
 #else
 static void print_vm_layout(void) { }
        /* The maximal physical memory size is -PAGE_OFFSET. */
        memblock_enforce_memory_limit(-PAGE_OFFSET);
 
-       /* Reserve from the start of the kernel to the end of the kernel */
-       memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+       /*
+        * Reserve from the start of the kernel to the end of the kernel
+        * and make sure we align the reservation on PMD_SIZE since we will
+        * map the kernel in the linear mapping as read-only: we do not want
+        * any allocation to happen between _end and the next PMD-aligned page.
+        */
+       memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);
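+       /*
+        * e.g. with 2MB PMDs, a 12MB + 4KB image rounds up to a 14MB
+        * reservation, covering the partial PMD that follows _end.
+        */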
 
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
 #ifdef CONFIG_MMU
 static struct pt_alloc_ops pt_ops;
 
+/* Offset between linear mapping virtual address and kernel load address */
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
+#ifdef CONFIG_64BIT
+/* Offset between kernel mapping virtual address and kernel load address */
+unsigned long va_kernel_pa_offset;
+EXPORT_SYMBOL(va_kernel_pa_offset);
+#endif
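+/*
+ * A kernel image physical address thus has two virtual aliases:
+ *   linear mapping: va = pa + va_pa_offset
+ *   kernel mapping: va = pa + va_kernel_pa_offset
+ * (cf. kernel_mapping_pa_to_va() used for the early DTB below)
+ */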
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
 
 static phys_addr_t __init alloc_pmd_early(uintptr_t va)
 {
-       BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
+       BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
 
        return (uintptr_t)early_pmd;
 }
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
+uintptr_t load_pa, load_sz;
+
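+/*
+ * Map [kernel_virt_addr, kernel_virt_addr + load_sz) onto the physical load
+ * address in the given page table, at the given granularity.
+ */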
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+{
+       uintptr_t va, end_va;
+
+       end_va = kernel_virt_addr + load_sz;
+       for (va = kernel_virt_addr; va < end_va; va += map_size)
+               create_pgd_mapping(pgdir, va,
+                                  load_pa + (va - kernel_virt_addr),
+                                  map_size, PAGE_KERNEL_EXEC);
+}
+
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
-       uintptr_t va, pa, end_va;
-       uintptr_t load_pa = (uintptr_t)(&_start);
-       uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
+       uintptr_t pa;
        uintptr_t map_size;
 #ifndef __PAGETABLE_PMD_FOLDED
        pmd_t fix_bmap_spmd, fix_bmap_epmd;
 #endif
+       load_pa = (uintptr_t)(&_start);
+       load_sz = (uintptr_t)(&_end) - load_pa;
 
        va_pa_offset = PAGE_OFFSET - load_pa;
+#ifdef CONFIG_64BIT
+       va_kernel_pa_offset = kernel_virt_addr - load_pa;
+#endif
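+       /*
+        * e.g. with Sv39 (PAGE_OFFSET = 0xffffffe000000000) and the kernel
+        * loaded at 0x80200000:
+        *   va_pa_offset        = 0xffffffe000000000 - 0x80200000
+        *   va_kernel_pa_offset = 0xffffffff80000000 - 0x80200000
+        */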
+
        pfn_base = PFN_DOWN(load_pa);
 
        /*
        create_pmd_mapping(fixmap_pmd, FIXADDR_START,
                           (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
        /* Setup trampoline PGD and PMD */
-       create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+       create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
                           (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
-       create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
+       create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
                           load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
 #else
        /* Setup trampoline PGD */
-       create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+       create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
                           load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
 #endif
 
        /*
-        * Setup early PGD covering entire kernel which will allows
+        * Setup early PGD covering entire kernel which will allow
         * us to reach paging_init(). We map all memory banks later
         * in setup_vm_final() below.
         */
-       end_va = PAGE_OFFSET + load_sz;
-       for (va = PAGE_OFFSET; va < end_va; va += map_size)
-               create_pgd_mapping(early_pg_dir, va,
-                                  load_pa + (va - PAGE_OFFSET),
-                                  map_size, PAGE_KERNEL_EXEC);
+       create_kernel_page_table(early_pg_dir, map_size);
 
 #ifndef __PAGETABLE_PMD_FOLDED
        /* Setup early PMD for DTB */
                           pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
 #else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+       /*
+        * __va can't be used since it would return a linear mapping address
+        * whereas dtb_early_va will be used before setup_vm_final installs
+        * the linear mapping.
+        */
+       dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+#else
        dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
 #endif /* CONFIG_BUILTIN_DTB */
 #else
 #ifndef CONFIG_BUILTIN_DTB
                           pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
 #else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+       dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+#else
        dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
 #endif /* CONFIG_BUILTIN_DTB */
 #endif
        dtb_early_pa = dtb_pa;
 #endif
 }
 
+#ifdef CONFIG_64BIT
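+/*
+ * The kernel executes from its own mapping at the end of the address space,
+ * so the aliases of text and rodata in the linear mapping can be made
+ * read-only and non-executable without affecting the running image.
+ */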
+void protect_kernel_linear_mapping_text_rodata(void)
+{
+       unsigned long text_start = (unsigned long)lm_alias(_start);
+       unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
+       unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
+       unsigned long data_start = (unsigned long)lm_alias(_data);
+
+       set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+       set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+
+       set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+       set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+}
+#endif
+
 static void __init setup_vm_final(void)
 {
        uintptr_t va, map_size;
                           __pa_symbol(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);
 
-       /* Map all memory banks */
+       /* Map all memory banks in the linear mapping */
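+       /*
+        * On 64-bit the kernel executes from its own mapping, so the linear
+        * mapping does not need to be executable.
+        */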
        for_each_mem_range(i, &start, &end) {
                if (start >= end)
                        break;
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);
                        create_pgd_mapping(swapper_pg_dir, va, pa,
-                                          map_size, PAGE_KERNEL_EXEC);
+                                          map_size,
+#ifdef CONFIG_64BIT
+                                          PAGE_KERNEL
+#else
+                                          PAGE_KERNEL_EXEC
+#endif
+                                       );
                }
        }
 
+#ifdef CONFIG_64BIT
+       /* Map the kernel image into swapper_pg_dir at PMD granularity */
+       create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+#endif
+
        /* Clear fixmap PTE and PMD mappings */
        clear_fixmap(FIX_PTE);
        clear_fixmap(FIX_PMD);