www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
riscv: Support CONFIG_RELOCATABLE on NOMMU
author: Samuel Holland <samuel.holland@sifive.com>
Sat, 26 Oct 2024 17:13:55 +0000 (10:13 -0700)
committer: Palmer Dabbelt <palmer@rivosinc.com>
Wed, 26 Mar 2025 22:56:41 +0000 (15:56 -0700)
Move relocate_kernel() out of the CONFIG_MMU block so it can be called
from the NOMMU version of setup_vm(). Set some offsets in kernel_map so
relocate_kernel() does not need to be modified. Relocatable NOMMU
kernels can be loaded to any physical memory address; they no longer
depend on CONFIG_PAGE_OFFSET.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20241026171441.3047904-4-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/Kconfig
arch/riscv/include/asm/pgtable.h
arch/riscv/mm/init.c

index 7612c52e9b1e35607f1dd4603a596416d3357a71..62fb6d9560e5dd59726ec49c713cd7735dcb78e7 100644 (file)
@@ -1075,7 +1075,7 @@ config PARAVIRT_TIME_ACCOUNTING
 
 config RELOCATABLE
        bool "Build a relocatable kernel"
-       depends on MMU && 64BIT && !XIP_KERNEL
+       depends on 64BIT && !XIP_KERNEL
        select MODULE_SECTIONS if MODULES
        help
           This builds a kernel as a Position Independent Executable (PIE),
index eb7b25ef556ecc1124458d97e17380aa429de9e9..babd951222db1fc5723443af75d78edfd7992a31 100644 (file)
 #include <asm/pgtable-bits.h>
 
 #ifndef CONFIG_MMU
+#ifdef CONFIG_RELOCATABLE
+#define KERNEL_LINK_ADDR       UL(0)
+#else
 #define KERNEL_LINK_ADDR       _AC(CONFIG_PAGE_OFFSET, UL)
+#endif
 #define KERN_VIRT_SIZE         (UL(-1))
 #else
 
index 15b2eda4c364b39261f94c371d791a24fb929aeb..82d14f94c996c9c99eaa5bbc6130555b147bf48d 100644 (file)
@@ -323,6 +323,44 @@ static void __init setup_bootmem(void)
                hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 }
 
+#ifdef CONFIG_RELOCATABLE
+extern unsigned long __rela_dyn_start, __rela_dyn_end;
+
+static void __init relocate_kernel(void)
+{
+       Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
+       /*
+        * This holds the offset between the linked virtual address and the
+        * relocated virtual address.
+        */
+       uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
+       /*
+        * This holds the offset between kernel linked virtual address and
+        * physical address.
+        */
+       uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
+
+       for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
+               Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
+               Elf64_Addr relocated_addr = rela->r_addend;
+
+               if (rela->r_info != R_RISCV_RELATIVE)
+                       continue;
+
+               /*
+                * Make sure to not relocate vdso symbols like rt_sigreturn
+                * which are linked from the address 0 in vmlinux since
+                * vdso symbol addresses are actually used as an offset from
+                * mm->context.vdso in VDSO_OFFSET macro.
+                */
+               if (relocated_addr >= KERNEL_LINK_ADDR)
+                       relocated_addr += reloc_offset;
+
+               *(Elf64_Addr *)addr = relocated_addr;
+       }
+}
+#endif /* CONFIG_RELOCATABLE */
+
 #ifdef CONFIG_MMU
 struct pt_alloc_ops pt_ops __meminitdata;
 
@@ -893,44 +931,6 @@ retry:
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-#ifdef CONFIG_RELOCATABLE
-extern unsigned long __rela_dyn_start, __rela_dyn_end;
-
-static void __init relocate_kernel(void)
-{
-       Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
-       /*
-        * This holds the offset between the linked virtual address and the
-        * relocated virtual address.
-        */
-       uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
-       /*
-        * This holds the offset between kernel linked virtual address and
-        * physical address.
-        */
-       uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
-
-       for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
-               Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
-               Elf64_Addr relocated_addr = rela->r_addend;
-
-               if (rela->r_info != R_RISCV_RELATIVE)
-                       continue;
-
-               /*
-                * Make sure to not relocate vdso symbols like rt_sigreturn
-                * which are linked from the address 0 in vmlinux since
-                * vdso symbol addresses are actually used as an offset from
-                * mm->context.vdso in VDSO_OFFSET macro.
-                */
-               if (relocated_addr >= KERNEL_LINK_ADDR)
-                       relocated_addr += reloc_offset;
-
-               *(Elf64_Addr *)addr = relocated_addr;
-       }
-}
-#endif /* CONFIG_RELOCATABLE */
-
 #ifdef CONFIG_XIP_KERNEL
 static void __init create_kernel_page_table(pgd_t *pgdir,
                                            __always_unused bool early)
@@ -1378,6 +1378,12 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
        dtb_early_va = (void *)dtb_pa;
        dtb_early_pa = dtb_pa;
+
+#ifdef CONFIG_RELOCATABLE
+       kernel_map.virt_addr = (uintptr_t)_start;
+       kernel_map.phys_addr = (uintptr_t)_start;
+       relocate_kernel();
+#endif
 }
 
 static inline void setup_vm_final(void)