www.infradead.org Git - users/dwmw2/linux.git/commitdiff
x86/kexec: Invoke copy of relocate_kernel() instead of the original
authorDavid Woodhouse <dwmw@amazon.co.uk>
Thu, 5 Dec 2024 15:05:13 +0000 (15:05 +0000)
committerIngo Molnar <mingo@kernel.org>
Fri, 6 Dec 2024 09:41:59 +0000 (10:41 +0100)
This currently calls set_memory_x() from machine_kexec_prepare() just
like the 32-bit version does. That's actually a bit earlier than I'd
like, as it leaves the page RWX all the time the image is even *loaded*.

Subsequent commits will eliminate all the writes to the page between the
point it's marked executable in machine_kexec_prepare() and the time that
relocate_kernel() is running and has switched to the identmap %cr3, so
that it can be ROX. But that can't happen until it's moved to the .data
section of the kernel, and *that* can't happen until we start executing
the copy instead of executing it in place in the kernel .text. So break
the circular dependency in those commits by letting it be RWX for now.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lore.kernel.org/r/20241205153343.3275139-8-dwmw2@infradead.org
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/relocate_kernel_64.S

index 3a4cbac1a0c64c6ce84f60bb19b268d90c0d3756..9567347f7a9b483d2bf610a74bf0c1a069b05d2d 100644 (file)
@@ -157,7 +157,12 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd,
        pmd_t *pmd;
        pte_t *pte;
 
-       vaddr = (unsigned long)relocate_kernel;
+       /*
+        * For the transition to the identity mapped page tables, the control
+        * code page also needs to be mapped at the virtual address it starts
+        * off running from.
+        */
+       vaddr = (unsigned long)__va(control_page);
        paddr = control_page;
        pgd += pgd_index(vaddr);
        if (!pgd_present(*pgd)) {
@@ -311,11 +316,17 @@ int machine_kexec_prepare(struct kimage *image)
 
        __memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 
+       set_memory_x((unsigned long)control_page, 1);
+
        return 0;
 }
 
 void machine_kexec_cleanup(struct kimage *image)
 {
+       void *control_page = page_address(image->control_code_page);
+
+       set_memory_nx((unsigned long)control_page, 1);
+
        free_transition_pgtable(image);
 }
 
@@ -325,6 +336,11 @@ void machine_kexec_cleanup(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
+       unsigned long (*relocate_kernel_ptr)(unsigned long indirection_page,
+                                            unsigned long page_list,
+                                            unsigned long start_address,
+                                            unsigned int preserve_context,
+                                            unsigned int host_mem_enc_active);
        unsigned long page_list[PAGES_NR];
        unsigned int host_mem_enc_active;
        int save_ftrace_enabled;
@@ -371,6 +387,8 @@ void machine_kexec(struct kimage *image)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);
 
+       relocate_kernel_ptr = control_page;
+
        /*
         * The segment registers are funny things, they have both a
         * visible and an invisible part.  Whenever the visible part is
@@ -390,11 +408,11 @@ void machine_kexec(struct kimage *image)
        native_gdt_invalidate();
 
        /* now call it */
-       image->start = relocate_kernel((unsigned long)image->head,
-                                      (unsigned long)page_list,
-                                      image->start,
-                                      image->preserve_context,
-                                      host_mem_enc_active);
+       image->start = relocate_kernel_ptr((unsigned long)image->head,
+                                          (unsigned long)page_list,
+                                          image->start,
+                                          image->preserve_context,
+                                          host_mem_enc_active);
 
 #ifdef CONFIG_KEXEC_JUMP
        if (image->preserve_context)
index ca7f1e1d5b11438d7c60cc7fde3c7e47f677dc19..d0a87b39db6a5e9ddf5777a2bd3e1bd28ddd785c 100644 (file)
@@ -39,6 +39,7 @@
 #define CP_PA_TABLE_PAGE       DATA(0x20)
 #define CP_PA_SWAP_PAGE                DATA(0x28)
 #define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
+#define CP_VA_CONTROL_PAGE     DATA(0x38)
 
        .text
        .align PAGE_SIZE
@@ -99,6 +100,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
        movq    %r9, CP_PA_TABLE_PAGE(%r11)
        movq    %r10, CP_PA_SWAP_PAGE(%r11)
        movq    %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
+       movq    %r11, CP_VA_CONTROL_PAGE(%r11)
 
        /* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
        movq    %rcx, %r11
@@ -235,7 +237,8 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
        movq    %rax, %cr3
        lea     PAGE_SIZE(%r8), %rsp
        call    swap_pages
-       movq    $virtual_mapped, %rax
+       movq    CP_VA_CONTROL_PAGE(%r8), %rax
+       addq    $(virtual_mapped - relocate_kernel), %rax
        pushq   %rax
        ANNOTATE_UNRET_SAFE
        ret