www.infradead.org Git - linux.git/commitdiff
x86/smpboot: Implement a bit spinlock to protect the realmode stack
author Thomas Gleixner <tglx@linutronix.de>
Fri, 12 May 2023 21:07:53 +0000 (23:07 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 15 May 2023 11:45:03 +0000 (13:45 +0200)
Parallel AP bringup requires that the APs can run fully parallel through
the early startup code including the real mode trampoline.

To prepare for this, implement a bit-spinlock to serialize access to the
real mode stack so that parallel upcoming APs are not going to corrupt each
other's stacks while going through the real mode startup code.

Co-developed-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205257.355425551@linutronix.de
arch/x86/include/asm/realmode.h
arch/x86/kernel/head_64.S
arch/x86/realmode/init.c
arch/x86/realmode/rm/trampoline_64.S

index f6a1737c77be2dc128147978e7f9f85945aed7a6..87e5482acd0dca56c4169a8c5321057447bbf6a5 100644 (file)
@@ -52,6 +52,7 @@ struct trampoline_header {
        u64 efer;
        u32 cr4;
        u32 flags;
+       u32 lock;
 #endif
 };
 
@@ -64,6 +65,8 @@ extern unsigned long initial_stack;
 extern unsigned long initial_vc_handler;
 #endif
 
+extern u32 *trampoline_lock;
+
 extern unsigned char real_mode_blob[];
 extern unsigned char real_mode_relocs[];
 
index 8458033bb9f1d72b22626df0a50fec4f6d14d7e3..f99e9ab6bd2681929959621ad933737ffb66e0b9 100644 (file)
@@ -251,6 +251,16 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        movq    pcpu_hot + X86_current_task(%rdx), %rax
        movq    TASK_threadsp(%rax), %rsp
 
+       /*
+        * Now that this CPU is running on its own stack, drop the realmode
+        * protection. For the boot CPU the pointer is NULL!
+        */
+       movq    trampoline_lock(%rip), %rax
+       testq   %rax, %rax
+       jz      .Lsetup_gdt
+       movl    $0, (%rax)
+
+.Lsetup_gdt:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
@@ -433,6 +443,8 @@ SYM_DATA(initial_code,      .quad x86_64_start_kernel)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 SYM_DATA(initial_vc_handler,   .quad handle_vc_boot_ghcb)
 #endif
+
+SYM_DATA(trampoline_lock, .quad 0);
        __FINITDATA
 
        __INIT
index af565816d2ba6aed5df706910d9e43e773b53fcb..788e5559549f39ab72573fb582d5964ce3490ed0 100644 (file)
@@ -154,6 +154,9 @@ static void __init setup_real_mode(void)
 
        trampoline_header->flags = 0;
 
+       trampoline_lock = &trampoline_header->lock;
+       *trampoline_lock = 0;
+
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 
        /* Map the real mode stub as virtual == physical */
index e38d61d6562e4611c9150b935b018bee46717fe5..4822ad2a5e898a168494362f2840d7d0fb96bd6d 100644 (file)
        .text
        .code16
 
+.macro LOAD_REALMODE_ESP
+       /*
+        * Make sure only one CPU fiddles with the realmode stack
+        */
+.Llock_rm\@:
+        lock btsl       $0, tr_lock
+        jnc             2f
+        pause
+        jmp             .Llock_rm\@
+2:
+       # Setup stack
+       movl    $rm_stack_end, %esp
+.endm
+
        .balign PAGE_SIZE
 SYM_CODE_START(trampoline_start)
        cli                     # We should be safe anyway
@@ -49,8 +63,7 @@ SYM_CODE_START(trampoline_start)
        mov     %ax, %es
        mov     %ax, %ss
 
-       # Setup stack
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
 
        call    verify_cpu              # Verify the cpu supports long mode
        testl   %eax, %eax              # Check for return code
@@ -93,8 +106,7 @@ SYM_CODE_START(sev_es_trampoline_start)
        mov     %ax, %es
        mov     %ax, %ss
 
-       # Setup stack
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
 
        jmp     .Lswitch_to_protected
 SYM_CODE_END(sev_es_trampoline_start)
@@ -177,7 +189,7 @@ SYM_CODE_START(pa_trampoline_compat)
         * In compatibility mode.  Prep ESP and DX for startup_32, then disable
         * paging and complete the switch to legacy 32-bit mode.
         */
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
        movw    $__KERNEL_DS, %dx
 
        movl    $(CR0_STATE & ~X86_CR0_PG), %eax
@@ -241,6 +253,7 @@ SYM_DATA_START(trampoline_header)
        SYM_DATA(tr_efer,               .space 8)
        SYM_DATA(tr_cr4,                .space 4)
        SYM_DATA(tr_flags,              .space 4)
+       SYM_DATA(tr_lock,               .space 4)
 SYM_DATA_END(trampoline_header)
 
 #include "trampoline_common.S"