x86/smpboot: Support parallel startup of secondary CPUs
author		David Woodhouse <dwmw@amazon.co.uk>
		Tue, 21 Mar 2023 19:40:05 +0000 (19:40 +0000)
committer	David Woodhouse <dwmw@amazon.co.uk>
		Thu, 30 Mar 2023 12:27:08 +0000 (14:27 +0200)
Rework the real-mode startup code to allow for APs to be brought up in
parallel. This is in two parts:

1. Introduce a bit-spinlock to prevent them from all using the real
   mode stack at the same time (a C sketch of the idea follows this
   list).

2. Avoid needing to use the global smpboot_control variable to pass
   each AP its CPU#.
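
As a concrete illustration of part 1, here is a minimal C sketch of
such a bit-spinlock (illustration only: the real implementation is the
LOAD_REALMODE_ESP macro added to trampoline_64.S below, and the helper
names here are invented for the example):

	/* Spin until this CPU manages to set bit 0, i.e. it was clear. */
	static void trampoline_lock_acquire(volatile unsigned int *lock)
	{
		while (__atomic_fetch_or(lock, 1U, __ATOMIC_ACQUIRE) & 1U)
			__builtin_ia32_pause();	/* PAUSE, as in the 16-bit spin loop */
	}

	/*
	 * Clear bit 0 again, as the AP does with "lock btrl $0" once it
	 * is running on its own kernel stack.
	 */
	static void trampoline_lock_release(volatile unsigned int *lock)
	{
		__atomic_fetch_and(lock, ~1U, __ATOMIC_RELEASE);
	}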

To achieve the latter, export the cpuid_to_apicid[] array so that each
AP can find its own CPU# by searching therein based on its APIC ID.

Introduce flags in the top bits of smpboot_control which indicate methods
by which an AP should find its CPU#. For a serialized bringup, the CPU#
is explicitly passed in the low bits of smpboot_control as before. For
parallel mode there are flags directing the AP to find its APIC ID in
CPUID leaf 0x0b or 0x1f (for X2APIC mode), or in CPUID leaf 0x01 where
8 bits are sufficient, and then perform the cpuid_to_apicid[] lookup
with that.
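
In C terms, what each AP does with smpboot_control before touching any
per-CPU state is roughly the following (a sketch only, assuming the
kernel's cpuid()/cpuid_count() helpers; find_cpu_number() is an
invented name, and the real code is the assembly added to
secondary_startup_64_no_verify below):

	static unsigned int find_cpu_number(unsigned int control)
	{
		unsigned int eax, ebx, ecx, edx, apicid, cpu;

		/* Serialized bringup: the CPU# is in the low bits. */
		if (!(control & STARTUP_PARALLEL_MASK))
			return control & ~STARTUP_PARALLEL_MASK;

		if (control & STARTUP_APICID_CPUID_01) {
			cpuid(0x01, &eax, &ebx, &ecx, &edx);
			apicid = ebx >> 24;	/* initial APIC ID, 8 bits */
		} else {
			/* Leaf 0x0b or 0x1f: EDX holds the full 32-bit x2APIC ID. */
			cpuid_count((control & STARTUP_APICID_CPUID_1F) ? 0x1f : 0x0b,
				    0, &eax, &ebx, &ecx, &edx);
			apicid = edx;
		}

		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpuid_to_apicid[cpu] == apicid)
				return cpu;
		}

		/* Not found: the assembly drops the trampoline lock and halts. */
		return ~0U;
	}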

Parallel startup may be disabled by a command line option, and also if:
 • AMD SEV-ES is in use, since the AP may not use CPUID that early.
 • X2APIC is enabled, but neither CPUID leaf 0x0b nor 0x1f is present
   and correct.
 • X2APIC is not enabled and not even CPUID leaf 0x01 exists.

Aside from the fact that APs will now look up their CPU# via the
newly-exported cpuid_to_apicid[] table, there is no behavioural change
intended yet, since the new parallel CPUHP states have not yet been
added.

[ tglx: Initial proof of concept patch with bitlock and APIC ID lookup ]
[ dwmw2: Rework and testing, commit message, CPUID 0x1 and CPU0 support ]
[ seanc: Fix stray override of initial_gs in common_cpu_up() ]
[ Oleksandr Natalenko: reported suspend/resume issue fixed in
  x86_acpi_suspend_lowlevel ]

Co-developed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Co-developed-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/Kconfig
arch/x86/include/asm/realmode.h
arch/x86/include/asm/smp.h
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/head_64.S
arch/x86/kernel/smpboot.c
arch/x86/realmode/init.c
arch/x86/realmode/rm/trampoline_64.S

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 805ad5d87933bcece64f9f7f3831dd5dc52f0de2..de2779f9a9e68d70abe7b5eafd20e1248951adbc 100644
@@ -272,8 +272,9 @@ config X86
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_USER_RETURN_NOTIFIER
        select HAVE_GENERIC_VDSO
+       select HOTPLUG_PARALLEL                 if SMP && X86_64
        select HOTPLUG_SMT                      if SMP
-       select HOTPLUG_SPLIT_STARTUP            if SMP
+       select HOTPLUG_SPLIT_STARTUP            if SMP && X86_32
        select IRQ_FORCED_THREADING
        select NEED_PER_CPU_EMBED_FIRST_CHUNK
        select NEED_PER_CPU_PAGE_FIRST_CHUNK
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index f6a1737c77be2dc128147978e7f9f85945aed7a6..87e5482acd0dca56c4169a8c5321057447bbf6a5 100644
@@ -52,6 +52,7 @@ struct trampoline_header {
        u64 efer;
        u32 cr4;
        u32 flags;
+       u32 lock;
 #endif
 };
 
@@ -64,6 +65,8 @@ extern unsigned long initial_stack;
 extern unsigned long initial_vc_handler;
 #endif
 
+extern u32 *trampoline_lock;
+
 extern unsigned char real_mode_blob[];
 extern unsigned char real_mode_relocs[];
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 919f6181622bded091f30e78a0e7f3ff361a55cc..90b1555ed862e22e0246e1f6137b5b18d716832f 100644
@@ -197,4 +197,12 @@ extern unsigned int smpboot_control;
 
 #endif /* !__ASSEMBLY__ */
 
+/* Control bits for startup_64 */
+#define STARTUP_APICID_CPUID_1F 0x80000000
+#define STARTUP_APICID_CPUID_0B 0x40000000
+#define STARTUP_APICID_CPUID_01 0x20000000
+
+/* Top 8 bits are reserved for control */
+#define STARTUP_PARALLEL_MASK  0xFF000000
+
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 1328c221af30e8dba1ef7c89a679de3c64132a08..6dfecb27b84611e9b1fa3bde3e60426812d657e6 100644
@@ -16,6 +16,7 @@
 #include <asm/cacheflush.h>
 #include <asm/realmode.h>
 #include <asm/hypervisor.h>
+#include <asm/smp.h>
 
 #include <linux/ftrace.h>
 #include "../../realmode/rm/wakeup.h"
@@ -127,7 +128,13 @@ int x86_acpi_suspend_lowlevel(void)
         * value is in the actual %rsp register.
         */
        current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack);
-       smpboot_control = smp_processor_id();
+       /*
+        * Ensure the CPU knows which one it is when it comes back, if
+        * it isn't in parallel mode and expected to work that out for
+        * itself.
+        */
+       if (!(smpboot_control & STARTUP_PARALLEL_MASK))
+               smpboot_control = smp_processor_id();
 #endif
        initial_code = (unsigned long)wakeup_long64;
        saved_magic = 0x123456789abcdef0L;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 20d9a604da7c4b624f701182f0ead13244d1323f..ac1d7e5da1f233e1927f7a08d9b3db1b432cc4e2 100644
@@ -2377,7 +2377,7 @@ static int nr_logical_cpuids = 1;
 /*
  * Used to store mapping between logical CPU IDs and APIC IDs.
  */
-static int cpuid_to_apicid[] = {
+int cpuid_to_apicid[] = {
        [0 ... NR_CPUS - 1] = -1,
 };
 
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index bd72368cdce066ad4c49162c46ac8ee2f6b510f3..c82c1fd3aec4a51437d57d2e8191a07a837848bc 100644
@@ -9,6 +9,7 @@
 #include <asm/apic.h>
 #include <asm/memtype.h>
 #include <asm/processor.h>
+#include <asm/cpu.h>
 
 #include "cpu.h"
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1600b38541fc9b24b537eca32915c692252d5083..08ad5ef5acd43e69eb401164750a01995a2aa39a 100644
@@ -25,6 +25,7 @@
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
 #include <asm/fixmap.h>
+#include <asm/smp.h>
 
 /*
  * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
@@ -234,8 +235,70 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR // above
 
 #ifdef CONFIG_SMP
+       /*
+        * For parallel boot, the APIC ID is retrieved from CPUID, and then
+        * used to look up the CPU number.  For booting a single CPU, the
+        * CPU number is encoded in smpboot_control.
+        *
+        * Bit 31       STARTUP_APICID_CPUID_1F flag (use CPUID 0x1f)
+        * Bit 30       STARTUP_APICID_CPUID_0B flag (use CPUID 0x0b)
+        * Bit 29       STARTUP_APICID_CPUID_01 flag (use CPUID 0x01)
+        * Bit 0-23     CPU# if STARTUP_APICID_CPUID_xx flags are not set
+        */
        movl    smpboot_control(%rip), %ecx
+       testl   $STARTUP_APICID_CPUID_1F, %ecx
+       jnz     .Luse_cpuid_1f
+       testl   $STARTUP_APICID_CPUID_0B, %ecx
+       jnz     .Luse_cpuid_0b
+       testl   $STARTUP_APICID_CPUID_01, %ecx
+       jnz     .Luse_cpuid_01
+       andl    $(~STARTUP_PARALLEL_MASK), %ecx
+       jmp     .Lsetup_cpu
+
+.Luse_cpuid_01:
+       mov     $0x01, %eax
+       cpuid
+       mov     %ebx, %edx
+       shr     $24, %edx
+       jmp     .Lsetup_AP
 
+.Luse_cpuid_0b:
+       mov     $0x0B, %eax
+       xorl    %ecx, %ecx
+       cpuid
+       jmp     .Lsetup_AP
+
+.Luse_cpuid_1f:
+       mov     $0x1f, %eax
+       xorl    %ecx, %ecx
+       cpuid
+
+.Lsetup_AP:
+       /* EDX contains the APIC ID of the current CPU */
+       xorq    %rcx, %rcx
+       leaq    cpuid_to_apicid(%rip), %rbx
+
+.Lfind_cpunr:
+       cmpl    (%rbx,%rcx,4), %edx
+       jz      .Lsetup_cpu
+       inc     %ecx
+#ifdef CONFIG_FORCE_NR_CPUS
+       cmpl    $NR_CPUS, %ecx
+#else
+       cmpl    nr_cpu_ids(%rip), %ecx
+#endif
+       jb      .Lfind_cpunr
+
+       /*  APIC ID not found in the table. Drop the trampoline lock and bail. */
+       movq    trampoline_lock(%rip), %rax
+       lock
+       btrl    $0, (%rax)
+
+1:     cli
+       hlt
+       jmp     1b
+
+.Lsetup_cpu:
        /* Get the per cpu offset for the given CPU# which is in ECX */
        movq    __per_cpu_offset(,%rcx,8), %rdx
 #else
@@ -251,6 +314,17 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
        movq    pcpu_hot + X86_current_task(%rdx), %rax
        movq    TASK_threadsp(%rax), %rsp
 
+       /*
+        * Now that this CPU is running on its own stack, drop the realmode
+        * protection. For the boot CPU the pointer is NULL!
+        */
+       movq    trampoline_lock(%rip), %rax
+       testq   %rax, %rax
+       jz      .Lsetup_gdt
+       lock
+       btrl    $0, (%rax)
+
+.Lsetup_gdt:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
@@ -435,6 +509,8 @@ SYM_DATA(initial_code,      .quad x86_64_start_kernel)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 SYM_DATA(initial_vc_handler,   .quad handle_vc_boot_ghcb)
 #endif
+
+SYM_DATA(trampoline_lock, .quad 0);
        __FINITDATA
 
        __INIT
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 00e4f9364cd2c446d0cace62c943f8b49ee1667f..7955b86d4e9c7bca31bdc8732a0d3f0d1ac99bf4 100644
@@ -998,7 +998,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
        if (IS_ENABLED(CONFIG_X86_32)) {
                early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
                initial_stack  = idle->thread.sp;
-       } else {
+       } else if (!(smpboot_control & STARTUP_PARALLEL_MASK)) {
                smpboot_control = cpu;
        }
 
@@ -1229,6 +1229,51 @@ void __init smp_prepare_cpus_common(void)
        set_cpu_sibling_map(0);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * Allow 64-bit AP bringup in parallel if the CPU reports its APIC ID in
+ * CPUID. Either leaf 0x0B/0x1F for X2APIC mode, or leaf 0x01 if 8 bits are
+ * sufficient. Otherwise it's too hard.
+ */
+bool __init arch_cpuhp_init_parallel_bringup(void)
+{
+       unsigned int ctrl = 0;
+
+       if (boot_cpu_data.cpuid_level < 0x01)
+               return false;
+
+       /* Encrypted guests require special CPUID handling. */
+       if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
+               pr_info("Parallel CPU startup disabled due to guest state encryption\n");
+               return false;
+       }
+
+       switch (topology_extended_leaf) {
+       case 0x0b:
+               ctrl = STARTUP_APICID_CPUID_0B;
+               break;
+       case 0x1f:
+               ctrl = STARTUP_APICID_CPUID_1F;
+               break;
+       case 0x00:
+               if (!x2apic_mode) {
+                       /* For !x2APIC mode 8 bits from leaf 0x01 are sufficient */
+                       ctrl = STARTUP_APICID_CPUID_01;
+                       break;
+               }
+               fallthrough;
+       default:
+               pr_info("Parallel CPU startup disabled. Unsupported topology leaf %u\n",
+                       topology_extended_leaf);
+               return false;
+       }
+
+       pr_debug("Parallel CPU startup enabled: 0x%08x\n", ctrl);
+       smpboot_control = ctrl;
+       return true;
+}
+#endif
+
 /*
  * Prepare for SMP bootup.
  * @max_cpus: configured maximum number of CPUs, It is a legacy parameter
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index af565816d2ba6aed5df706910d9e43e773b53fcb..788e5559549f39ab72573fb582d5964ce3490ed0 100644
@@ -154,6 +154,9 @@ static void __init setup_real_mode(void)
 
        trampoline_header->flags = 0;
 
+       trampoline_lock = &trampoline_header->lock;
+       *trampoline_lock = 0;
+
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 
        /* Map the real mode stub as virtual == physical */
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index e38d61d6562e4611c9150b935b018bee46717fe5..2dfb1c40016769ea58847b5e3dd4a9d3fd0cfbb8 100644
        .text
        .code16
 
+.macro LOAD_REALMODE_ESP
+       /*
+        * Make sure only one CPU fiddles with the realmode stack
+        */
+.Llock_rm\@:
+       btl     $0, tr_lock
+       jnc     2f
+       pause
+       jmp     .Llock_rm\@
+2:
+       lock
+       btsl    $0, tr_lock
+       jc      .Llock_rm\@
+
+       # Setup stack
+       movl    $rm_stack_end, %esp
+.endm
+
        .balign PAGE_SIZE
 SYM_CODE_START(trampoline_start)
        cli                     # We should be safe anyway
@@ -49,8 +67,7 @@ SYM_CODE_START(trampoline_start)
        mov     %ax, %es
        mov     %ax, %ss
 
-       # Setup stack
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
 
        call    verify_cpu              # Verify the cpu supports long mode
        testl   %eax, %eax              # Check for return code
@@ -93,8 +110,7 @@ SYM_CODE_START(sev_es_trampoline_start)
        mov     %ax, %es
        mov     %ax, %ss
 
-       # Setup stack
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
 
        jmp     .Lswitch_to_protected
 SYM_CODE_END(sev_es_trampoline_start)
@@ -177,7 +193,7 @@ SYM_CODE_START(pa_trampoline_compat)
         * In compatibility mode.  Prep ESP and DX for startup_32, then disable
         * paging and complete the switch to legacy 32-bit mode.
         */
-       movl    $rm_stack_end, %esp
+       LOAD_REALMODE_ESP
        movw    $__KERNEL_DS, %dx
 
        movl    $(CR0_STATE & ~X86_CR0_PG), %eax
@@ -241,6 +257,7 @@ SYM_DATA_START(trampoline_header)
        SYM_DATA(tr_efer,               .space 8)
        SYM_DATA(tr_cr4,                .space 4)
        SYM_DATA(tr_flags,              .space 4)
+       SYM_DATA(tr_lock,               .space 4)
 SYM_DATA_END(trampoline_header)
 
 #include "trampoline_common.S"