www.infradead.org Git - users/dwmw2/linux.git/commitdiff
parallel part 2 (parallel-5.16-part2)
author David Woodhouse <dwmw@amazon.co.uk>
Tue, 14 Dec 2021 19:58:54 +0000 (19:58 +0000)
committer David Woodhouse <dwmw@amazon.co.uk>
Wed, 15 Dec 2021 08:25:48 +0000 (08:25 +0000)
arch/x86/include/asm/cpumask.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp_pv.c

diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index 3afa990d756b5159e8eb106564743b94b3b1590c..4dd9c167e63a94905dcd91fef1ebbbbd1c5a5e21 100644
@@ -7,6 +7,7 @@
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_finishup_mask;
 extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 98b4697b2c9b0230f6bb381f48286423d3b469ff..0cd6373bc3f2a603c6c59eb49f07633767c0cee1 100644
@@ -68,6 +68,7 @@ u32 elf_hwcap2 __read_mostly;
 cpumask_var_t cpu_initialized_mask;
 cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_callin_mask;
+cpumask_var_t cpu_finishup_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -94,6 +95,7 @@ void __init setup_cpu_local_masks(void)
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
+       alloc_bootmem_cpumask_var(&cpu_finishup_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 28039fc37164e0d9353e0a0ad02ff45656a48ad6..e9bdaf68a78a4c4dedeb429a5e55fbcf107f0c79 100644
@@ -212,15 +212,20 @@ static void smp_callin(void)
        wmb();
 
        /*
-        * This runs the AP through all the cpuhp states to its target
-        * state (CPUHP_ONLINE in the case of serial bringup).
+        * Allow the master to continue.
         */
-       notify_cpu_starting(cpuid);
+       cpumask_set_cpu(cpuid, cpu_callin_mask);
+       while (!cpumask_test_cpu(cpuid, cpu_finishup_mask))
+               cpu_relax();
 
        /*
-        * Allow the master to continue.
+        * This runs the AP through all the cpuhp states to its target
+        * state (CPUHP_ONLINE in the case of serial bringup).
         */
-       cpumask_set_cpu(cpuid, cpu_callin_mask);
+       static arch_spinlock_t cpuhp_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+       arch_spin_lock(&cpuhp_lock);
+       notify_cpu_starting(cpuid);
+       arch_spin_unlock(&cpuhp_lock);
 }
 
 static int cpu0_logical_apicid;
@@ -1138,7 +1143,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
        init_espfix_ap(cpu);
 
        /* So we see what's up */
-       announce_cpu(cpu, apicid);
+       //announce_cpu(cpu, apicid);
 
        /*
         * This grunge runs the startup process for
@@ -1231,7 +1236,11 @@ static int do_wait_cpu_callin(unsigned int cpu)
        /*
         * Wait till AP completes initial initialization.
         */
-       return do_wait_cpu_cpumask(cpu, cpu_callin_mask);
+       if (do_wait_cpu_cpumask(cpu, cpu_callin_mask))
+               return -1;
+
+       cpumask_set_cpu(cpu, cpu_finishup_mask);
+       return 0;
 }
 
 /*
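The two hunks above form the new rendezvous: in smp_callin() the AP marks itself in cpu_callin_mask and spins until the control CPU sets its bit in cpu_finishup_mask, then runs notify_cpu_starting() serialized behind cpuhp_lock; in do_wait_cpu_callin() the control CPU waits for the callin bit and then sets the finishup bit to release the AP. A rough userspace analogue of that handshake, using C11 atomics and pthreads (callin, finishup, ap_thread and so on are illustrative names, not kernel code):

/*
 * Rough userspace analogue of the callin/finishup handshake above.
 * Not kernel code: callin/finishup stand in for cpu_callin_mask and
 * cpu_finishup_mask, and the mutex stands in for the arch spinlock
 * that serializes notify_cpu_starting().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callin;                    /* AP -> BSP: early init done     */
static atomic_int finishup;                  /* BSP -> AP: run cpuhp callbacks */
static pthread_mutex_t cpuhp_lock = PTHREAD_MUTEX_INITIALIZER;

static void *ap_thread(void *arg)            /* plays the part of smp_callin() */
{
	(void)arg;
	atomic_store(&callin, 1);            /* cpumask_set_cpu(cpu_callin_mask) */
	while (!atomic_load(&finishup))      /* wait for cpu_finishup_mask...    */
		;                            /* ...cpu_relax() in the real code  */
	pthread_mutex_lock(&cpuhp_lock);     /* arch_spin_lock(&cpuhp_lock)      */
	puts("AP: notify_cpu_starting()");   /* STARTING callbacks, one AP at a time */
	pthread_mutex_unlock(&cpuhp_lock);
	return NULL;
}

int main(void)                               /* plays the part of do_wait_cpu_callin() */
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);
	while (!atomic_load(&callin))        /* do_wait_cpu_cpumask(cpu_callin_mask) */
		;
	atomic_store(&finishup, 1);          /* cpumask_set_cpu(cpu_finishup_mask)   */
	pthread_join(ap, NULL);
	return 0;
}

Build with cc -pthread; the point is only the ordering: the AP cannot enter the cpuhp STARTING callbacks until the control CPU has observed its callin bit and explicitly released it.
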
@@ -1341,17 +1350,20 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
        int ret;
 
+       t[3] = get_cycles();
        /* If parallel AP bringup isn't enabled, perform the first steps now. */
        if (!do_parallel_bringup) {
                ret = do_cpu_up(cpu, tidle);
                if (ret)
                        return ret;
+
+               ret = do_wait_cpu_initialized(cpu);
+               if (ret)
+                       return ret;
        }
-       t[3] = get_cycles();
-       ret = do_wait_cpu_initialized(cpu);
-       if (ret)
-               return ret;
+
        t[4] = get_cycles();
+
        ret = do_wait_cpu_callin(cpu);
        if (ret)
                return ret;
@@ -1560,9 +1572,12 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        if (IS_ENABLED(CONFIG_X86_32) || boot_cpu_data.cpuid_level < 0x0B)
                do_parallel_bringup = false;
 
-       if (do_parallel_bringup)
+       if (do_parallel_bringup) {
                cpuhp_setup_state_nocalls(CPUHP_BP_PARALLEL_DYN, "x86/cpu:kick",
                                          native_cpu_kick, NULL);
+               cpuhp_setup_state_nocalls(CPUHP_BP_PARALLEL_DYN, "x86/cpu:wait-init",
+                                         do_wait_cpu_initialized, NULL);
+       }
 }
 
 void arch_thaw_secondary_cpus_begin(void)
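With the hunk above, the parallel path registers a second CPUHP_BP_PARALLEL_DYN state, so the control CPU performs "x86/cpu:kick" and "x86/cpu:wait-init" as pre-bringup steps and native_cpu_up() no longer calls do_wait_cpu_initialized() itself (hence the earlier native_cpu_up() hunk). Below is a standalone sketch of the resulting ordering, assuming the CPUHP_BP_PARALLEL_DYN behaviour from part 1 of this series, where each parallel state is run for every CPU before the next state starts; kick/wait_init/serial_online are printf stand-ins, not the real callbacks:

/*
 * Model of the bringup ordering with two parallel pre-bringup states.
 * Assumption (from part 1 of this series): the control CPU runs each
 * CPUHP_BP_PARALLEL_DYN state for every CPU before the next state, then
 * finishes each CPU serially. All bodies here are stand-ins.
 */
#include <stdio.h>

#define NR_APS 4

static void kick(unsigned int cpu)          { printf("x86/cpu:kick       cpu%u\n", cpu); }
static void wait_init(unsigned int cpu)     { printf("x86/cpu:wait-init  cpu%u\n", cpu); }
static void serial_online(unsigned int cpu) { printf("serial bringup     cpu%u\n", cpu); }

int main(void)
{
	unsigned int cpu;

	for (cpu = 1; cpu <= NR_APS; cpu++)
		kick(cpu);             /* all APs start cpu_init() concurrently     */
	for (cpu = 1; cpu <= NR_APS; cpu++)
		wait_init(cpu);        /* do_wait_cpu_initialized() for each AP     */
	for (cpu = 1; cpu <= NR_APS; cpu++)
		serial_online(cpu);    /* remaining cpuhp states, one CPU at a time */
	return 0;
}
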
@@ -1584,6 +1599,7 @@ void __init native_smp_prepare_boot_cpu(void)
        switch_to_new_gdt(me);
        /* already set me in cpu_online_mask in boot_cpu_init() */
        cpumask_set_cpu(me, cpu_callout_mask);
+       cpumask_set_cpu(me, cpu_finishup_mask);
        cpu_set_state_online(me);
        native_pv_lock_init();
 }
@@ -1764,6 +1780,7 @@ static void remove_cpu_from_maps(int cpu)
 {
        set_cpu_online(cpu, false);
        cpumask_clear_cpu(cpu, cpu_callout_mask);
+       cpumask_clear_cpu(cpu, cpu_finishup_mask);
        cpumask_clear_cpu(cpu, cpu_callin_mask);
        /* was set by cpu_init() */
        cpumask_clear_cpu(cpu, cpu_initialized_mask);
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index ff37dff20dc0aee5ef5d887825c12793e1bfa90e..6f4a6bfa2c83d9aa4061e8d7a4719ca2f0a708bc 100644
@@ -271,6 +271,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        /* used to tell cpu_init() that it can proceed with initialization */
        cpumask_set_cpu(cpu, cpu_callout_mask);
+       cpumask_set_cpu(cpu, cpu_finishup_mask);
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;