x86/smpboot: Split up native_cpu_up into separate phases
authorDavid Woodhouse <dwmw@amazon.co.uk>
Thu, 28 Jan 2021 16:12:14 +0000 (16:12 +0000)
committerDavid Woodhouse <dwmw@amazon.co.uk>
Thu, 28 Jan 2021 20:11:58 +0000 (20:11 +0000)
There are four logical parts to what native_cpu_up() does.

First, it actually wakes the AP.

Second, it waits for the AP to make it as far as wait_for_master_cpu()
which sets that CPU's bit in cpu_initialized_mask, and sets the bit in
cpu_callout_mask to let the AP proceed through cpu_init().

Then, it waits for the AP to finish cpu_init() and get as far as the
smp_callin() call, which sets that CPU's bit in cpu_callin_mask.

Finally, it does the TSC synchronization and waits for the AP to actually
mark itself online in cpu_online_mask.

This commit should have no behavioural change, but merely splits those
phases out into separate functions so that future commits can make them
happen in parallel for all APs.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
arch/x86/kernel/smpboot.c

index bec0059d3d3ddf435ece8b908220beb016e8a060..649b8236309b35e41e0808e7ecf322a6f7003a00 100644 (file)
@@ -1039,9 +1039,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
 {
        /* start_ip had better be page-aligned! */
        unsigned long start_ip = real_mode_header->trampoline_start;
-
        unsigned long boot_error = 0;
-       unsigned long timeout;
 
        idle->thread.sp = (unsigned long)task_pt_regs(idle);
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
@@ -1094,55 +1092,70 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
                boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
                                                     cpu0_nmi_registered);
 
-       if (!boot_error) {
-               /*
-                * Wait 10s total for first sign of life from AP
-                */
-               boot_error = -1;
-               timeout = jiffies + 10*HZ;
-               while (time_before(jiffies, timeout)) {
-                       if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
-                               /*
-                                * Tell AP to proceed with initialization
-                                */
-                               cpumask_set_cpu(cpu, cpu_callout_mask);
-                               boot_error = 0;
-                               break;
-                       }
-                       schedule();
-               }
-       }
+       return boot_error;
+}
 
-       if (!boot_error) {
-               /*
-                * Wait till AP completes initial initialization
-                */
-               while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
-                       /*
-                        * Allow other tasks to run while we wait for the
-                        * AP to come online. This also gives a chance
-                        * for the MTRR work(triggered by the AP coming online)
-                        * to be completed in the stop machine context.
-                        */
-                       schedule();
-               }
+static int do_wait_cpu_cpumask(unsigned int cpu, const struct cpumask *mask)
+{
+       unsigned long timeout;
+
+       /*
+        * Wait up to 10s for the CPU to report in.
+        */
+       timeout = jiffies + 10*HZ;
+       while (time_before(jiffies, timeout)) {
+               if (cpumask_test_cpu(cpu, mask))
+                       return 0;
+
+               schedule();
        }
+       return -1;
+}
 
-       if (x86_platform.legacy.warm_reset) {
-               /*
-                * Cleanup possible dangling ends...
-                */
-               smpboot_restore_warm_reset_vector();
+static int do_wait_cpu_initialized(unsigned int cpu)
+{
+       /*
+        * Wait for first sign of life from AP.
+        */
+       if (do_wait_cpu_cpumask(cpu, cpu_initialized_mask))
+               return -1;
+
+       cpumask_set_cpu(cpu, cpu_callout_mask);
+       return 0;
+}
+
+static int do_wait_cpu_callin(unsigned int cpu)
+{
+       /*
+        * Wait till AP completes initial initialization.
+        */
+       return do_wait_cpu_cpumask(cpu, cpu_callin_mask);
+}
+
+static int do_wait_cpu_online(unsigned int cpu)
+{
+       unsigned long flags;
+
+       /*
+        * Check TSC synchronization with the AP (keep irqs disabled
+        * while doing so):
+        */
+       local_irq_save(flags);
+       check_tsc_sync_source(cpu);
+       local_irq_restore(flags);
+
+       while (!cpu_online(cpu)) {
+               cpu_relax();
+               touch_nmi_watchdog();
        }
 
-       return boot_error;
+       return 0;
 }
 
-int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int do_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
        int apicid = apic->cpu_present_to_apicid(cpu);
        int cpu0_nmi_registered = 0;
-       unsigned long flags;
        int err, ret = 0;
 
        lockdep_assert_irqs_enabled();
@@ -1189,19 +1202,6 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
                goto unreg_nmi;
        }
 
-       /*
-        * Check TSC synchronization with the AP (keep irqs disabled
-        * while doing so):
-        */
-       local_irq_save(flags);
-       check_tsc_sync_source(cpu);
-       local_irq_restore(flags);
-
-       while (!cpu_online(cpu)) {
-               cpu_relax();
-               touch_nmi_watchdog();
-       }
-
 unreg_nmi:
        /*
         * Clean up the nmi handler. Do this after the callin and callout sync
@@ -1213,6 +1213,34 @@ unreg_nmi:
        return ret;
 }
 
+int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+       int ret;
+
+       ret = do_cpu_up(cpu, tidle);
+       if (ret)
+               return ret;
+
+       ret = do_wait_cpu_initialized(cpu);
+       if (ret)
+               return ret;
+
+       ret = do_wait_cpu_callin(cpu);
+       if (ret)
+               return ret;
+
+       ret = do_wait_cpu_online(cpu);
+
+       if (x86_platform.legacy.warm_reset) {
+               /*
+                * Cleanup possible dangling ends...
+                */
+               smpboot_restore_warm_reset_vector();
+       }
+
+       return ret;
+}
+
 /**
  * arch_disable_smp_support() - disables SMP support for x86 at runtime
  */