From: David Woodhouse
Date: Wed, 15 Dec 2021 08:11:49 +0000 (+0000)
Subject: timing debug
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=refs%2Fheads%2Fparallel-5.17;p=users%2Fdwmw2%2Flinux.git

timing debug
---

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7baaadf5483c1..1a58e0c2a731e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1350,10 +1350,13 @@ unreg_nmi:
 	return ret;
 }
 
+extern cycles_t cpu_up_times[100];
+#define t cpu_up_times
 int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret;
 
+	t[3] = get_cycles();
 	/* If parallel AP bringup isn't enabled, perform the first steps now. */
 	if (!do_parallel_bringup) {
 		ret = do_cpu_up(cpu, tidle);
@@ -1365,19 +1368,21 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 			return ret;
 	}
 
+	t[4] = get_cycles();
+
 	ret = do_wait_cpu_callin(cpu);
 	if (ret)
 		return ret;
-
+	t[5] = get_cycles();
 	ret = do_wait_cpu_online(cpu);
-
+	t[6] = get_cycles();
 	if (x86_platform.legacy.warm_reset) {
 		/*
 		 * Cleanup possible dangling ends...
 		 */
 		smpboot_restore_warm_reset_vector();
 	}
-
+	t[7] = get_cycles();
 	return ret;
 }
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 52b0e0f252a5f..69f8af88a30b2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -555,12 +555,16 @@ static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 	return ret;
 }
 
+cycles_t cpu_up_times[100];
+#define t cpu_up_times
+
 static int bringup_wait_for_ap(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 
 	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 	wait_for_ap_thread(st, true);
+	t[9] = get_cycles();
 	if (WARN_ON_ONCE((!cpu_online(cpu))))
 		return -ECANCELED;
 
@@ -606,6 +610,7 @@ static int bringup_cpu(unsigned int cpu)
 	irq_unlock_sparse();
 	if (ret)
 		return ret;
+	t[8] = get_cycles();
 	return bringup_wait_for_ap(cpu);
 }
 
@@ -1340,6 +1345,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	if (st->state >= target)
 		goto out;
 
+	t[0] = get_cycles();
 	if (st->state == CPUHP_OFFLINE) {
 		/* Let it fail before we try to bring the cpu up */
 		idle = idle_thread_get(cpu);
@@ -1356,6 +1362,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	 * If the current CPU state is in the range of the AP hotplug thread,
 	 * then we need to kick the thread once more.
 	 */
+	t[1] = get_cycles();
 	if (st->state > CPUHP_BRINGUP_CPU) {
 		ret = cpuhp_kick_ap_work(cpu);
 		/*
@@ -1365,7 +1372,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 		if (ret)
 			goto out;
 	}
-
+	t[2] = get_cycles();
 	/*
 	 * Try to reach the target state. We max out on the BP at
 	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
@@ -1373,6 +1380,11 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	 */
 	target = min((int)target, CPUHP_BRINGUP_CPU);
 	ret = cpuhp_up_callbacks(cpu, st, target);
+	t[10] = get_cycles();
+
+	printk("CPU %d to %d/%s in %lld %lld %lld %lld . %lld %lld %lld %lld . %lld %lld\n", cpu, target,
+	       cpuhp_hp_states[target].name, t[1] - t[0], t[2] - t[1], t[3] - t[2], t[4] - t[3],
+	       t[5] - t[4], t[6] - t[5], t[7] - t[6], t[8] - t[7], t[9] - t[8], t[10] - t[9]);
 out:
 	cpus_write_unlock();
 	arch_smt_update();