return;
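	/*
	 * Publish the guest-physical address of this CPU's steal-time area
	 * to the hypervisor; KVM_MSR_ENABLED in bit 0 arms it.
	 */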
wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
- pr_info("stealtime: cpu %d, msr %llx\n", cpu,
+ if (0) pr_info("stealtime: cpu %d, msr %llx\n", cpu,
(unsigned long long) slow_virt_to_phys(st));
}
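/*
 * Throughout the patch, pr_info() calls on the CPU bring-up path are
 * compiled out with "if (0)" rather than deleted; presumably so that
 * (serial) console writes do not inflate the cycle counts taken in
 * native_cpu_up() below.
 */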
kvm_sched_clock_offset = kvm_clock_read();
pv_ops.time.sched_clock = kvm_sched_clock_read;
- pr_info("kvm-clock: using sched offset of %llu cycles",
+ if (0) pr_info("kvm-clock: using sched offset of %llu cycles",
kvm_sched_clock_offset);
BUILD_BUG_ON(sizeof(kvm_sched_clock_offset) >
	sizeof(((struct pvclock_vcpu_time_info *)NULL)->system_time));
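/*
 * In kvm_register_clock(), apparently from arch/x86/kernel/kvmclock.c,
 * pa is the guest-physical address of this CPU's pvclock time info,
 * with bit 0 set to enable it when written to the system-time MSR.
 */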
pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
wrmsrl(msr_kvm_system_time, pa);
- pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
+ if (0) pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
static void kvm_save_sched_clock_state(void)
goto found;
new = logical_die++;
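	/*
	 * Physical die IDs are remapped to dense logical IDs in first-use
	 * order, so this message only fires when the numbering actually
	 * changed; it is compiled out like the others.
	 */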
- if (new != die) {
+ if (0 && new != die) {
pr_info("CPU %u Converting physical %u to logical die %u\n",
cpu, die, new);
}
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
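+	/* TSC snapshots (get_cycles() is rdtsc on x86) bracketing each phase */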
+ cycles_t t1, t2, t3, t4, t5;
int ret;
+ t1 = get_cycles();
ret = do_cpu_up(cpu, tidle);
if (ret)
return ret;
+ t2 = get_cycles();
ret = do_wait_cpu_initialized(cpu);
if (ret)
return ret;
+ t3 = get_cycles();
ret = do_wait_cpu_callin(cpu);
if (ret)
return ret;
+ t4 = get_cycles();
ret = do_wait_cpu_online(cpu);
+ t5 = get_cycles();
+
+ printk("CPU#%d up in %10lld,%10lld,%10lld,%10lld (%10lld)\n", cpu,
+ t2-t1, t3-t2, t4-t3, t5-t4, t5-t1);
+
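+	/*
+	 * The four columns are the TSC deltas for do_cpu_up() and the three
+	 * wait phases (initialized, callin, online); the parenthesized value
+	 * is the total. A sketch for converting a delta to microseconds from
+	 * the kernel's tsc_khz calibration (div64_u64() is in <linux/math64.h>):
+	 *
+	 *	u64 us = div64_u64((u64)(t5 - t1) * 1000ULL, tsc_khz);
+	 */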
if (x86_platform.legacy.warm_reset) {
/*
* Cleanup possible dangling ends...