Rename the paravirtualized calculate_cpu_khz to calibrate_tsc.
In all cases the backend actually calibrates the TSC, and that
frequency is then used as the cpu_khz value.
Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
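
After this patch both build configurations funnel through the new name;
condensing the two header hunks near the end of this patch into a single
view (the #ifdef arrangement below is a paraphrase of those hunks, with
comments added):

    #ifdef CONFIG_PARAVIRT
    /* paravirtualized: dispatch to the registered backend hook */
    #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
    #else
    /* bare metal: calibrate directly against hardware timers */
    #define calibrate_tsc() native_calibrate_tsc()
    #endif

Callers now do tsc_khz = calibrate_tsc() first and copy that into
cpu_khz, rather than deriving tsc_khz from cpu_khz as before.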
        .get_wallclock = native_get_wallclock,
        .set_wallclock = native_set_wallclock,
        .sched_clock = native_sched_clock,
-       .get_cpu_khz = native_calculate_cpu_khz,
+       .get_tsc_khz = native_calibrate_tsc,
 };
 
 struct pv_irq_ops pv_irq_ops = {
 
 }
 
 /**
- * tsc_calibrate - calibrate the tsc on boot
+ * native_calibrate_tsc - calibrate the tsc on boot
  */
-static unsigned int __init tsc_calibrate(void)
+unsigned long native_calibrate_tsc(void)
 {
        unsigned long flags;
        u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2;
        return tsc_khz_val;
 }
 
-unsigned long native_calculate_cpu_khz(void)
-{
-       return tsc_calibrate();
-}
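
With the static tsc_calibrate() helper folded into the exported
native_calibrate_tsc(), one level of indirection disappears. The real
routine cross-checks the TSC against the PIT, ACPI PM timer and HPET
(hence the tr1/tr2, pm1/pm2 and hpet1/hpet2 locals above); a minimal
sketch of the underlying idea, with reference_delay_ms() as a
hypothetical stand-in for the reference-timer wait:

    /* Sketch only: count TSC cycles across a reference interval of
     * known length; cycles per millisecond is the rate in kHz. */
    static unsigned long tsc_khz_sketch(void)
    {
            u64 tsc1, tsc2;

            tsc1 = get_cycles();        /* rdtsc */
            reference_delay_ms(50);     /* hypothetical 50ms reference wait */
            tsc2 = get_cycles();

            return (unsigned long)((tsc2 - tsc1) / 50);
    }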
 
 #ifdef CONFIG_X86_32
 /* Only called from the Powernow K7 cpu freq driver */
        unsigned long cpu_khz_old = cpu_khz;
 
        if (cpu_has_tsc) {
-               cpu_khz = calculate_cpu_khz();
-               tsc_khz = cpu_khz;
+               tsc_khz = calibrate_tsc();
+               cpu_khz = tsc_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                        cpu_khz_old, cpu_khz);
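
The cpufreq_scale() call above rescales the calibrated delay-loop count
in proportion to the frequency change. In effect it computes the
following (a sketch using div_u64() from <linux/math64.h>; the real
helper uses fixed-point arithmetic instead of a 64-bit division):

    /* loops_per_jiffy scales linearly with the clock frequency:
     * new_lpj = old_lpj * khz_new / khz_old */
    static unsigned long scale_lpj(unsigned long lpj,
                                   unsigned long khz_old,
                                   unsigned long khz_new)
    {
            return (unsigned long)div_u64((u64)lpj * khz_new, khz_old);
    }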
        if (!cpu_has_tsc)
                return;
 
-       cpu_khz = calculate_cpu_khz();
-       tsc_khz = cpu_khz;
+       tsc_khz = calibrate_tsc();
+       cpu_khz = tsc_khz;
 
-       if (!cpu_khz) {
+       if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                return;
        }
 
                pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
 #endif
                pv_time_ops.sched_clock = vmi_sched_clock;
-               pv_time_ops.get_cpu_khz = vmi_cpu_khz;
+               pv_time_ops.get_tsc_khz = vmi_tsc_khz;
 
                /* We have true wallclock functions; disable CMOS clock sync */
                no_sync_cmos_clock = 1;
 
        return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
 }
 
-/* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
-unsigned long vmi_cpu_khz(void)
+/* pv_time_ops.get_tsc_khz = vmi_tsc_khz */
+unsigned long vmi_tsc_khz(void)
 {
        unsigned long long khz;
        khz = vmi_timer_ops.get_cycle_frequency();
 
  * what speed it runs at, or 0 if it's unusable as a reliable clock source.
  * This matches what we want here: if we return 0 from this function, the x86
  * TSC clock will give up and not register itself. */
-static unsigned long lguest_cpu_khz(void)
+static unsigned long lguest_tsc_khz(void)
 {
        return lguest_data.tsc_khz;
 }
        /* time operations */
        pv_time_ops.get_wallclock = lguest_get_wallclock;
        pv_time_ops.time_init = lguest_time_init;
-       pv_time_ops.get_cpu_khz = lguest_cpu_khz;
+       pv_time_ops.get_tsc_khz = lguest_tsc_khz;
 
        /* Now is a good time to look at the implementations of these functions
         * before returning to the rest of lguest_init(). */
 
 
        .set_wallclock = xen_set_wallclock,
        .get_wallclock = xen_get_wallclock,
-       .get_cpu_khz = xen_cpu_khz,
+       .get_tsc_khz = xen_tsc_khz,
        .sched_clock = xen_sched_clock,
 };
 
 
 }
 
 
-/* Get the CPU speed from Xen */
-unsigned long xen_cpu_khz(void)
+/* Get the TSC speed from Xen */
+unsigned long xen_tsc_khz(void)
 {
        u64 xen_khz = 1000000ULL << 32;
        const struct pvclock_vcpu_time_info *info =
 
 
 void xen_setup_timer(int cpu);
 void xen_setup_cpu_clockevents(void);
-unsigned long xen_cpu_khz(void);
+unsigned long xen_tsc_khz(void);
 void __init xen_time_init(void);
 unsigned long xen_get_wallclock(void);
 int xen_set_wallclock(unsigned long time);
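
Unlike the native path, xen_tsc_khz() does not measure anything: Xen
publishes the pvclock conversion parameters (tsc_to_system_mul, a 32.32
fixed-point multiplier, and tsc_shift) that turn TSC cycles into
nanoseconds, and the frequency is recovered by inverting them, which is
where the 1000000ULL << 32 constant above comes from. A sketch of that
inversion, assuming the standard pvclock field semantics:

    /* ns = ((cycles << tsc_shift) * tsc_to_system_mul) >> 32, so
     * kHz = cycles per ms = (10^6 << 32) / mul, undoing the shift. */
    u64 khz = 1000000ULL << 32;
    do_div(khz, info->tsc_to_system_mul);
    if (info->tsc_shift < 0)
            khz <<= -info->tsc_shift;
    else
            khz >>= info->tsc_shift;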
 
        int (*set_wallclock)(unsigned long);
 
        unsigned long long (*sched_clock)(void);
-       unsigned long (*get_cpu_khz)(void);
+       unsigned long (*get_tsc_khz)(void);
 };
 
 struct pv_cpu_ops {
 {
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
-#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())
+#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
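
Under CONFIG_PARAVIRT the macro simply indirects through the hook, so a
guest backend overrides it by plain assignment at init time, exactly as
the VMI, lguest and Xen hunks above do. A sketch with a hypothetical
backend and boot-info field:

    /* Hypothetical guest: report the TSC frequency the hypervisor
     * handed over at boot instead of calibrating it locally. */
    static unsigned long myguest_tsc_khz(void)
    {
            return myguest_boot_info.tsc_khz;   /* hypothetical field */
    }

    static void __init myguest_time_init(void)
    {
            pv_time_ops.get_tsc_khz = myguest_tsc_khz;
    }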
 
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
 
 #define TICK_SIZE (tick_nsec / 1000)
 
 unsigned long long native_sched_clock(void);
-unsigned long native_calculate_cpu_khz(void);
+unsigned long native_calibrate_tsc(void);
 
 extern int timer_ack;
 extern int no_timer_check;
 extern int recalibrate_cpu_khz(void);
 
 #ifndef CONFIG_PARAVIRT
-#define calculate_cpu_khz() native_calculate_cpu_khz()
+#define calibrate_tsc() native_calibrate_tsc()
 #endif
 
 /* Accelerators for sched_clock()
 
 extern unsigned long vmi_get_wallclock(void);
 extern int vmi_set_wallclock(unsigned long now);
 extern unsigned long long vmi_sched_clock(void);
-extern unsigned long vmi_cpu_khz(void);
+extern unsigned long vmi_tsc_khz(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 extern void __devinit vmi_time_bsp_init(void);