if (has_cpuflag(X86_FEATURE_TSC)) {
                debug_putstr(" RDTSC");
-               raw = native_read_tsc();
+               raw = rdtsc();
 
                random ^= raw;
                use_i8254 = false;
 
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
-       ret = (cycle_t)native_read_tsc();
+       ret = (cycle_t)rdtsc();
 
        last = gtod->cycle_last;
 
 
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
        DECLARE_ARGS(val, low, high);
 
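The comment above stresses that rdtsc() itself imposes no ordering. A minimal sketch of the ordered-read pattern that several call sites in this patch use (the wrapper name here is illustrative, not part of the patch):

	static __always_inline unsigned long long rdtsc_ordered_sketch(void)
	{
		/* keep the CPU from speculatively executing the RDTSC earlier */
		rdtsc_barrier();
		return rdtsc();
	}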
 
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-       u64 delta = native_read_tsc() - src->tsc_timestamp;
+       u64 delta = rdtsc() - src->tsc_timestamp;
        return pvclock_scale_delta(delta, src->tsc_to_system_mul,
                                   src->tsc_shift);
 }
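pvclock_scale_delta() is defined elsewhere in the tree. For reference, a simplified sketch of the fixed-point scaling it applies to the delta computed above, assuming the standard pvclock ABI (tsc_shift adjusts the delta, then a 32.32 fixed-point multiply); the real helper keeps a 96-bit intermediate product, which this 64-bit version does not:

	static inline u64 scale_delta_sketch(u64 delta, u32 mul_frac, s8 shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		/* (delta * mul_frac) / 2^32, ignoring 64-bit overflow */
		return (delta * mul_frac) >> 32;
	}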
 
         * on during the bootup the random pool has true entropy too.
         */
        get_random_bytes(&canary, sizeof(canary));
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        canary += tsc + (tsc << 32UL);
 
        current->stack_canary = canary;
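A note on the arithmetic above: assuming the usual u64 declarations in this function, the shifted copy folds the TSC into the high word of the canary as well; on 32-bit, where stack_canary is a 32-bit unsigned long, the high half simply truncates away.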
 
                return 0;
 #endif
 
-       return native_read_tsc();
+       return rdtsc();
 }
 
 extern void tsc_init(void);
 
 
        /* Verify whether apbt counter works */
        t1 = dw_apb_clocksource_read(clocksource_apbt);
-       start = native_read_tsc();
+       start = rdtsc();
 
        /*
         * We don't know the TSC frequency yet, but waiting for
         */
        do {
                rep_nop();
-               now = native_read_tsc();
+               now = rdtsc();
        } while ((now - start) < 200000UL);
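For scale (illustrative numbers, not from the patch): 200000 TSC cycles is 200 µs at 1 GHz and 50 µs at 4 GHz, so the wait is long enough for a working always-on counter to advance even though the TSC frequency is still unknown. The HPET variant of this loop further down is identical.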
 
        /* APBT is the only always on clocksource, it has to work! */
        old = dw_apb_clocksource_read(clocksource_apbt);
        old += loop;
 
-       t1 = native_read_tsc();
+       t1 = rdtsc();
 
        do {
                new = dw_apb_clocksource_read(clocksource_apbt);
        } while (new < old);
 
-       t2 = native_read_tsc();
+       t2 = rdtsc();
 
        shift = 5;
        if (unlikely(loop >> shift == 0)) {
 
 {
        u64 tsc;
 
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
        return 0;
 }
        unsigned long pm = acpi_pm_read_early();
 
        if (cpu_has_tsc)
-               tsc = native_read_tsc();
+               tsc = rdtsc();
 
        switch (lapic_cal_loops++) {
        case 0:
        long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
        if (cpu_has_tsc)
-               tsc = native_read_tsc();
+               tsc = rdtsc();
 
        if (disable_apic) {
                disable_ioapic_support();
                }
                if (queued) {
                        if (cpu_has_tsc && cpu_khz) {
-                               ntsc = native_read_tsc();
+                               ntsc = rdtsc();
                                max_loops = (cpu_khz << 10) - (ntsc - tsc);
                        } else
                                max_loops--;
 
 
                n = K6_BUG_LOOP;
                f_vide = vide;
-               d = native_read_tsc();
+               d = rdtsc();
                while (n--)
                        f_vide();
-               d2 = native_read_tsc();
+               d2 = rdtsc();
                d = d2-d;
 
                if (d > 20*K6_BUG_LOOP)
 
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       m->tsc = native_read_tsc();
+       m->tsc = rdtsc();
        /* We hope get_seconds stays lockless */
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
 {
        unsigned long *cpu_tsc = (unsigned long *)data;
 
-       cpu_tsc[smp_processor_id()] = native_read_tsc();
+       cpu_tsc[smp_processor_id()] = rdtsc();
 }
 
 static int mce_apei_read_done;
 
         */
        if (!arch_get_random_long(&rand)) {
                /* The constant is an arbitrary large prime */
-               rand = native_read_tsc();
+               rand = rdtsc();
                rand *= 0xc345c6b72fd16123UL;
        }
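A note on the fallback above: the constant is odd, so multiplying by it is invertible modulo 2^64 and loses no entropy, while smearing the TSC's fast-moving low bits across the whole word.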
 
 
 
        /* Verify whether hpet counter works */
        t1 = hpet_readl(HPET_COUNTER);
-       start = native_read_tsc();
+       start = rdtsc();
 
        /*
         * We don't know the TSC frequency yet, but waiting for
         */
        do {
                rep_nop();
-               now = native_read_tsc();
+               now = rdtsc();
        } while ((now - start) < 200000UL);
 
        if (t1 == hpet_readl(HPET_COUNTER)) {
 
        u64 ret;
 
        rdtsc_barrier();
-       ret = native_read_tsc();
+       ret = rdtsc();
 
        return ret;
 }
 
 
        data = cyc2ns_write_begin(cpu);
 
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
        ns_now = cycles_2_ns(tsc_now);
 
        /*
        }
 
        /* read the Time Stamp Counter: */
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
 
        /* return the value in ns */
        return cycles_2_ns(tsc_now);
 
 
        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
-       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
                local_irq_save(flags);
 
                now = apic->lapic_timer.timer.base->get_time();
-               guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+               guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
                if (likely(tscdeadline > guest_tsc)) {
                        ns = (tscdeadline - guest_tsc) * 1000000ULL;
                        do_div(ns, this_tsc_khz);
 
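A worked instance of the cycles-to-nanoseconds conversion just above (illustrative numbers, not from the patch): with this_tsc_khz = 2000000 (a 2 GHz guest TSC), a deadline 3000000 cycles in the future gives ns = 3000000 * 1000000 / 2000000 = 1500000, i.e. a 1.5 ms timer.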
 {
        u64 tsc;
 
-       tsc = svm_scale_tsc(vcpu, native_read_tsc());
+       tsc = svm_scale_tsc(vcpu, rdtsc());
 
        return target_tsc - tsc;
 }
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, native_read_tsc());
+                       svm_scale_tsc(vcpu, rdtsc());
 
                break;
        }
 
 {
        u64 host_tsc, tsc_offset;
 
-       host_tsc = native_read_tsc();
+       host_tsc = rdtsc();
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
 }
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-       return target_tsc - native_read_tsc();
+       return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
-       ret = (cycle_t)native_read_tsc();
+       ret = (cycle_t)rdtsc();
 
        last = pvclock_gtod_data.clock.cycle_last;
 
                return 1;
        }
        if (!use_master_clock) {
-               host_tsc = native_read_tsc();
+               host_tsc = rdtsc();
                kernel_ns = get_kernel_ns();
        }
 
 
        if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
                s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-                               native_read_tsc() - vcpu->arch.last_host_tsc;
+                               rdtsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
 {
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
-       vcpu->arch.last_host_tsc = native_read_tsc();
+       vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                hw_breakpoint_restore();
 
        vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          native_read_tsc());
+                                                          rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
        if (ret != 0)
                return ret;
 
-       local_tsc = native_read_tsc();
+       local_tsc = rdtsc();
        stable = !check_tsc_unstable();
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
 
        preempt_disable();
        cpu = smp_processor_id();
        rdtsc_barrier();
-       bclock = native_read_tsc();
+       bclock = rdtsc();
        for (;;) {
                rdtsc_barrier();
-               now = native_read_tsc();
+               now = rdtsc();
                if ((now - bclock) >= loops)
                        break;
 
                        loops -= (now - bclock);
                        cpu = smp_processor_id();
                        rdtsc_barrier();
-                       bclock = native_read_tsc();
+                       bclock = rdtsc();
                }
        }
        preempt_enable();
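The hunk above elides the condition guarding the re-base of bclock; assuming the usual migration check, it is of the form `if (unlikely(cpu != smp_processor_id()))` — each CPU's TSC is unrelated (see the rdtsc() comment earlier in this patch), so after a migration the loop credits the time already waited and restarts from the new CPU's counter.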
 int read_current_timer(unsigned long *timer_val)
 {
        if (delay_fn == delay_tsc) {
-               *timer_val = native_read_tsc();
+               *timer_val = rdtsc();
                return 0;
        }
        return -1;
 
        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        local_irq_restore(flags);
 
        cpu->last_sample_time = cpu->sample.time;
 
 
        for(i = 0; i < 50; i++) {
                local_irq_save(flags);
-               t1 = native_read_tsc();
+               t1 = rdtsc();
                for (t = 0; t < 50; t++) gameport_read(gameport);
-               t2 = native_read_tsc();
+               t2 = rdtsc();
                local_irq_restore(flags);
                udelay(i * 10);
                if (t2 - t1 < tx) tx = t2 - t1;
 
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x)    do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
+#define GET_TIME(x)    do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y)     (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME      (cpu_has_tsc?"TSC":"PIT")
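A worked instance of the PIT branch of DELTA (illustrative values, assuming HZ=100, so PIT_TICK_RATE / HZ = 1193182 / 100 = 11931): the PIT counts down, so reading x = 100 before y = 11900 means the counter wrapped, and DELTA(x,y) = 100 - 11900 + 11931 = 131 ticks.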
 static unsigned int get_time_pit(void)
         return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x)    do { x = (unsigned int)native_read_tsc(); } while (0)
+#define GET_TIME(x)    do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
 
 #define GETTICK(x)                                                \
 ({                                                                \
        if (cpu_has_tsc)                                          \
-               x = (unsigned int)native_read_tsc();              \
+               x = (unsigned int)rdtsc();                        \
 })
 #else /* __i386__ */
 #define GETTICK(x)
 
 
        /* check result for the last window */
        msr_now = pkg_state_counter();
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
 
        /* calculate pkg cstate vs tsc ratio */
        if (!msr_last || !tsc_last)
        u64 val64;
 
        msr_now = pkg_state_counter();
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
        jiffies_now = jiffies;
 
        /* calculate pkg cstate vs tsc ratio */
 
 
        printk(KERN_DEBUG "start--> \n");
        then = read_pmtmr();
-       then_tsc = native_read_tsc();
+       then_tsc = rdtsc();
        for (i=0;i<20;i++) {
                mdelay(100);
                now = read_pmtmr();
-               now_tsc = native_read_tsc();
+               now_tsc = rdtsc();
                diff = (now - then) & 0xFFFFFF;
                diff_tsc = now_tsc - then_tsc;
                printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
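For reference on the masking above: the ACPI PM timer is a 24-bit counter clocked at 3.579545 MHz, hence the & 0xFFFFFF applied to its delta; with the 100 ms mdelay in this loop, diff_pmtmr should come out near 3579545 / 10 ≈ 357954 ticks (illustrative arithmetic, not from the patch).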