DECLARE_PER_CPU(u64, decrementers_next_tb);
 
+/*
+ * Return this CPU's next timer expiry, in timebase ticks, from the
+ * per-CPU decrementers_next_tb variable.  Preemption must be disabled
+ * (or the CPU otherwise pinned) for __this_cpu_read() to be meaningful
+ * -- presumably callers run with interrupts off; verify at call sites.
+ */
+static inline u64 timer_get_next_tb(void)
+{
+       return __this_cpu_read(decrementers_next_tb);
+}
+
 /* Convert timebase ticks to nanoseconds */
 unsigned long long tb_to_ns(unsigned long long tb_ticks);
 
 
 EXPORT_SYMBOL(decrementer_clockevent);
 
 DEFINE_PER_CPU(u64, decrementers_next_tb);
+EXPORT_SYMBOL_GPL(decrementers_next_tb);
 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
 
 #define XSEC_PER_SEC (1024*1024)
 
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        struct p9_host_os_sprs host_os_sprs;
        s64 dec;
-       u64 tb;
+       u64 tb, next_timer;
        int trap, save_pmu;
 
        WARN_ON_ONCE(vcpu->arch.ceded);
 
-       dec = mfspr(SPRN_DEC);
        tb = mftb();
-       if (dec < 0)
+       next_timer = timer_get_next_tb();
+       if (tb >= next_timer)
                return BOOK3S_INTERRUPT_HV_DECREMENTER;
-       local_paca->kvm_hstate.dec_expires = dec + tb;
-       if (local_paca->kvm_hstate.dec_expires < time_limit)
-               time_limit = local_paca->kvm_hstate.dec_expires;
+       if (next_timer < time_limit)
+               time_limit = next_timer;
 
        save_p9_host_os_sprs(&host_os_sprs);
 
        vc->entry_exit_map = 0x101;
        vc->in_guest = 0;
 
-       set_dec(local_paca->kvm_hstate.dec_expires - mftb());
+       next_timer = timer_get_next_tb();
+       set_dec(next_timer - mftb());
        /* We may have raced with new irq work */
        if (test_irq_work_pending())
                set_dec(1);