 void etr_switch_to_local(void);
 void etr_sync_check(void);
 
+/* notifier for syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
 /* STP interruption parameter */
 struct stp_irq_parm {
        unsigned int _pad0      : 14;
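
The new s390_epoch_delta_notifier declared above gives other subsystems a hook that fires whenever an ETR/STP sync shifts the TOD clock; the callback receives a pointer to the raw TOD delta. A minimal subscriber could look roughly like the sketch below, following the same pattern kvm-s390.c uses later in this patch (my_epoch_sync, my_epoch_nb and my_epoch_init are illustrative names, not part of the patch):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <asm/etr.h>

/* hypothetical subscriber: called with a pointer to the TOD delta */
static int my_epoch_sync(struct notifier_block *nb, unsigned long unused,
			 void *v)
{
	unsigned long long *delta = v;

	/* adjust any cached TOD-derived state by *delta here */
	pr_debug("TOD epoch delta: %llu\n", *delta);
	return NOTIFY_OK;
}

static struct notifier_block my_epoch_nb = {
	.notifier_call = my_epoch_sync,
};

static int __init my_epoch_init(void)
{
	return atomic_notifier_chain_register(&s390_epoch_delta_notifier,
					      &my_epoch_nb);
}
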
 
 
 static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
+ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
+EXPORT_SYMBOL(s390_epoch_delta_notifier);
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
 static int etr_sync_clock(void *data)
 {
        static int first;
-       unsigned long long clock, old_clock, delay, delta;
+       unsigned long long clock, old_clock, clock_delta, delay, delta;
        struct clock_sync_data *etr_sync;
        struct etr_aib *sync_port, *aib;
        int port;
                delay = (unsigned long long)
                        (aib->edf2.etv - sync_port->edf2.etv) << 32;
                delta = adjust_time(old_clock, clock, delay);
+               clock_delta = clock - old_clock;
+               atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0,
+                                          &clock_delta);
                etr_sync->fixup_cc = delta;
                fixup_clock_comparator(delta);
                /* Verify that the clock is properly set. */
 static int stp_sync_clock(void *data)
 {
        static int first;
-       unsigned long long old_clock, delta;
+       unsigned long long old_clock, delta, new_clock, clock_delta;
        struct clock_sync_data *stp_sync;
        int rc;
 
                old_clock = get_tod_clock();
                rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
                if (rc == 0) {
-                       delta = adjust_time(old_clock, get_tod_clock(), 0);
+                       new_clock = get_tod_clock();
+                       delta = adjust_time(old_clock, new_clock, 0);
+                       clock_delta = new_clock - old_clock;
+                       atomic_notifier_call_chain(&s390_epoch_delta_notifier,
+                                                  0, &clock_delta);
                        fixup_clock_comparator(delta);
                        rc = chsc_sstpi(stp_page, &stp_info,
                                        sizeof(struct stp_sstpi));
 
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
+       preempt_disable();
        if (!(vcpu->arch.sie_block->ckc <
-             get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+             get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
+               preempt_enable();
                return 0;
+       }
+       preempt_enable();
        return ckc_interrupts_enabled(vcpu);
 }
 
                goto no_timer;
        }
 
+       preempt_disable();
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+       preempt_enable();
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
        /* underflow */
        u64 now, sltime;
 
        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+       preempt_disable();
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+       preempt_enable();
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
        /*
 
 #include <linux/vmalloc.h>
 #include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
+#include <asm/etr.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
+/*
+ * This callback is executed during stop_machine(). All CPUs are therefore
+ * temporarily stopped. In order not to change guest behavior, we have to
+ * disable preemption whenever we touch the epoch of kvm and the VCPUs,
+ * so a CPU won't be stopped while calculating with the epoch.
+ */
+static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+                         void *v)
+{
+       struct kvm *kvm;
+       struct kvm_vcpu *vcpu;
+       int i;
+       unsigned long long *delta = v;
+
+       list_for_each_entry(kvm, &vm_list, vm_list) {
+               kvm->arch.epoch -= *delta;
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       vcpu->arch.sie_block->epoch -= *delta;
+               }
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_clock_notifier = {
+       .notifier_call = kvm_clock_sync,
+};
+
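
The epoch adjustment above can be read off from how the guest time of day is formed: the SIE block's epoch is added to the host TOD, i.e. guest_tod = host_tod + epoch. A sync that reports clock_delta = new_clock - old_clock moves the host TOD forward by clock_delta, so keeping the guest-visible time stable requires, roughly,

	guest_tod = host_tod + epoch
	          = (host_tod + clock_delta) + (epoch - clock_delta)

which is why both kvm->arch.epoch and every vcpu's sie_block->epoch are decremented by the reported delta.
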
 int kvm_arch_hardware_setup(void)
 {
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
+       atomic_notifier_chain_register(&s390_epoch_delta_notifier,
+                                      &kvm_clock_notifier);
        return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
        gmap_unregister_ipte_notifier(&gmap_notifier);
+       atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
+                                        &kvm_clock_notifier);
 }
 
 int kvm_arch_init(void *opaque)
                return r;
 
        mutex_lock(&kvm->lock);
+       preempt_disable();
        kvm->arch.epoch = gtod - host_tod;
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
        kvm_s390_vcpu_unblock_all(kvm);
+       preempt_enable();
        mutex_unlock(&kvm->lock);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
        return 0;
        if (r)
                return r;
 
+       preempt_disable();
        gtod = host_tod + kvm->arch.epoch;
+       preempt_enable();
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
        mutex_lock(&vcpu->kvm->lock);
+       preempt_disable();
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+       preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
 
        val = (val - hostclk) & ~0x3fUL;
 
        mutex_lock(&vcpu->kvm->lock);
+       preempt_disable();
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
+       preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);
 
        kvm_s390_set_psw_cc(vcpu, 0);