{
        struct kvm_vcpu *cur_vcpu;
        unsigned int vcpu_idx;
-       u64 host_tod, gtod;
-       int r;
+       u64 gtod;
 
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
-
        mutex_lock(&kvm->lock);
        preempt_disable();
-       kvm->arch.epoch = gtod - host_tod;
+       kvm->arch.epoch = gtod - get_tod_clock();
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       u64 host_tod, gtod;
-       int r;
-
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
+       u64 gtod;
 
        preempt_disable();
-       gtod = host_tod + kvm->arch.epoch;
+       gtod = get_tod_clock() + kvm->arch.epoch;
        preempt_enable();
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
 
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu *cpup;
-       s64 hostclk, val;
+       s64 val;
        int i, rc;
        ar_t ar;
        u64 op2;
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
 
-       if (store_tod_clock(&hostclk)) {
-               kvm_s390_set_psw_cc(vcpu, 3);
-               return 0;
-       }
        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-       val = (val - hostclk) & ~0x3fUL;
 
        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
+       val = (val - get_tod_clock()) & ~0x3fUL;
        kvm_for_each_vcpu(i, cpup, vcpu->kvm)
                cpup->arch.sie_block->epoch = val;
        preempt_enable();