        kvm_hv_invalidate_tsc_page(kvm);
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
        kvm_make_mclock_inprogress_request(kvm);
+
        /* no guest entries from this point */
+       spin_lock(&ka->pvclock_gtod_sync_lock);
        pvclock_update_vm_gtod_copy(kvm);
+       spin_unlock(&ka->pvclock_gtod_sync_lock);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        /* guest entries allowed */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
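
For reference, a sketch of the whole function as it reads once this hunk is
applied. The hunk does not show the function name or the declarations at the
top, so kvm_gen_update_masterclock(), the #ifdef CONFIG_X86_64 guard, and the
locals are assumptions inferred from the visible context (kvm_for_each_vcpu(i,
...), ka, and the trailing #endif); only the locking change comes from the
diff itself:

        /* Sketch of the post-patch result; name and declarations assumed. */
        static void kvm_gen_update_masterclock(struct kvm *kvm)
        {
        #ifdef CONFIG_X86_64
                int i;
                struct kvm_vcpu *vcpu;
                struct kvm_arch *ka = &kvm->arch;

                kvm_hv_invalidate_tsc_page(kvm);

                /* Guest entries are blocked by the request, not the lock. */
                kvm_make_mclock_inprogress_request(kvm);

                /* no guest entries from this point */
                spin_lock(&ka->pvclock_gtod_sync_lock);
                pvclock_update_vm_gtod_copy(kvm);
                spin_unlock(&ka->pvclock_gtod_sync_lock);

                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

                /* guest entries allowed */
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
        #endif
        }

The net effect: pvclock_gtod_sync_lock now covers only
pvclock_update_vm_gtod_copy(). Per the hunk's own comments, it is the
KVM_REQ_MCLOCK_INPROGRESS request that keeps vCPUs out of the guest across
the update, so the request and clear loops no longer need to run under the
spinlock.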
 
                struct kvm_arch *ka = &kvm->arch;
 
                spin_lock(&ka->pvclock_gtod_sync_lock);
-
                pvclock_update_vm_gtod_copy(kvm);
+               spin_unlock(&ka->pvclock_gtod_sync_lock);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
        mutex_unlock(&kvm_lock);
 }
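
The second hunk makes the same change in a per-VM loop. The enclosing
function is not shown; judging from the mutex_unlock(&kvm_lock) and the
kvm_for_each_vcpu(cpu, ...) iterators, this is presumably a notifier path
that walks all VMs, but that is an inference, not something the hunk states.
A sketch of the resulting loop, with the enclosing list_for_each_entry()
assumed:

        /* Sketch: enclosing loop and declarations assumed, not shown. */
        list_for_each_entry(kvm, &vm_list, vm_list) {
                struct kvm_arch *ka = &kvm->arch;

                /* Lock held only around the master-clock recomputation. */
                spin_lock(&ka->pvclock_gtod_sync_lock);
                pvclock_update_vm_gtod_copy(kvm);
                spin_unlock(&ka->pvclock_gtod_sync_lock);

                /* Requests and the MCLOCK_INPROGRESS clear run unlocked. */
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
        }
        mutex_unlock(&kvm_lock);

After both hunks, the two call sites follow one consistent pattern: take
pvclock_gtod_sync_lock only for pvclock_update_vm_gtod_copy(), and issue the
per-vCPU requests outside the critical section.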