        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
-       /* TSC page MSR was written by KVM userspace, update pending */
+       /* TSC page update was triggered from the host side */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active  */
        HV_TSC_PAGE_SET,
-       /* TSC page is currently being updated and therefore is inactive */
-       HV_TSC_PAGE_UPDATING,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
 };
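
For reference, this is how the status enum reads once the hunk above is applied, pieced together from its context lines. The enum tag hv_tsc_page_status is inferred from the field name used in the functions below, and the comment on HV_TSC_PAGE_UNSET is not visible in this excerpt:

enum hv_tsc_page_status {
        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
        /* TSC page update was triggered from the host side */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active */
        HV_TSC_PAGE_SET,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
};

The transient HV_TSC_PAGE_UPDATING state is gone; a host-triggered refresh is now recorded as HV_TSC_PAGE_HOST_CHANGED, the same "update pending" pattern already used for guest MSR writes.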
 
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
+       mutex_lock(&hv->hv_lock);
+
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
-               return;
+               goto out_unlock;
 
-       mutex_lock(&hv->hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;
 
        mutex_unlock(&hv->hv_lock);
 }
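
With this change, kvm_hv_setup_tsc_page() takes hv->hv_lock before it inspects the status and returns early not only for the BROKEN and UNSET cases but also when the page is already HV_TSC_PAGE_SET, which is what allows the !v->vcpu_idx guard in x86.c below to be dropped. A sketch of how the top of the function reads afterwards, reconstructed from the diff (the body that actually populates the page sits between these checks and the out_unlock label and is elided here):

        mutex_lock(&hv->hv_lock);

        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
                goto out_unlock;

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        /* ... populate the page contents (elided in this excerpt) ... */

out_unlock:
        mutex_unlock(&hv->hv_lock);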
 
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+void kvm_hv_request_tsc_page_update(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
-       u64 gfn;
-       int idx;
-
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
-           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
-           tsc_page_update_unsafe(hv))
-               return;
 
        mutex_lock(&hv->hv_lock);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
-               goto out_unlock;
-
-       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
-               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
+           !tsc_page_update_unsafe(hv))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 
-       gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-
-       hv->tsc_ref.tsc_sequence = 0;
-
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
-       srcu_read_unlock(&kvm->srcu, idx);
-
-out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
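
The replacement is small enough to quote in full as it reads after the patch (reconstructed from the hunk above; tsc_page_update_unsafe() is the existing helper referenced in the removed code). Rather than zeroing tsc_sequence in guest memory itself, it only flags a pending host-side update under hv_lock and leaves the actual rewrite to the next kvm_hv_setup_tsc_page() call:

void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
        struct kvm_hv *hv = to_kvm_hv(kvm);

        mutex_lock(&hv->hv_lock);

        if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
            !tsc_page_update_unsafe(hv))
                hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;

        mutex_unlock(&hv->hv_lock);
}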
 
-
 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
 {
        if (!hv_vcpu->enforce_cpuid)
 
 
 static void kvm_update_masterclock(struct kvm *kvm)
 {
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
        kvm_end_pvclock_update(kvm);
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (!v->vcpu_idx)
-               kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+       kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
 
        if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
                return -EINVAL;
 
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);