u64 options;
 };
 
+/* Current state of Hyper-V TSC page clocksource */
+enum hv_tsc_page_status {
+       /* TSC page was not set up or disabled */
+       HV_TSC_PAGE_UNSET = 0,
+       /* TSC page MSR was written by the guest, update pending */
+       HV_TSC_PAGE_GUEST_CHANGED,
+       /* TSC page MSR was written by KVM userspace, update pending */
+       HV_TSC_PAGE_HOST_CHANGED,
+       /* TSC page was properly set up and is currently active */
+       HV_TSC_PAGE_SET,
+       /* TSC page is currently being updated and therefore is inactive */
+       HV_TSC_PAGE_UPDATING,
+       /* TSC page was set up with an inaccessible GPA */
+       HV_TSC_PAGE_BROKEN,
+};
+
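
The enum above forms a small state machine. As a minimal sketch (a hypothetical helper, not part of the patch), the MSR-write transitions implemented in the HV_X64_MSR_REFERENCE_TSC case further down reduce to:

/*
 * Hypothetical illustration only: clearing the enable bit disables the
 * page; otherwise the writer (guest vs. KVM userspace) determines
 * which pending state is entered.
 */
static inline enum hv_tsc_page_status
hv_tsc_page_msr_transition(u64 data, bool host)
{
	if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return HV_TSC_PAGE_UNSET;

	return host ? HV_TSC_PAGE_HOST_CHANGED : HV_TSC_PAGE_GUEST_CHANGED;
}
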
 /* Hyper-V emulation context */
 struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
+       enum hv_tsc_page_status hv_tsc_page_status;
 
        /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
 
        u64 tsc;
 
        /*
-        * The guest has not set up the TSC page or the clock isn't
-        * stable, fall back to get_kvmclock_ns.
+        * Fall back to get_kvmclock_ns() when the TSC page hasn't been set
+        * up, is broken, is disabled, or is currently being updated.
         */
-       if (!hv->tsc_ref.tsc_sequence)
+       if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
                return div_u64(get_kvmclock_ns(kvm), 100);
 
        vcpu = kvm_get_vcpu(kvm, 0);
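
For reference, when the status is HV_TSC_PAGE_SET this function continues past the hunk above by scaling the guest's TSC per the Hyper-V TLFS reference counter formula (100 ns units); roughly:

	/* ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset */
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
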
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
                return;
 
        mutex_lock(&hv->hv_lock);
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
-               goto out_unlock;
+               goto out_err;
 
        /*
         * While we're computing and writing the parameters, force the
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               goto out_unlock;
+               goto out_err;
 
        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-               goto out_unlock;
+               goto out_err;
 
        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-               goto out_unlock;
+               goto out_err;
 
        /*
         * Now switch to the TSC page mechanism by writing the sequence.
        smp_wmb();
 
        hv->tsc_ref.tsc_sequence = tsc_seq;
-       kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                       &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+               goto out_err;
+
+       hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+       goto out_unlock;
+
+out_err:
+       hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
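
The zero-sequence/smp_wmb() dance above mirrors the seqcount-style protocol the guest uses when reading the page: a sequence of 0 tells the guest to fall back to the reference counter MSR, and a sequence change during the read forces a retry. A reader sketch, modeled on Linux's hv_read_tsc_page_tsc() (details may differ across kernel versions):

static u64 read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 scale, offset, cur_tsc;
	u32 sequence;

	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;	/* disabled: use the MSR instead */

		smp_rmb();
		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		cur_tsc = rdtsc_ordered();
		smp_rmb();

		/* A changed sequence means the host rewrote the page. */
	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(cur_tsc, scale, 64) + offset;
}
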
        struct kvm_hv *hv = to_kvm_hv(kvm);
        u64 gfn;
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
                return;
 
        mutex_lock(&hv->hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;
 
+       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 
        hv->tsc_ref.tsc_sequence = 0;
-       kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                       &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 
 out_unlock:
        mutex_unlock(&hv->hv_lock);
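
Note that only an active page (HV_TSC_PAGE_SET) is moved to HV_TSC_PAGE_UPDATING here: if a guest or host MSR write is still pending, the GUEST_CHANGED/HOST_CHANGED state survives the invalidation, so the deferred kvm_hv_setup_tsc_page() still knows who requested the update.
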
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
-               if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+               if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+                       if (!host)
+                               hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+                       else
+                               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+               } else {
+                       hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+               }
                break;
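
A host-initiated write typically comes from the VMM restoring MSR state after migration. A hypothetical userspace sketch (vcpu_fd, saved_ref_tsc, and the helper name are illustrative, not KVM API):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void restore_hv_reference_tsc(int vcpu_fd, __u64 saved_ref_tsc)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr   = { .nmsrs = 1 },
		.entry = {
			.index = 0x40000021,	/* HV_X64_MSR_REFERENCE_TSC */
			.data  = saved_ref_tsc,
		},
	};

	/*
	 * KVM_SET_MSRS is a "host" write: KVM will mark the TSC page
	 * HV_TSC_PAGE_HOST_CHANGED rather than GUEST_CHANGED.
	 */
	ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}
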
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(kvm,