        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;
 
+       mutex_lock(&kvm->arch.hyperv.hv_lock);
+       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+               goto out_unlock;
+
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
-               return;
+               goto out_unlock;
 
        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               return;
+               goto out_unlock;
 
        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-               return;
+               goto out_unlock;
 
        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-               return;
+               goto out_unlock;
 
        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+       mutex_unlock(&kvm->arch.hyperv.hv_lock);
 }
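The hunk above follows the Hyper-V TLFS publication protocol for the reference TSC page, essentially a seqlock: tsc_sequence is zeroed first (pushing guests back onto the slower time reference count MSR), the parameters are written, and only then is the valid sequence stored, with smp_wmb() ordering the body writes before the sequence write. Rechecking HV_X64_MSR_TSC_REFERENCE_ENABLE after taking hv_lock closes the race with a vcpu concurrently moving or disabling the page. For illustration only, a guest-side reader of this layout could look like the sketch below; it is not code from this patch. HV_REFERENCE_TSC_PAGE, rdtsc() and mul_u64_u64_shr() are real kernel definitions, while read_time_ref_count_msr() is a hypothetical fallback helper.

static u64 read_reference_time(const volatile HV_REFERENCE_TSC_PAGE *p)
{
        u32 seq;
        u64 scale, tsc;
        s64 offset;

        do {
                seq = p->tsc_sequence;
                if (seq == 0)           /* host is mid-update: use the MSR */
                        return read_time_ref_count_msr();
                smp_rmb();              /* sequence before parameters */
                scale  = p->tsc_scale;
                offset = p->tsc_offset;
                tsc    = rdtsc();
                smp_rmb();              /* parameters before the re-check */
        } while (p->tsc_sequence != seq);

        /* reference time = ((tsc * scale) >> 64) + offset, in 100 ns units */
        return mul_u64_u64_shr(tsc, scale, 64) + offset;
}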
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
 }
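Both MSR entry points now serialize partition-wide accesses on the dedicated hv_lock instead of the VM-global kvm->lock, so these handlers, which run inside the vcpu mutex, stay off the coarser lock. The excerpt does not show the lock itself being introduced; reconstructed from the upstream commit (placement abbreviated), the remaining pieces are approximately:

/* arch/x86/include/asm/kvm_host.h */
struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
        /* ... remaining partition-wide state ... */
};

/* arch/x86/kvm/x86.c, in kvm_arch_init_vm() */
mutex_init(&kvm->arch.hyperv.hv_lock);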
 
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
 }
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-       return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+       return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
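kvm_hv_hypercall_enabled() is called on the hypercall path without taking hv_lock, so the load of hv_hypercall is wrapped in READ_ONCE() to guarantee a single, untorn access that the compiler cannot re-fetch; the store side (kvm_hv_set_msr_pw() writing hv_hypercall) is serialized by hv_lock. A minimal sketch of the pattern, using hypothetical example_* wrappers rather than code from the patch:

static void example_set_hypercall(struct kvm *kvm, u64 data)
{
        mutex_lock(&kvm->arch.hyperv.hv_lock);
        kvm->arch.hyperv.hv_hypercall = data;   /* writers serialized by hv_lock */
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static bool example_hypercall_enabled(struct kvm *kvm)
{
        /* lockless reader: one untorn 64-bit load */
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) &
               HV_X64_MSR_HYPERCALL_ENABLE;
}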
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)