return RESUME_GUEST;
 }
 
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
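+       /*
+        * HFSCR_TM starts clear, so the guest's first TM access traps as a
+        * facility unavailable interrupt and lands here: grant TM if it was
+        * permitted at vcpu init, otherwise fail emulation so the caller
+        * queues a program interrupt instead.
+        */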
+       if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+               return EMULATE_FAIL;
+
+       vcpu->arch.hfscr |= HFSCR_TM;
+
+       return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                 struct task_struct *tsk)
 {
                                r = kvmppc_pmu_unavailable(vcpu);
                        if (cause == FSCR_EBB_LG)
                                r = kvmppc_ebb_unavailable(vcpu);
+                       if (cause == FSCR_TM_LG)
+                               r = kvmppc_tm_unavailable(vcpu);
                }
                if (r == EMULATE_FAIL) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
        vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
        /*
-        * PM, EBB is demand-faulted so start with it clear.
+        * PM, EBB, TM are demand-faulted so start with them clear.
         */
-       vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
+       vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
 
        kvmppc_mmu_book3s_hv_init(vcpu);
 
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
 
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
 
 
                if (MSR_TM_ACTIVE(guest_msr)) {
                        kvmppc_restore_tm_hv(vcpu, guest_msr, true);
                        ret = true;
-               } else {
+               } else if (vcpu->arch.hfscr & HFSCR_TM) {
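+                       /* Guest has faulted TM in; restore its TM SPRs */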
                        mtspr(SPRN_TEXASR, vcpu->arch.texasr);
                        mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
                        mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
                unsigned long guest_msr = vcpu->arch.shregs.msr;
                if (MSR_TM_ACTIVE(guest_msr)) {
                        kvmppc_save_tm_hv(vcpu, guest_msr, true);
-               } else {
+               } else if (vcpu->arch.hfscr & HFSCR_TM) {
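+                       /* Guest has faulted TM in; save the TM SPRs it may have changed */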
                        vcpu->arch.texasr = mfspr(SPRN_TEXASR);
                        vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
                        vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+                       if (!vcpu->arch.nested) {
+                               vcpu->arch.load_tm++; /* see load_ebb comment */
+                               if (!vcpu->arch.load_tm)
+                                       vcpu->arch.hfscr &= ~HFSCR_TM;
+                       }
                }
        }
 #endif
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
        /* Save MSR for restore. This is after hard disable, so EE is clear. */