}
 #endif
 
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
 extern void switch_cop(struct mm_struct *next);
 extern int use_cop(unsigned long acop, struct mm_struct *mm);
 extern void drop_cop(unsigned long acop, struct mm_struct *mm);
 
                 * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
                 * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
                 */
-               if (mflags != 0 && mflags != 3)
+               if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+                               kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
                        return H_UNSUPPORTED_FLAG_START;
                return H_TOO_HARD;
        default:
                lpcr &= ~LPCR_AIL;
        if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
                lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
+       /*
+        * On some POWER9s we force AIL off for radix guests, to prevent
+        * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to
+        * the guest PID, which can result in Q0 translations with LPID=0
+        * PID=PIDR being cached, which the host TLB management does not
+        * expect.
+        */
+       if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               lpcr &= ~LPCR_AIL;
 
        /*
         * On POWER9, allow userspace to enable large decrementer for the
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
-               /*
-                * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
-                * path, which also handles hash and dependent threads mode.
-                */
-               if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
-                   !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               if (kvm->arch.threads_indep && kvm_is_radix(kvm))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
                if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
                        pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
                        kvm->arch.threads_indep = true;
+               } else if (!indep_threads_mode && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
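+                       /*
+                        * The MMU-off workaround for the radix prefetch bug
+                        * is only implemented in the P9 independent-threads
+                        * entry path, so dependent threads mode cannot be
+                        * allowed on these CPUs.
+                        */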
+                       pr_warn("KVM: Ignoring indep_threads_mode=N on pre-DD2.2 POWER9\n");
+                       kvm->arch.threads_indep = true;
                } else {
                        kvm->arch.threads_indep = indep_threads_mode;
                }
 
 
        mtspr(SPRN_AMOR, ~0UL);
 
+       /*
+        * Prefetch bug: turn the MMU (and RI) off before switching to the
+        * guest PID, so the hardware cannot cache LPID=0 translations with
+        * the guest PID, which the host would never invalidate.
+        */
+       if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
        switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
 
        /*
         */
        mtspr(SPRN_HDEC, hdec);
 
-       __mtmsrd(0, 1); /* clear RI */
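+       /* RI (and IR/DR) already cleared above on prefetch-bug CPUs */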
+       if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+               __mtmsrd(0, 1); /* clear RI */
 
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
 
        radix_clear_slb();
 
-       __mtmsrd(msr, 0);
-
        accumulate_time(vcpu, &vcpu->arch.rm_exit);
 
        /* Advance host PURR/SPURR by the amount used by guest */
 
        switch_mmu_to_host_radix(kvm, host_pidr);
 
+       /*
+        * If we are in real mode, only turn the MMU back on (restore MSR)
+        * after the MMU context has been switched back to the host, to
+        * avoid the P9_RADIX_PREFETCH_BUG.
+        */
+       __mtmsrd(msr, 0);
+
        end_timing(vcpu);
 
        return trap;
 
        eieio
        tlbsync
        ptesync
-
-BEGIN_FTR_SECTION
-       /* Radix: Handle the case where the guest used an illegal PID */
-       LOAD_REG_ADDR(r4, mmu_base_pid)
-       lwz     r3, VCPU_GUEST_PID(r9)
-       lwz     r5, 0(r4)
-       cmpw    cr0,r3,r5
-       blt     2f
-
-       /*
-        * Illegal PID, the HW might have prefetched and cached in the TLB
-        * some translations for the  LPID 0 / guest PID combination which
-        * Linux doesn't know about, so we need to flush that PID out of
-        * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
-        * the right context.
-       */
-       li      r0,0
-       mtspr   SPRN_LPID,r0
-       isync
-
-       /* Then do a congruence class local flush */
-       ld      r6,VCPU_KVM(r9)
-       lwz     r0,KVM_TLB_SETS(r6)
-       mtctr   r0
-       li      r7,0x400                /* IS field = 0b01 */
-       ptesync
-       sldi    r0,r3,32                /* RS has PID */
-1:     PPC_TLBIEL(7,0,2,1,1)           /* RIC=2, PRS=1, R=1 */
-       addi    r7,r7,0x1000
-       bdnz    1b
-       ptesync
-END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
-
-2:
 #endif /* CONFIG_PPC_RADIX_MMU */
 
        /*
 
        }
 
        /* Find out how many PID bits are supported */
-       if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
-               if (!mmu_pid_bits)
-                       mmu_pid_bits = 20;
-               mmu_base_pid = 1;
-       } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
-               if (!mmu_pid_bits)
-                       mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+                       cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                /*
-                * When KVM is possible, we only use the top half of the
-                * PID space to avoid collisions between host and guest PIDs
-                * which can cause problems due to prefetch when exiting the
-                * guest with AIL=3
+                * Older versions of KVM on these machines prefer if the
+                * guest only uses the low 19 PID bits.
                 */
-               mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
-               mmu_base_pid = 1;
-#endif
-       } else {
-               /* The guest uses the bottom half of the PID space */
                if (!mmu_pid_bits)
                        mmu_pid_bits = 19;
-               mmu_base_pid = 1;
+       } else {
+               if (!mmu_pid_bits)
+                       mmu_pid_bits = 20;
        }
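+       /* The PID space is no longer partitioned between host and guest */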
+       mmu_base_pid = 1;
 
        /*
         * Allocate Partition table and process table for the
 
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
-{
-       unsigned long pid = mm->context.id;
-
-       if (unlikely(pid == MMU_NO_CONTEXT))
-               return;
-
-       if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
-               return;
-
-       /*
-        * If this context hasn't run on that CPU before and KVM is
-        * around, there's a slim chance that the guest on another
-        * CPU just brought in obsolete translation into the TLB of
-        * this CPU due to a bad prefetch using the guest PID on
-        * the way into the hypervisor.
-        *
-        * We work around this here. If KVM is possible, we check if
-        * any sibling thread is in KVM. If it is, the window may exist
-        * and thus we flush that PID from the core.
-        *
-        * A potential future improvement would be to mark which PIDs
-        * have never been used on the system and avoid it if the PID
-        * is new and the process has no other cpumask bit set.
-        */
-       if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
-               int cpu = smp_processor_id();
-               int sib = cpu_first_thread_sibling(cpu);
-               bool flush = false;
-
-               for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
-                       if (sib == cpu)
-                               continue;
-                       if (!cpu_possible(sib))
-                               continue;
-                       if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
-                               flush = true;
-               }
-               if (flush)
-                       _tlbiel_pid(pid, RIC_FLUSH_ALL);
-       }
-}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile ("dssall");
 
-       if (new_on_cpu)
-               radix_kvm_prefetch_workaround(next);
-       else
+       if (!new_on_cpu)
                membarrier_arch_switch_mm(prev, next, tsk);
 
        /*