#define arch_flush_lazy_mmu_mode()      do {} while (0)
 
+extern void hash__tlbiel_all(unsigned int action);
 
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
                            int ssize, unsigned long flags);
 
        return mmu_psize_defs[psize].ap;
 }
 
+extern void radix__tlbiel_all(unsigned int action);
+
 extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start, unsigned long end);
 extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 extern void radix__flush_tlb_all(void);
 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
                                        unsigned long address);
+
 #endif
 
 #include <asm/book3s/64/tlbflush-hash.h>
 #include <asm/book3s/64/tlbflush-radix.h>
 
+/* TLB flush actions. Used as argument to tlbiel_all() */
+enum {
+       TLB_INVAL_SCOPE_GLOBAL = 0,     /* invalidate all TLBs */
+       TLB_INVAL_SCOPE_LPID = 1,       /* invalidate TLBs for current LPID */
+};
+
+static inline void tlbiel_all(void)
+{
+       /*
+        * This is used for host machine check and bootup.
+        *
+        * This uses early_radix_enabled() (and the implementations use
+        * early_cpu_has_feature() etc.) because that works early in boot,
+        * and because this is the machine check path, which is not
+        * performance critical.
+        */
+       if (early_radix_enabled())
+               radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+       else
+               hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
+}
+
+static inline void tlbiel_all_lpid(bool radix)
+{
+       /*
+        * This is used for guest machine check.
+        */
+       if (radix)
+               radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+       else
+               hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
+}
+
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
 
         * called in real mode to handle SLB and TLB errors.
         */
        long            (*machine_check_early)(struct pt_regs *regs);
-
-       /*
-        * Processor specific routine to flush tlbs.
-        */
-       void            (*flush_tlb)(unsigned int action);
-
 };
 
 extern struct cpu_spec         *cur_cpu_spec;
 static inline void cpu_feature_keys_init(void) { }
 #endif
 
-/* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
-enum {
-       TLB_INVAL_SCOPE_GLOBAL = 0,     /* invalidate all TLBs */
-       TLB_INVAL_SCOPE_LPID = 1,       /* invalidate TLBs for current LPID */
-};
-
 #endif /* __ASSEMBLY__ */
 
 /* CPU kernel features */
 
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR_ISA206
-       bl      __init_tlb_power7
        mtlr    r11
        blr
 
        mfspr   r3,SPRN_LPCR
        li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR_ISA206
-       bl      __init_tlb_power7
        mtlr    r11
        blr
 
        li      r4,0 /* LPES = 0 */
        bl      __init_LPCR_ISA206
        bl      __init_HFSCR
-       bl      __init_tlb_power8
        bl      __init_PMU_HV
        bl      __init_PMU_HV_ISA207
        mtlr    r11
        li      r4,0 /* LPES = 0 */
        bl      __init_LPCR_ISA206
        bl      __init_HFSCR
-       bl      __init_tlb_power8
        bl      __init_PMU_HV
        bl      __init_PMU_HV_ISA207
        mtlr    r11
        li      r4,0 /* LPES = 0 */
        bl      __init_LPCR_ISA300
        bl      __init_HFSCR
-       bl      __init_tlb_power9
        bl      __init_PMU_HV
        mtlr    r11
        blr
        li      r4,0 /* LPES = 0 */
        bl      __init_LPCR_ISA300
        bl      __init_HFSCR
-       bl      __init_tlb_power9
        bl      __init_PMU_HV
        mtlr    r11
        blr
        mtspr   SPRN_HFSCR,r3
        blr
 
-/*
- * Clear the TLB using the specified IS form of tlbiel instruction
- * (invalidate by congruence class). P7 has 128 CCs., P8 has 512.
- */
-__init_tlb_power7:
-       li      r6,POWER7_TLB_SETS
-       mtctr   r6
-       li      r7,0xc00        /* IS field = 0b11 */
-       ptesync
-2:     tlbiel  r7
-       addi    r7,r7,0x1000
-       bdnz    2b
-       ptesync
-1:     blr
-
-__init_tlb_power8:
-       li      r6,POWER8_TLB_SETS
-       mtctr   r6
-       li      r7,0xc00        /* IS field = 0b11 */
-       ptesync
-2:     tlbiel  r7
-       addi    r7,r7,0x1000
-       bdnz    2b
-       ptesync
-1:     blr
-
-/*
- * Flush the TLB in hash mode. Hash must flush with RIC=2 once for process
- * and one for partition scope to clear process and partition table entries.
- */
-__init_tlb_power9:
-       li      r6,POWER9_TLB_SETS_HASH - 1
-       mtctr   r6
-       li      r7,0xc00        /* IS field = 0b11 */
-       li      r8,0
-       ptesync
-       PPC_TLBIEL(7, 8, 2, 1, 0)
-       PPC_TLBIEL(7, 8, 2, 0, 0)
-2:     addi    r7,r7,0x1000
-       PPC_TLBIEL(7, 8, 0, 0, 0)
-       bdnz    2b
-       ptesync
-1:     blr
-
 __init_PMU_HV:
        li      r5,0
        mtspr   SPRN_MMCRC,r5
 
 extern void __restore_cpu_power8(void);
 extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
 extern void __restore_cpu_power9(void);
-extern void __flush_tlb_power7(unsigned int action);
-extern void __flush_tlb_power8(unsigned int action);
-extern void __flush_tlb_power9(unsigned int action);
 extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
                .oprofile_cpu_type      = "ppc64/ibm-compat-v1",
                .cpu_setup              = __setup_cpu_power7,
                .cpu_restore            = __restore_cpu_power7,
-               .flush_tlb              = __flush_tlb_power7,
                .machine_check_early    = __machine_check_early_realmode_p7,
                .platform               = "power7",
        },
                .oprofile_cpu_type      = "ppc64/ibm-compat-v1",
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
-               .flush_tlb              = __flush_tlb_power8,
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
                .oprofile_cpu_type      = "ppc64/ibm-compat-v1",
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
-               .flush_tlb              = __flush_tlb_power9,
                .platform               = "power9",
        },
        {       /* Power7 */
                .oprofile_type          = PPC_OPROFILE_POWER4,
                .cpu_setup              = __setup_cpu_power7,
                .cpu_restore            = __restore_cpu_power7,
-               .flush_tlb              = __flush_tlb_power7,
                .machine_check_early    = __machine_check_early_realmode_p7,
                .platform               = "power7",
        },
                .oprofile_type          = PPC_OPROFILE_POWER4,
                .cpu_setup              = __setup_cpu_power7,
                .cpu_restore            = __restore_cpu_power7,
-               .flush_tlb              = __flush_tlb_power7,
                .machine_check_early    = __machine_check_early_realmode_p7,
                .platform               = "power7+",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
-               .flush_tlb              = __flush_tlb_power8,
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
-               .flush_tlb              = __flush_tlb_power8,
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
-               .flush_tlb              = __flush_tlb_power8,
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power8,
                .cpu_restore            = __restore_cpu_power8,
-               .flush_tlb              = __flush_tlb_power8,
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
-               .flush_tlb              = __flush_tlb_power9,
                .machine_check_early    = __machine_check_early_realmode_p9,
                .platform               = "power9",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
-               .flush_tlb              = __flush_tlb_power9,
                .machine_check_early    = __machine_check_early_realmode_p9,
                .platform               = "power9",
        },
                .oprofile_type          = PPC_OPROFILE_INVALID,
                .cpu_setup              = __setup_cpu_power9,
                .cpu_restore            = __restore_cpu_power9,
-               .flush_tlb              = __flush_tlb_power9,
                .machine_check_early    = __machine_check_early_realmode_p9,
                .platform               = "power9",
        },
 
  * Set up the base CPU
  */
 
-extern void __flush_tlb_power8(unsigned int action);
-extern void __flush_tlb_power9(unsigned int action);
 extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
 
 
 static void (*init_pmu_registers)(void);
 
-static void cpufeatures_flush_tlb(void)
-{
-       /*
-        * This is a temporary measure to keep equivalent TLB flush as the
-        * cputable based setup code.
-        */
-       switch (PVR_VER(mfspr(SPRN_PVR))) {
-       case PVR_POWER8:
-       case PVR_POWER8E:
-       case PVR_POWER8NVL:
-               __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
-               break;
-       case PVR_POWER9:
-               __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
-               break;
-       default:
-               pr_err("unknown CPU version for boot TLB flush\n");
-               break;
-       }
-}
-
 static void __restore_cpu_cpufeatures(void)
 {
        /*
 
        if (init_pmu_registers)
                init_pmu_registers();
-
-       cpufeatures_flush_tlb();
 }
 
 static char dt_cpu_name[64];
        .oprofile_type          = PPC_OPROFILE_INVALID,
        .cpu_setup              = NULL,
        .cpu_restore            = __restore_cpu_cpufeatures,
-       .flush_tlb              = NULL,
        .machine_check_early    = NULL,
        .platform               = NULL,
 };
 static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
 {
        cur_cpu_spec->platform = "power8";
-       cur_cpu_spec->flush_tlb = __flush_tlb_power8;
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
 
        return 1;
 static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
 {
        cur_cpu_spec->platform = "power9";
-       cur_cpu_spec->flush_tlb = __flush_tlb_power9;
        cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
 
        return 1;
        system_registers.hfscr = mfspr(SPRN_HFSCR);
        system_registers.fscr = mfspr(SPRN_FSCR);
 
-       cpufeatures_flush_tlb();
-
        pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
        return pte_pfn(*ptep);
 }
 
-static void flush_tlb_206(unsigned int num_sets, unsigned int action)
-{
-       unsigned long rb;
-       unsigned int i;
-
-       switch (action) {
-       case TLB_INVAL_SCOPE_GLOBAL:
-               rb = TLBIEL_INVAL_SET;
-               break;
-       case TLB_INVAL_SCOPE_LPID:
-               rb = TLBIEL_INVAL_SET_LPID;
-               break;
-       default:
-               BUG();
-               break;
-       }
-
-       asm volatile("ptesync" : : : "memory");
-       for (i = 0; i < num_sets; i++) {
-               asm volatile("tlbiel %0" : : "r" (rb));
-               rb += 1 << TLBIEL_INVAL_SET_SHIFT;
-       }
-       asm volatile("ptesync" : : : "memory");
-}
-
-static void flush_tlb_300(unsigned int num_sets, unsigned int action)
-{
-       unsigned long rb;
-       unsigned int i;
-       unsigned int r;
-
-       switch (action) {
-       case TLB_INVAL_SCOPE_GLOBAL:
-               rb = TLBIEL_INVAL_SET;
-               break;
-       case TLB_INVAL_SCOPE_LPID:
-               rb = TLBIEL_INVAL_SET_LPID;
-               break;
-       default:
-               BUG();
-               break;
-       }
-
-       asm volatile("ptesync" : : : "memory");
-
-       if (early_radix_enabled())
-               r = 1;
-       else
-               r = 0;
-
-       /*
-        * First flush table/PWC caches with set 0, then flush the
-        * rest of the sets, partition scope. Radix must then do it
-        * all again with process scope. Hash just has to flush
-        * process table.
-        */
-       asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
-                       "r"(rb), "r"(0), "i"(2), "i"(0), "r"(r));
-       for (i = 1; i < num_sets; i++) {
-               unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
-
-               asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
-                               "r"(rb+set), "r"(0), "i"(2), "i"(0), "r"(r));
-       }
-
-       asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
-                       "r"(rb), "r"(0), "i"(2), "i"(1), "r"(r));
-       if (early_radix_enabled()) {
-               for (i = 1; i < num_sets; i++) {
-                       unsigned long set = i * (1<<TLBIEL_INVAL_SET_SHIFT);
-
-                       asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) : :
-                               "r"(rb+set), "r"(0), "i"(2), "i"(1), "r"(r));
-               }
-       }
-
-       asm volatile("ptesync" : : : "memory");
-}
-
-/*
- * Generic routines to flush TLB on POWER processors. These routines
- * are used as flush_tlb hook in the cpu_spec.
- *
- * action => TLB_INVAL_SCOPE_GLOBAL:  Invalidate all TLBs.
- *          TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
- */
-void __flush_tlb_power7(unsigned int action)
-{
-       flush_tlb_206(POWER7_TLB_SETS, action);
-}
-
-void __flush_tlb_power8(unsigned int action)
-{
-       flush_tlb_206(POWER8_TLB_SETS, action);
-}
-
-void __flush_tlb_power9(unsigned int action)
-{
-       unsigned int num_sets;
-
-       if (early_radix_enabled())
-               num_sets = POWER9_TLB_SETS_RADIX;
-       else
-               num_sets = POWER9_TLB_SETS_HASH;
-
-       flush_tlb_300(num_sets, action);
-}
-
-
 /* flush SLBs and reload */
 #ifdef CONFIG_PPC_BOOK3S_64
 static void flush_and_reload_slb(void)
                return 1;
        }
        if (what == MCE_FLUSH_TLB) {
-               if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
-                       cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
-                       return 1;
-               }
+               tlbiel_all();
+               return 1;
        }
 
        return 0;
 
                                   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
                }
                if (dsisr & DSISR_MC_TLB_MULTI) {
-                       if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
-                               cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
+                       tlbiel_all_lpid(vcpu->kvm->arch.radix);
                        dsisr &= ~DSISR_MC_TLB_MULTI;
                }
                /* Any other errors we don't understand? */
                reload_slb(vcpu);
                break;
        case SRR1_MC_IFETCH_TLBMULTI:
-               if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
-                       cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
+               tlbiel_all_lpid(vcpu->kvm->arch.radix);
                break;
        default:
                handled = 0;
 
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
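+/*
+ * tlbiel instruction for hash, set invalidation, pre-ISA v3.0
+ * (RB encodes only the set number and the IS field).
+ */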
+static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
+{
+       unsigned long rb;
+
+       rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+
+       asm volatile("tlbiel %0" : : "r" (rb));
+}
+
+/*
+ * tlbiel instruction for hash, set invalidation
+ * i.e., r=0 and is=01 or is=10 or is=11
+ */
+static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+                                       unsigned int pid,
+                                       unsigned int ric, unsigned int prs)
+{
+       unsigned long rb;
+       unsigned long rs;
+       unsigned int r = 0; /* hash format */
+
+       rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+       rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+
+       asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+                    : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+                    : "memory");
+}
+
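+/*
+ * Flush all sets of the local TLB on ISA v2.06/v2.07 (POWER7/POWER8):
+ * one tlbiel per congruence class, bracketed by ptesync.
+ */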
+static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
+{
+       unsigned int set;
+
+       asm volatile("ptesync": : :"memory");
+
+       for (set = 0; set < num_sets; set++)
+               tlbiel_hash_set_isa206(set, is);
+
+       asm volatile("ptesync": : :"memory");
+}
+
+static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
+{
+       unsigned int set;
+
+       asm volatile("ptesync": : :"memory");
+
+       /*
+        * Flush the first set of the TLB, and any caching of partition table
+        * entries. Then flush the remaining sets of the TLB. Hash mode uses
+        * partition scoped TLB translations.
+        */
+       tlbiel_hash_set_isa300(0, is, 0, 2, 0);
+       for (set = 1; set < num_sets; set++)
+               tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
+       /*
+        * Now invalidate the process table cache.
+        *
+        * From ISA v3.0B p. 1078:
+        *     The following forms are invalid.
+        *      * PRS=1, R=0, and RIC!=2 (The only process-scoped
+        *        HPT caching is of the Process Table.)
+        */
+       tlbiel_hash_set_isa300(0, is, 0, 2, 1);
+
+       asm volatile("ptesync": : :"memory");
+}
+
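+/*
+ * Flush the entire local TLB for hash. The action selects the IS field
+ * (global vs. LPID scope) and the CPU generation selects the sequence.
+ */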
+void hash__tlbiel_all(unsigned int action)
+{
+       unsigned int is;
+
+       switch (action) {
+       case TLB_INVAL_SCOPE_GLOBAL:
+               is = 3;
+               break;
+       case TLB_INVAL_SCOPE_LPID:
+               is = 2;
+               break;
+       default:
+               BUG();
+       }
+
+       if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+               tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
+       else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+               tlbiel_all_isa206(POWER8_TLB_SETS, is);
+       else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
+               tlbiel_all_isa206(POWER7_TLB_SETS, is);
+       else
+               WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
+
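+       /* Invalidate the ERAT (effective-to-real translation cache) too. */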
+       asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+}
+
 static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
                                                int apsize, int ssize)
 {
 
        pr_info("Initializing hash mmu with SLB\n");
        /* Initialize SLB management */
        slb_initialize();
+
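+       /*
+        * If we are running in hypervisor mode on POWER7 or later, flush
+        * the local TLB so the boot CPU starts with a clean TLB.
+        */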
+       if (cpu_has_feature(CPU_FTR_ARCH_206)
+                       && cpu_has_feature(CPU_FTR_HVMODE))
+               tlbiel_all();
 }
 
 #ifdef CONFIG_SMP
        }
        /* Initialize SLB */
        slb_initialize();
+
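+       /* Secondary CPUs flush their local TLB too, HV mode only. */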
+       if (cpu_has_feature(CPU_FTR_ARCH_206)
+                       && cpu_has_feature(CPU_FTR_HVMODE))
+               tlbiel_all();
 }
 #endif /* CONFIG_SMP */
 
 
 
        radix_init_iamr();
        radix_init_pgtable();
+
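+       /* Flush the local TLB at boot when running in hypervisor mode. */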
+       if (cpu_has_feature(CPU_FTR_HVMODE))
+               tlbiel_all();
 }
 
 void radix__early_init_mmu_secondary(void)
                radix_init_amor();
        }
        radix_init_iamr();
+
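+       /* Likewise on secondary CPUs. */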
+       if (cpu_has_feature(CPU_FTR_HVMODE))
+               tlbiel_all();
 }
 
 void radix__mmu_cleanup_all(void)
 
 #define RIC_FLUSH_PWC 1
 #define RIC_FLUSH_ALL 2
 
+/*
+ * tlbiel instruction for radix, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+                                       unsigned int pid,
+                                       unsigned int ric, unsigned int prs)
+{
+       unsigned long rb;
+       unsigned long rs;
+       unsigned int r = 1; /* radix format */
+
+       rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+       rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+
+       asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+                    : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
+                    : "memory");
+}
+
+static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
+{
+       unsigned int set;
+
+       asm volatile("ptesync": : :"memory");
+
+       /*
+        * Flush the first set of the TLB, and the entire Page Walk Cache
+        * and partition table entries. Then flush the remaining sets of the
+        * TLB.
+        */
+       tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
+       for (set = 1; set < num_sets; set++)
+               tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
+
+       /* Do the same for process scoped entries. */
+       tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
+       for (set = 1; set < num_sets; set++)
+               tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
+
+       asm volatile("ptesync": : :"memory");
+}
+
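+/*
+ * Flush the entire local TLB for radix. Only ISA v3.0 (POWER9) and later
+ * CPUs support radix, so anything earlier triggers the WARN below.
+ */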
+void radix__tlbiel_all(unsigned int action)
+{
+       unsigned int is;
+
+       switch (action) {
+       case TLB_INVAL_SCOPE_GLOBAL:
+               is = 3;
+               break;
+       case TLB_INVAL_SCOPE_LPID:
+               is = 2;
+               break;
+       default:
+               BUG();
+       }
+
+       if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+               tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
+       else
+               WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
+
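+       /* Finish by invalidating the ERAT as well. */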
+       asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+}
+
 static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
 {