 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
 
+#define MMU_NO_CONTEXT ~0UL
+
 #include <asm/book3s/64/tlbflush-hash.h>
+#include <asm/book3s/64/tlbflush-radix.h>
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
+       if (radix_enabled())
+               return radix__flush_tlb_range(vma, start, end);
        return hash__flush_tlb_range(vma, start, end);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
 {
+       if (radix_enabled())
+               return radix__flush_tlb_kernel_range(start, end);
        return hash__flush_tlb_kernel_range(start, end);
 }
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
+       if (radix_enabled())
+               return radix__local_flush_tlb_mm(mm);
        return hash__local_flush_tlb_mm(mm);
 }
 
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
 {
+       if (radix_enabled())
+               return radix__local_flush_tlb_page(vma, vmaddr);
        return hash__local_flush_tlb_page(vma, vmaddr);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
 {
+       if (radix_enabled())
+               return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page_nohash(vma, vmaddr);
 }
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
+       if (radix_enabled())
+               return radix__tlb_flush(tlb);
        return hash__tlb_flush(tlb);
 }
 
 #ifdef CONFIG_SMP
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
+       if (radix_enabled())
+               return radix__flush_tlb_mm(mm);
        return hash__flush_tlb_mm(mm);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
 {
+       if (radix_enabled())
+               return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
 }
 #else
 
--- /dev/null
+++ b/arch/powerpc/mm/tlb-radix.c
+/*
+ * TLB flush routines for radix kernels.
+ *
+ * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/memblock.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+
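+/*
+ * tlbiel (0x7c000224) and tlbie (0x7c000264) are hand-encoded as .long
+ * constants below, since assemblers may not yet understand the ISA 3.0
+ * operand forms. The shifts place each operand in its architected
+ * field: RB in bits 11-15, R in bit 16, PRS in bit 17, RIC in bits
+ * 18-19 and RS in bits 21-25.
+ */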
+static inline void __tlbiel_pid(unsigned long pid, int set)
+{
+       unsigned long rb, rs, ric, prs, r;
+
+       rb = PPC_BIT(53); /* IS = 1 */
+       rb |= set << PPC_BITLSHIFT(51);
+       rs = pid << PPC_BITLSHIFT(31);
+       prs = 1; /* process scoped */
+       r = 1;   /* radix format */
+       ric = 2;  /* invalidate all: TLB entries and page walk cache */
+
+       asm volatile("ptesync": : :"memory");
+       asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
+                    "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+                    : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+       asm volatile("ptesync": : :"memory");
+}
+
+/*
+ * We use 128 sets in radix mode and 256 sets in HPT mode.
+ */
+static inline void _tlbiel_pid(unsigned long pid)
+{
+       int set;
+
+       for (set = 0; set < POWER9_TLB_SETS_RADIX; set++)
+               __tlbiel_pid(pid, set);
+}
+
+static inline void _tlbie_pid(unsigned long pid)
+{
+       unsigned long rb, rs, ric, prs, r;
+
+       rb = PPC_BIT(53); /* IS = 1 */
+       rs = pid << PPC_BITLSHIFT(31);
+       prs = 1; /* process scoped */
+       r = 1;   /* radix format */
+       ric = 2;  /* invalidate all: TLB entries and page walk cache */
+
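+       /*
+        * The leading ptesync makes prior page table updates visible
+        * before the invalidation; the trailing eieio/tlbsync/ptesync
+        * waits for the broadcast tlbie to complete on all processors.
+        */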
+       asm volatile("ptesync": : :"memory");
+       asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
+                    "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+                    : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+       asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+
+static inline void _tlbiel_va(unsigned long va, unsigned long pid,
+                             unsigned long ap)
+{
+       unsigned long rb, rs, ric, prs, r;
+
+       rb = va & ~(PPC_BITMASK(52, 63));
+       rb |= ap << PPC_BITLSHIFT(58);
+       rs = pid << PPC_BITLSHIFT(31);
+       prs = 1; /* process scoped */
+       r = 1;   /* radix format */
+       ric = 0;  /* invalidate the TLB entry only, not the page walk cache */
+
+       asm volatile("ptesync": : :"memory");
+       asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
+                    "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+                    : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+       asm volatile("ptesync": : :"memory");
+}
+
+static inline void _tlbie_va(unsigned long va, unsigned long pid,
+                            unsigned long ap)
+{
+       unsigned long rb, rs, ric, prs, r;
+
+       rb = va & ~(PPC_BITMASK(52, 63));
+       rb |= ap << PPC_BITLSHIFT(58);
+       rs = pid << PPC_BITLSHIFT(31);
+       prs = 1; /* process scoped */
+       r = 1;   /* radix format */
+       ric = 0;  /* invalidate the TLB entry only, not the page walk cache */
+
+       asm volatile("ptesync": : :"memory");
+       asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
+                    "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+                    : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+       asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+
+/*
+ * Base TLB flushing operations:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ *  - local_* variants of page and mm only apply to the current
+ *    processor
+ */
+void radix__local_flush_tlb_mm(struct mm_struct *mm)
+{
+       unsigned long pid;
+
+       preempt_disable();
+       pid = mm->context.id;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_pid(pid);
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_mm);
+
+void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+                           unsigned long ap, int nid)
+{
+       unsigned long pid;
+
+       preempt_disable();
+       pid = mm ? mm->context.id : 0;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_va(vmaddr, pid, ap);
+       preempt_enable();
+}
+
+void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+       radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
+                              mmu_get_ap(mmu_virtual_psize), 0);
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_page);
+
+#ifdef CONFIG_SMP
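+/*
+ * Return true when the mm has only been used on hardware threads of
+ * the current core, in which case the core-local tlbiel is sufficient
+ * and the broadcast tlbie can be avoided.
+ */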
+static int mm_is_core_local(struct mm_struct *mm)
+{
+       return cpumask_subset(mm_cpumask(mm),
+                             topology_sibling_cpumask(smp_processor_id()));
+}
+
+void radix__flush_tlb_mm(struct mm_struct *mm)
+{
+       unsigned long pid;
+
+       preempt_disable();
+       pid = mm->context.id;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_pid(pid);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else {
+               _tlbiel_pid(pid);
+       }
+no_context:
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_mm);
+
+void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
+                      unsigned long ap, int nid)
+{
+       unsigned long pid;
+
+       preempt_disable();
+       pid = mm ? mm->context.id : 0;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto bail;
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_va(vmaddr, pid, ap);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else {
+               _tlbiel_va(vmaddr, pid, ap);
+       }
+bail:
+       preempt_enable();
+}
+
+void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+       radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
+                        mmu_get_ap(mmu_virtual_psize), 0);
+}
+EXPORT_SYMBOL(radix__flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
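+/*
+ * Kernel translations are done with PID 0 under radix, so flushing
+ * PID 0 drops all kernel TLB entries; start/end are not used to
+ * narrow the flush.
+ */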
+void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+       if (lock_tlbie)
+               raw_spin_lock(&native_tlbie_lock);
+       _tlbie_pid(0);
+       if (lock_tlbie)
+               raw_spin_unlock(&native_tlbie_lock);
+}
+EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush, because
+ * we use this in code paths where we don't track the page size.
+ */
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                           unsigned long end)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       radix__flush_tlb_mm(mm);
+}
+EXPORT_SYMBOL(radix__flush_tlb_range);
+
+void radix__tlb_flush(struct mmu_gather *tlb)
+{
+       struct mm_struct *mm = tlb->mm;
+
+       radix__flush_tlb_mm(mm);
+}