 #include <asm/udbg.h>
 #include <asm/code-patching.h>
 
+#include "internal.h"
+
+
 enum slb_index {
        LINEAR_INDEX    = 0, /* Kernel linear map  (0xc000000000000000) */
        KSTACK_INDEX    = 1, /* Kernel stack map */
        return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
+bool stress_slb_enabled __initdata;
+
+static int __init parse_stress_slb(char *p)
+{
+       stress_slb_enabled = true;
+       return 0;
+}
+early_param("stress_slb", parse_stress_slb);
+
+__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
+
 static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
         * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
         * ignores all other bits from 0-27, so just clear them all.
         */
-       ea &= ~((1UL << 28) - 1);
+       ea &= ~((1UL << SID_SHIFT) - 1);
        asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
        WARN_ON(present == (tmp == 0));
        asm volatile("slbmte %0,%0; slbia" : : "r" (0));
 }
 
+static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
+{
+       struct slb_shadow *p = get_slb_shadow();
+       unsigned long ksp_esid_data, ksp_vsid_data;
+       u32 ih;
+
+       /*
+        * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
+        * information created with Class=0 entries, which we use for kernel
+        * SLB entries (the SLB entries themselves are still invalidated).
+        *
+        * Older processors will ignore this optimisation. Over-invalidation
+        * is fine because we never rely on lookaside information existing.
+        */
+       if (preserve_kernel_lookaside)
+               ih = 1;
+       else
+               ih = 0;
+
+       ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
+       ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
+
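+       /*
+        * slbia leaves SLB entry 0 (the kernel linear mapping) intact, so the
+        * kernel stack entry is the only bolted entry that must be put back,
+        * from the SLB shadow save area.
+        */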
+       asm volatile(PPC_SLBIA(%0)"     \n"
+                    "slbmte    %1, %2  \n"
+                    :: "i" (ih),
+                       "r" (ksp_vsid_data),
+                       "r" (ksp_esid_data)
+                    : "memory");
+}
+
 /*
  * This flushes non-bolted entries, it can be run in virtual mode. Must
  * be called with interrupts disabled.
  */
 void slb_flush_and_restore_bolted(void)
 {
-       struct slb_shadow *p = get_slb_shadow();
-
        BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
 
        WARN_ON(!irqs_disabled());
         */
        hard_irq_disable();
 
-       asm volatile("isync\n"
-                    "slbia\n"
-                    "slbmte  %0, %1\n"
-                    "isync\n"
-                    :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
-                       "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
-                    : "memory");
+       isync();
+       __slb_flush_and_restore_bolted(false);
+       isync();
+
        assert_slb_presence(true, get_paca()->kstack);
 
        get_paca()->slb_cache_ptr = 0;
        local_irq_enable();
 }
 
+static void slb_cache_slbie_kernel(unsigned int index)
+{
+       unsigned long slbie_data = get_paca()->slb_cache[index];
+       unsigned long ksp = get_paca()->kstack;
+
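+       /*
+        * The cache holds only the kernel ESID; rebuild the effective address
+        * for slbie. Never invalidate the segment containing the current
+        * kernel stack: slbie works by EA, and the stack translation must
+        * stay resident.
+        */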
+       slbie_data <<= SID_SHIFT;
+       slbie_data |= 0xc000000000000000ULL;
+       if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
+               return;
+       slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;
+
+       asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+static void slb_cache_slbie_user(unsigned int index)
+{
+       unsigned long slbie_data = get_paca()->slb_cache[index];
+
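+       /*
+        * Rebuild the user effective address from the cached ESID. The
+        * segment size (B) and class (C) fields must match the values the
+        * entry was created with for the invalidation to be effective.
+        */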
+       slbie_data <<= SID_SHIFT;
+       slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
+       slbie_data |= SLBIE_C; /* user slbs have C=1 */
+
+       asm volatile("slbie %0" : : "r" (slbie_data));
+}
 
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
-       asm volatile("isync" : : : "memory");
-       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+       isync();
+       if (stress_slb()) {
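+               /*
+                * Under stress_slb, flush everything except the bolted
+                * entries and discard kernel lookaside information as
+                * well on every context switch, then reset the kernel
+                * entry cache.
+                */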
+               __slb_flush_and_restore_bolted(false);
+               isync();
+               get_paca()->slb_cache_ptr = 0;
+               get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+
+       } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                /*
                 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
                 * associated lookaside structures, which matches what
                 * cache.
                 */
                asm volatile(PPC_SLBIA(3));
+
        } else {
                unsigned long offset = get_paca()->slb_cache_ptr;
 
                if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
                    offset <= SLB_CACHE_ENTRIES) {
-                       unsigned long slbie_data = 0;
-
-                       for (i = 0; i < offset; i++) {
-                               unsigned long ea;
-
-                               ea = (unsigned long)
-                                       get_paca()->slb_cache[i] << SID_SHIFT;
-                               /*
-                                * Could assert_slb_presence(true) here, but
-                                * hypervisor or machine check could have come
-                                * in and removed the entry at this point.
-                                */
-
-                               slbie_data = ea;
-                               slbie_data |= user_segment_size(slbie_data)
-                                               << SLBIE_SSIZE_SHIFT;
-                               slbie_data |= SLBIE_C; /* user slbs have C=1 */
-                               asm volatile("slbie %0" : : "r" (slbie_data));
-                       }
+                       /*
+                        * Could assert_slb_presence(true) here, but
+                        * hypervisor or machine check could have come
+                        * in and removed the entry at this point.
+                        */
+
+                       for (i = 0; i < offset; i++)
+                               slb_cache_slbie_user(i);
 
                        /* Workaround POWER5 < DD2.1 issue */
                        if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
-                               asm volatile("slbie %0" : : "r" (slbie_data));
+                               slb_cache_slbie_user(0);
 
                } else {
-                       struct slb_shadow *p = get_slb_shadow();
-                       unsigned long ksp_esid_data =
-                               be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
-                       unsigned long ksp_vsid_data =
-                               be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
-
-                       asm volatile(PPC_SLBIA(1) "\n"
-                                    "slbmte    %0,%1\n"
-                                    "isync"
-                                    :: "r"(ksp_vsid_data),
-                                       "r"(ksp_esid_data));
+                       /* Flush but retain kernel lookaside information */
+                       __slb_flush_and_restore_bolted(true);
+                       isync();
 
                        get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
                }
         * address accesses by the kernel (user mode won't happen until
         * rfid, which is safe).
         */
-       asm volatile("isync" : : : "memory");
+       isync();
 }
 
 void slb_set_size(u16 size)
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                return; /* ISAv3.0B and later does not use slb_cache */
 
+       /*
+        * With stress_slb, the slb_cache is repurposed to track inserted
+        * kernel entries, so user entries are not recorded here.
+        */
+       if (stress_slb())
+               return;
+
        /*
         * Now update slb cache entries
         */
                 * We have space in slb cache for optimized switch_slb().
                 * Top 36 bits from esid_data as per ISA
                 */
-               local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
+               local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
                local_paca->slb_cache_ptr++;
        } else {
                /*
         * accesses user memory before it returns to userspace with rfid.
         */
        assert_slb_presence(false, ea);
+       if (stress_slb()) {
+               int slb_cache_index = local_paca->slb_cache_ptr;
+
+               /*
+                * stress_slb() does not use slb cache, repurpose as a
+                * cache of inserted (non-bolted) kernel SLB entries. All
+                * non-bolted kernel entries are flushed on any user fault,
+                * or if there are already 3 non-bolted kernel entries.
+                */
+               BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
+               if (!kernel || slb_cache_index == 3) {
+                       int i;
+
+                       for (i = 0; i < slb_cache_index; i++)
+                               slb_cache_slbie_kernel(i);
+                       slb_cache_index = 0;
+               }
+
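+               /* Remember this kernel entry so it can be invalidated later. */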
+               if (kernel)
+                       local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
+               local_paca->slb_cache_ptr = slb_cache_index;
+       }
        asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
        barrier();