* return 1, indicating that the tlb requires preloading.
  */
 #define HUGETLB_NEED_PRELOAD
+
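+/* No MMU cleanup is needed here; the kexec code is handed a NULL hook */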
+#define mmu_cleanup_all NULL
+
 #endif
 
 #endif /* !__ASSEMBLY__ */
 
  * make it match the size of our bolted TLB area
  */
 extern u64 ppc64_rma_size;
+
+/* MMU cleanup functions used by kexec */
+extern void mmu_cleanup_all(void);
+extern void radix__mmu_cleanup_all(void);
 #endif /* CONFIG_PPC64 */
 
 struct mm_struct;
 
        const unsigned long *basep;
        const unsigned int *sizep;
 
-       if (!mmu_hash_ops.hpte_clear_all)
-               return -ENOENT;
-
        /*
         * Since we use the kernel fault handlers and paging code to
         * handle the virtual mode, we must make sure no destination
         * a toc is easier in C, so pass in what we can.
         */
        kexec_sequence(&kexec_stack, image->start, image,
-                       page_address(image->control_code_page),
-#ifdef CONFIG_PPC_STD_MMU
-                       mmu_hash_ops.hpte_clear_all
-#else
-                       NULL
-#endif
-       );
+                      page_address(image->control_code_page),
+                      mmu_cleanup_all);
        /* NOTREACHED */
 }
 
 
        return;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/* For use by kexec: tear down MMU state before handing over to the new kernel */
+void mmu_cleanup_all(void)
+{
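+       /* Radix and hash need different teardown; the hash hook may be absent */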
+       if (radix_enabled())
+               radix__mmu_cleanup_all();
+       else if (mmu_hash_ops.hpte_clear_all)
+               mmu_hash_ops.hpte_clear_all();
+}
 
        }
 }
 
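+/*
+ * Tear down the radix MMU state before kexec hands control to the
+ * next kernel.
+ */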
+void radix__mmu_cleanup_all(void)
+{
+       unsigned long lpcr;
+
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
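+               /*
+                * Bare metal only: under an LPAR the hypervisor owns LPCR
+                * and the partition table.  Clear LPCR[UPRT], zero the
+                * partition table pointer and flush stale translations.
+                */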
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
+               mtspr(SPRN_PTCR, 0);
+               radix__flush_tlb_all();
+       }
+}
+
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
 {