          * primary lowlevel boot path:
         *
         *  Register   Scope                      Purpose
+        *  x19        primary_entry() .. start_kernel()        whether we entered with the MMU on
         *  x20        primary_entry() .. __primary_switch()    CPU boot mode
         *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
         *  x22        create_idmap() .. start_kernel()         ID map VA of the DT blob
         *  x28        create_idmap()                           callee preserved temp register
         */
 SYM_CODE_START(primary_entry)
+       bl      record_mmu_state
        bl      preserve_boot_args
        bl      init_kernel_el                  // w0=cpu_boot_mode
        mov     x20, x0
        b       __primary_switch
 SYM_CODE_END(primary_entry)
 
+SYM_CODE_START_LOCAL(record_mmu_state)
+       mrs     x19, CurrentEL
+       cmp     x19, #CurrentEL_EL2
+       mrs     x19, sctlr_el1                  // default: we entered at EL1
+       b.ne    0f
+       mrs     x19, sctlr_el2                  // otherwise, use the EL2 SCTLR
+0:     tst     x19, #SCTLR_ELx_C               // Z := (C == 0)
+       and     x19, x19, #SCTLR_ELx_M          // isolate M bit
+       csel    x19, xzr, x19, eq               // clear x19 if Z
+       ret
+SYM_CODE_END(record_mmu_state)
+
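For readers who prefer C, here is a minimal sketch of the value record_mmu_state leaves in x19, assuming the architectural bit positions SCTLR_ELx.M = bit 0 and SCTLR_ELx.C = bit 2; the function and SKETCH_* names are hypothetical, not kernel code:

#include <stdint.h>

#define SKETCH_SCTLR_M  (UINT64_C(1) << 0)      /* MMU enable bit */
#define SKETCH_SCTLR_C  (UINT64_C(1) << 2)      /* data cache enable bit */

/*
 * Returns what record_mmu_state leaves in x19: SCTLR_ELx.M as sampled at the
 * exception level we were entered at, but only if the data cache (C) is also
 * enabled; otherwise 0, i.e. treat the boot as if the MMU were off.
 */
static uint64_t record_mmu_state_sketch(uint64_t sctlr_at_entry_el)
{
        if (!(sctlr_at_entry_el & SKETCH_SCTLR_C))      /* tst; Z := (C == 0) */
                return 0;                               /* csel x19, xzr, x19, eq */
        return sctlr_at_entry_el & SKETCH_SCTLR_M;      /* and x19, x19, #SCTLR_ELx_M */
}
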
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
  */
 SYM_CODE_START_LOCAL(preserve_boot_args)
        mov     x21, x0                         // x21=FDT

        adr_l   x0, boot_args                   // record the contents of
        stp     x21, x1, [x0]                   // x0 .. x3 at kernel entry
        stp     x2, x3, [x0, #16]
 
+       cbnz    x19, 0f                         // skip cache invalidation if MMU is on
        dmb     sy                              // needed before dc ivac with
                                                // MMU off
 
        add     x1, x0, #0x20                   // 4 x 8 bytes
        b       dcache_inval_poc                // tail call
+0:     str_l   x19, mmu_enabled_at_boot, x0    // record that we entered with the MMU on
+       ret
 SYM_CODE_END(preserve_boot_args)
 
 SYM_FUNC_START_LOCAL(clear_page_tables)
 
 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
        mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x0
        isb
        mov_q   x0, INIT_PSTATE_EL1
        cbz     x0, 1f
 
        /* Set a sane SCTLR_EL1, the VHE way */
+       pre_disable_mmu_workaround
        msr_s   SYS_SCTLR_EL12, x1
        mov     x2, #BOOT_CPU_FLAG_E2H
        b       2f
 
 1:
+       pre_disable_mmu_workaround
        msr     sctlr_el1, x1
        mov     x2, xzr
 2:
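
For context on the three pre_disable_mmu_workaround insertions above: this is an existing macro from arch/arm64/include/asm/assembler.h that emits an ISB immediately before an MSR that clears SCTLR_ELx.M, as required by the workaround for Qualcomm Falkor erratum E1041. Its definition is roughly as follows (quoted from memory, so verify against the tree):

        .macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
        isb
#endif
        .endm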
 
 static struct resource *standard_resources;
 
 phys_addr_t __fdt_pointer __initdata;
+u64 mmu_enabled_at_boot __initdata;
 
 /*
  * Standard memory resources
        xen_early_init();
        efi_init();
 
-       if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
-            pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+       if (!efi_enabled(EFI_BOOT)) {
+               if ((u64)_text % MIN_KIMG_ALIGN)
+                       pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+               WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND,
+                          FW_BUG "Booted with MMU enabled!");
+       }
 
        arm64_memblock_init();
 
        return 0;
 }
 device_initcall(register_arm64_panic_block);
+
+static int __init check_mmu_enabled_at_boot(void)
+{
+       if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot)
+               panic("Non-EFI boot detected with MMU and caches enabled");
+       return 0;
+}
+device_initcall_sync(check_mmu_enabled_at_boot);