As the message of commit 09e6b306f3ba ("arm64: cpufeature: discover
CPU support for MPAM") already states, if buggy firmware fails to
either enable MPAM or emulate the trap as if MPAM were disabled, the
kernel will simply fail to boot.  While upgrading the firmware would
be the best fix, we have hardware whose vendor has not responded two
months after we requested a firmware update.  Allow overriding the
feature detection so these devices don't become e-waste.
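
With this change, an affected system should be able to boot by passing

    arm64.nompam

on the kernel command line.  The parameter is an alias for the
id_aa64pfr0.mpam=0 and id_aa64pfr1.mpam_frac=0 ID register overrides,
which keep the EL2 init code from writing MPAM2_EL2 and skip the
MPAMIDR_EL1 read, so the broken trap is never taken.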
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Cc: Mingcong Bai <jeffbai@aosc.io>
Cc: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Cc: Ben Horgan <ben.horgan@arm.com>
Signed-off-by: Xi Ruoyao <xry111@xry111.site>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250602043723.216338-1-xry111@xry111.site
Signed-off-by: Will Deacon <will@kernel.org>
        arm64.nomops    [ARM64] Unconditionally disable Memory Copy and Memory
                        Set instructions support
 
+       arm64.nompam    [ARM64] Unconditionally disable Memory Partitioning And
+                       Monitoring support
+
        arm64.nomte     [ARM64] Unconditionally disable Memory Tagging Extension
                        support
 
 
 .Lskip_gcs_\@:
 .endm
 
-.macro __init_el2_mpam
-       /* Memory Partitioning And Monitoring: disable EL2 traps */
-       mrs     x1, id_aa64pfr0_el1
-       ubfx    x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
-       cbz     x0, .Lskip_mpam_\@              // skip if no MPAM
-       msr_s   SYS_MPAM2_EL2, xzr              // use the default partition
-                                               // and disable lower traps
-       mrs_s   x0, SYS_MPAMIDR_EL1
-       tbz     x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@  // skip if no MPAMHCR reg
-       msr_s   SYS_MPAMHCR_EL2, xzr            // clear TRAP_MPAMIDR_EL1 -> EL2
-.Lskip_mpam_\@:
-.endm
-
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
        __init_el2_stage2
        __init_el2_gicv3
        __init_el2_hstr
-       __init_el2_mpam
        __init_el2_nvhe_idregs
        __init_el2_cptr
        __init_el2_fgt
 #endif
 
 .macro finalise_el2_state
+       check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2
+
+.Linit_mpam_\@:
+       msr_s   SYS_MPAM2_EL2, xzr              // use the default partition
+                                               // and disable lower traps
+       mrs_s   x0, SYS_MPAMIDR_EL1
+       tbz     x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@  // skip if no MPAMHCR reg
+       msr_s   SYS_MPAMHCR_EL2, xzr            // clear TRAP_MPAMIDR_EL1 -> EL2
+
+.Lskip_mpam_\@:
        check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 
 .Linit_sve_\@: /* SVE register access */
 
                cpacr_restore(cpacr);
        }
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
+       if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
                init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
+       }
 
        if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
                init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
                cpacr_restore(cpacr);
        }
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
+       if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
                taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
                                        info->reg_mpamidr, boot->reg_mpamidr);
        }
 
        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
                __cpuinfo_store_cpu_32bit(&info->aarch32);
 
-       if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
-               info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
+       /*
+        * Reading MPAMIDR_EL1 into info->reg_mpamidr is deferred to
+        * {init,update}_cpu_features() because we don't want to read it (and
+        * trigger a trap on buggy firmware) if an id_aa64pfr0 override is
+        * used to unconditionally disable MPAM.
+        */
 
        if (IS_ENABLED(CONFIG_ARM64_SME) &&
            id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
 
        .fields         = {
                FIELD("sve", ID_AA64PFR0_EL1_SVE_SHIFT, pfr0_sve_filter),
                FIELD("el0", ID_AA64PFR0_EL1_EL0_SHIFT, NULL),
+               FIELD("mpam", ID_AA64PFR0_EL1_MPAM_SHIFT, NULL),
                {}
        },
 };
                FIELD("gcs", ID_AA64PFR1_EL1_GCS_SHIFT, NULL),
                FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
                FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
+               FIELD("mpam_frac", ID_AA64PFR1_EL1_MPAM_frac_SHIFT, NULL),
                {}
        },
 };
        { "rodata=off",                 "arm64_sw.rodataoff=1" },
        { "arm64.nolva",                "id_aa64mmfr2.varange=0" },
        { "arm64.no32bit_el0",          "id_aa64pfr0.el0=1" },
+       { "arm64.nompam",               "id_aa64pfr0.mpam=0 id_aa64pfr1.mpam_frac=0" },
 };
 
 static int __init parse_hexdigit(const char *p, u64 *v)