        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
 }
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                     const char *hyp_vecs_start,
-                                     const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+                                   const char *hyp_vecs_start,
+                                   const char *hyp_vecs_end)
 {
        static DEFINE_RAW_SPINLOCK(bp_lock);
        int cpu, slot = -1;
 #define __smccc_workaround_1_smc_start         NULL
 #define __smccc_workaround_1_smc_end           NULL
 
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
 {
 }
 #endif /* CONFIG_KVM_INDIRECT_VECTORS */
 
-static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
-                                    bp_hardening_cb_t fn,
-                                    const char *hyp_vecs_start,
-                                    const char *hyp_vecs_end)
-{
-       u64 pfr0;
-
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
-       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
-       if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
-               return;
-
-       __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
 #include <uapi/linux/psci.h>
 #include <linux/arm-smccc.h>
 #include <linux/psci.h>
 }
 early_param("nospectre_v2", parse_nospectre_v2);
 
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+/*
+ * -1: No workaround
+ *  0: No workaround required
+ *  1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
 {
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();
 
-       if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-               return;
-
-       if (__nospectre_v2) {
-               pr_info_once("spectrev2 mitigation disabled by command line option\n");
-               return;
-       }
-
        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-               return;
+               return -1;
 
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
-                       return;
+                       return -1;
                cb = call_hvc_arch_workaround_1;
                /* This is a guest, no need to patch KVM vectors */
                smccc_start = NULL;
                smccc_end = NULL;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
-                       return;
+                       return -1;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;
 
        default:
-               return;
+               return -1;
        }
 
        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;
 
-       install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+       install_bp_hardening_cb(cb, smccc_start, smccc_end);
 
-       return;
+       return 1;
 }
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
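
For readers following along, here is a minimal standalone sketch of the return convention documented above: the workaround query goes out over whichever PSCI conduit is in use, a negative answer means the firmware offers no workaround, and success means the hardening callback was installed. This is ordinary userspace C, not kernel code; probe_firmware() and the conduit enum are invented stand-ins, and the 0 ("no workaround required") case is part of the convention but is not produced by this particular path.

/*
 * Standalone model of the -1 / 0 / 1 contract of detect_harden_bp_fw().
 * probe_firmware() is a made-up stub standing in for the SMCCC 1.1
 * ARCH_FEATURES query for ARM_SMCCC_ARCH_WORKAROUND_1.
 */
#include <stdio.h>

enum conduit { CONDUIT_NONE, CONDUIT_HVC, CONDUIT_SMC };

/* Stub: pretend the firmware answered the ARCH_FEATURES query with 'a0'. */
static long probe_firmware(enum conduit c, long a0)
{
        return (c == CONDUIT_NONE) ? -1 : a0;
}

static int detect_harden_bp_fw_model(enum conduit c, long fw_answer)
{
        if (probe_firmware(c, fw_answer) < 0)
                return -1;      /* firmware offers no mitigation */
        /* the real code installs call_hvc/smc_arch_workaround_1 here */
        return 1;
}

int main(void)
{
        printf("HVC, fw answers 0  -> %d\n", detect_harden_bp_fw_model(CONDUIT_HVC, 0));
        printf("SMC, fw answers -1 -> %d\n", detect_harden_bp_fw_model(CONDUIT_SMC, -1));
        printf("no conduit         -> %d\n", detect_harden_bp_fw_model(CONDUIT_NONE, 0));
        return 0;
}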
 
        CAP_MIDR_RANGE_LIST(midr_list)
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-
 /*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
  */
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-       MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
-       {},
+static const struct midr_range spectre_v2_safe_list[] = {
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       { /* sentinel */ }
 };
 
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       int need_wa;
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       /* If the CPU has CSV2 set, we're safe */
+       if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+                                                ID_AA64PFR0_CSV2_SHIFT))
+               return false;
+
+       /* Alternatively, we have a list of unaffected CPUs */
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+               return false;
+
+       /* Fallback to firmware detection */
+       need_wa = detect_harden_bp_fw();
+       if (!need_wa)
+               return false;
+
+       /* forced off */
+       if (__nospectre_v2) {
+               pr_info_once("spectrev2 mitigation disabled by command line option\n");
+               return false;
+       }
+
+       if (need_wa < 0)
+               pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+
+       return (need_wa > 0);
+}
 #endif
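
Taken together, the new matches hook checks, in order: the CSV2 field of ID_AA64PFR0_EL1, the MIDR safe list, firmware detection, and finally the nospectre_v2 command-line override. Below is a minimal standalone sketch of that decision order, with plain booleans standing in for the real register, MIDR and firmware helpers (all names here are hypothetical, not kernel APIs).

/*
 * Standalone model of check_branch_predictor()'s decision order.
 * fw_state follows the -1 / 0 / 1 convention of detect_harden_bp_fw().
 */
#include <stdbool.h>
#include <stdio.h>

static bool needs_bp_hardening(bool csv2, bool on_safe_list,
                               int fw_state, bool nospectre_v2)
{
        if (csv2)               /* CPU advertises it is not vulnerable */
                return false;
        if (on_safe_list)       /* known-unaffected implementation */
                return false;
        if (!fw_state)          /* firmware says no workaround required */
                return false;
        if (nospectre_v2)       /* mitigation disabled on the command line */
                return false;
        if (fw_state < 0)
                fprintf(stderr, "ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
        return fw_state > 0;    /* capability set only if a workaround was installed */
}

int main(void)
{
        printf("safe-listed CPU        -> %d\n", needs_bp_hardening(false, true, 1, false));
        printf("affected, fw helps     -> %d\n", needs_bp_hardening(false, false, 1, false));
        printf("affected, no fw help   -> %d\n", needs_bp_hardening(false, false, -1, false));
        return 0;
}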
 
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
-               .cpu_enable = enable_smccc_arch_workaround_1,
-               ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = check_branch_predictor,
        },
 #endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS