If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-       bool "Harden the branch predictor against aliasing attacks" if EXPERT
-       default y
-       help
-         Speculation attacks against some high-performance processors rely on
-         being able to manipulate the branch predictor for a victim context by
-         executing aliasing branches in the attacker context.  Such attacks
-         can be partially mitigated against by clearing internal branch
-         predictor state and limiting the prediction logic in some situations.
-
-         This config option will take CPU-specific actions to harden the
-         branch predictor against aliasing attacks and may rely on specific
-         instruction sequences or control bits being set by the system
-         firmware.
-
-         If unsure, say Y.
-
-config ARM64_SSBD
-       bool "Speculative Store Bypass Disable" if EXPERT
-       default y
-       help
-         This enables mitigation of the bypassing of previous stores
-         by speculative loads.
-
-         If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
        bool "Apply r/o permissions of VM areas also to their linear aliases"
        default y
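
The two Kconfig entries deleted above no longer gate anything: both mitigations are now built unconditionally, with enablement decided at runtime (command line, firmware and CPU feature detection, as the later hunks show). As a rough illustration, hypothetical code not taken from the patch, the Speculative Store Bypass the second entry describes targets sequences like this, where a load may speculatively execute before an older store to the same location has resolved:

	/* Illustrative victim gadget; all names here are hypothetical. */
	void victim(char **slot, char *safe_ptr, char *probe, volatile char *sink)
	{
		*slot = safe_ptr;	/* store overwrites a stale pointer */
		char *p = *slot;	/* load may speculatively bypass the
					 * store and observe the old value  */
		*sink = probe[*p & 0x3f];	/* dependent access leaves a
						 * timeable cache footprint  */
	}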
 
 
 static inline int arm64_get_ssbd_state(void)
 {
-#ifdef CONFIG_ARM64_SSBD
        extern int ssbd_state;
        return ssbd_state;
-#else
-       return ARM64_SSBD_UNKNOWN;
-#endif
 }
 
 void arm64_set_ssbd_mitigation(bool state);
 
 }
 #endif
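
With the #else branch gone, arm64_get_ssbd_state() always reads the real ssbd_state, so ARM64_SSBD_UNKNOWN can only come from the state machine itself, never from a config fallback. A hypothetical in-kernel caller (a sketch, not from the patch) would now look like:

	static bool ssbd_mitigation_wanted(void)
	{
		switch (arm64_get_ssbd_state()) {
		case ARM64_SSBD_FORCE_ENABLE:
		case ARM64_SSBD_KERNEL:	/* per-task, see the prctl path */
			return true;
		default:
			return false;
		}
	}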
 
-#ifdef CONFIG_ARM64_SSBD
 DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 static inline int hyp_map_aux_data(void)
        }
        return 0;
 }
-#else
-static inline int hyp_map_aux_data(void)
-{
-       return 0;
-}
-#endif
 
 #define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
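
The body of hyp_map_aux_data() is elided above; presumably it walks each possible CPU and maps its arm64_ssbd_callback_required slot into the EL2 tables so the hyp code can read it. A sketch reconstructed under that assumption:

	static inline int hyp_map_aux_data(void)
	{
		int cpu, err;

		for_each_possible_cpu(cpu) {
			u64 *ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);

			/* one u64 per CPU, readable from hyp context */
			err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
			if (err)
				return err;
		}
		return 0;
	}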
 
 
        bp_hardening_cb_t       fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
        if (d->fn)
                d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-       return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)      { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
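
The elided middle of this hunk is the per-CPU accessor plus the apply helper whose tail (if (d->fn) d->fn();) is visible above. Its overall shape is roughly the following (a sketch, not verbatim from the patch):

	static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
	{
		return this_cpu_ptr(&bp_hardening_data);
	}

	static inline void arm64_apply_bp_hardening(void)
	{
		struct bp_hardening_data *d;

		/* only act on CPUs that actually need hardening */
		if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
			return;

		d = arm64_get_bp_hardening_data();
		if (d->fn)
			d->fn();
	}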
 
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o smccc-call.o       \
-                          syscall.o
+                          ssbd.o syscall.o
 
 targets                        += efi-entry.o
 
 obj-$(CONFIG_CRASH_DUMP)               += crash_dump.o
 obj-$(CONFIG_CRASH_CORE)               += crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)                += sdei.o
-obj-$(CONFIG_ARM64_SSBD)               += ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)           += pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)                += scs.o
 
 
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;
 
-       if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-               install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
+       install_bp_hardening_cb(cb, smccc_start, smccc_end);
        return 1;
 }
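
For reference, the Falkor callback selected just above stuffs the return-address predictor with harmless calls; reproduced here from memory of the same file, so treat it as a sketch:

	static void qcom_link_stack_sanitization(void)
	{
		u64 tmp;

		asm volatile("mov	%0, x30		\n"
			     ".rept	16		\n"
			     "bl	. + 4		\n"
			     ".endr			\n"
			     "mov	x30, %0		\n"
			     : "=&r" (tmp));
	}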
 
 {
        int conduit;
 
-       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-               pr_info_once("SSBD disabled by kernel configuration\n");
-               return;
-       }
-
        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
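
Note the inversion in the SSBS fast path: PSTATE.SSBS == 0 means stores are NOT bypassable, i.e. the mitigation is on. A sketch of the surrounding function, with the elided lines assumed:

	void arm64_set_ssbd_mitigation(bool state)
	{
		if (this_cpu_has_cap(ARM64_SSBS)) {
			if (state)
				asm volatile(SET_PSTATE_SSBS(0)); /* mitigation on  */
			else
				asm volatile(SET_PSTATE_SSBS(1)); /* mitigation off */
			return;
		}
		/* otherwise: fall back to the ARM_SMCCC_ARCH_WORKAROUND_2 call */
	}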
 
        __spectrev2_safe = false;
 
-       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-               __hardenbp_enab = false;
-               return false;
-       }
-
        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
-               if (IS_ENABLED(CONFIG_ARM64_SSBD))
-                       return sprintf(buf,
-                           "Mitigation: Speculative Store Bypass disabled via prctl\n");
+               return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }
 
        return sprintf(buf, "Vulnerable\n");
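
These strings surface through sysfs; from userspace the outcome of the logic above can be read back like so (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

		if (f && fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: Speculative
						 * Store Bypass disabled via prctl" */
		if (f)
			fclose(f);
		return 0;
	}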
 
        WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
 {
        if (user_mode(regs))
                arm64_set_ssbd_mitigation(true);
        }
 }
-#endif /* CONFIG_ARM64_SSBD */
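
For context, the handler shown in fragment above is wired up via an undef hook that matches the MSR SSBS, #imm encoding; its registration looks roughly like this (reconstructed from memory, so treat the mask and value as assumptions):

	static struct undef_hook ssbs_emulation_hook = {
		.instr_mask	= ~(1U << PSTATE_Imm_shift),
		.instr_val	= 0xd500401f | PSTATE_SSBS,	/* MSR SSBS, #imm */
		.fn		= ssbs_emulation_handler,
	};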
 
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
                .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
                .min_field_value = 1,
        },
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypassing Safe (SSBS)",
                .capability = ARM64_SSBS,
                .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
                .cpu_enable = cpu_enable_ssbs,
        },
-#endif
 #ifdef CONFIG_ARM64_CNP
        {
                .desc = "Common not Private translations",
 
         * them if required.
         */
        .macro  apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
        b       .L__asm_ssbd_skip\@
 alternative_cb_end
        nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
        .endm
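
In C terms, the nop that the alternative patches ("Patched to SMC/HVC #0" above) issues the SSBD firmware call, with x1 carrying the requested state, mirroring the call visible in the later KVM hunks. A sketch for the SMC conduit (function name hypothetical):

	#include <linux/arm-smccc.h>

	static void ssbd_conduit_call(bool enable)
	{
		/* result is ignored, hence the NULL res pointer */
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, NULL);
	}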
 
        .macro  kernel_entry, el, regsize = 64
        bl      trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
 1:
-#endif
        irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
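
The tbz above keys off bit 55 of the saved EL0 PC in x22: that bit selects between the TTBR0 and TTBR1 halves of the VA space, so a "user" PC with it set is suspicious and triggers do_el0_irq_bp_hardening. A hypothetical C equivalent of the check (helper name invented for illustration):

	#include <linux/bits.h>

	static inline bool pc_in_ttbr1_range(unsigned long pc)
	{
		return pc & BIT(55);	/* TTBR1 (kernel) half of the VA space */
	}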
 
          virtual machines.
 
 config KVM_INDIRECT_VECTORS
-       def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
+       def_bool RANDOMIZE_BASE
 
 endif # KVM
 
 
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
        b       wa2_end
 alternative_cb_end
 wa2_end:
        mov     x2, xzr
        mov     x1, xzr
-#endif
 
 wa_epilogue:
        mov     x0, xzr
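
The elided middle of this hunk records the guest's ARM_SMCCC_ARCH_WORKAROUND_2 request in its per-vCPU flags, to be replayed on the next world switch. In C it would amount to roughly the following sketch (function name hypothetical):

	static void record_guest_wa2_request(struct kvm_vcpu *vcpu, u64 arg)
	{
		if (arg)	/* guest asks for the mitigation... */
			vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
		else		/* ...or asks to turn it off */
			vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
	}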
 
 
 static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
        /*
         * The host runs with the workaround always present. If the
         * guest wants it disabled, so be it...
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
 }
 
 static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
        /*
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
 }
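
Both helpers above gate on __needs_ssbd_off(), which presumably tests the flag set by the hypercall path against the CPU capability; a sketch, not verbatim from the patch:

	static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
	{
		if (!cpus_have_final_cap(ARM64_SSBD))
			return false;

		return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
	}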
 
 static inline void __kvm_unexpected_el2_exception(void)