        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-       const char      *str;
-       int             state;
-} ssbd_options[] = {
-       { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
-       { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
-       { "kernel",     ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-       int i;
-
-       if (!buf || !buf[0])
-               return -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-               int len = strlen(ssbd_options[i].str);
-
-               if (strncmp(buf, ssbd_options[i].str, len))
-                       continue;
-
-               ssbd_state = ssbd_options[i].state;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-                                      __le32 *origptr, __le32 *updptr,
-                                      int nr_inst)
-{
-       u32 insn;
-
-       BUG_ON(nr_inst != 1);
-
-       switch (arm_smccc_1_1_get_conduit()) {
-       case SMCCC_CONDUIT_HVC:
-               insn = aarch64_insn_get_hvc_value();
-               break;
-       case SMCCC_CONDUIT_SMC:
-               insn = aarch64_insn_get_smc_value();
-               break;
-       default:
-               return;
-       }
-
-       *updptr = cpu_to_le32(insn);
-}
+int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
 
 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
 
-void arm64_set_ssbd_mitigation(bool state)
-{
-       int conduit;
-
-       if (this_cpu_has_cap(ARM64_SSBS)) {
-               if (state)
-                       asm volatile(SET_PSTATE_SSBS(0));
-               else
-                       asm volatile(SET_PSTATE_SSBS(1));
-               return;
-       }
-
-       conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
-                                      NULL);
-
-       WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-                                   int scope)
-{
-       struct arm_smccc_res res;
-       bool required = true;
-       s32 val;
-       bool this_cpu_safe = false;
-       int conduit;
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-       if (cpu_mitigations_off())
-               ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-       /* delay setting __ssb_safe until we get a firmware response */
-       if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-               this_cpu_safe = true;
-
-       if (this_cpu_has_cap(ARM64_SSBS)) {
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               required = false;
-               goto out_printmsg;
-       }
-
-       conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                      ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-
-       if (conduit == SMCCC_CONDUIT_NONE) {
-               ssbd_state = ARM64_SSBD_UNKNOWN;
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-       }
-
-       val = (s32)res.a0;
-
-       switch (val) {
-       case SMCCC_RET_NOT_SUPPORTED:
-               ssbd_state = ARM64_SSBD_UNKNOWN;
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-
-       /* machines with mixed mitigation requirements must not return this */
-       case SMCCC_RET_NOT_REQUIRED:
-               pr_info_once("%s mitigation not required\n", entry->desc);
-               ssbd_state = ARM64_SSBD_MITIGATED;
-               return false;
-
-       case SMCCC_RET_SUCCESS:
-               __ssb_safe = false;
-               required = true;
-               break;
-
-       case 1: /* Mitigation not required on this CPU */
-               required = false;
-               break;
-
-       default:
-               WARN_ON(1);
-               if (!this_cpu_safe)
-                       __ssb_safe = false;
-               return false;
-       }
-
-       switch (ssbd_state) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               arm64_set_ssbd_mitigation(false);
-               required = false;
-               break;
-
-       case ARM64_SSBD_KERNEL:
-               if (required) {
-                       __this_cpu_write(arm64_ssbd_callback_required, 1);
-                       arm64_set_ssbd_mitigation(true);
-               }
-               break;
-
-       case ARM64_SSBD_FORCE_ENABLE:
-               arm64_set_ssbd_mitigation(true);
-               required = true;
-               break;
-
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-out_printmsg:
-       switch (ssbd_state) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               pr_info_once("%s disabled from command-line\n", entry->desc);
-               break;
-
-       case ARM64_SSBD_FORCE_ENABLE:
-               pr_info_once("%s forced from command-line\n", entry->desc);
-               break;
-       }
-
-       return required;
-}
-
-static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap)
-{
-       if (ssbd_state != ARM64_SSBD_FORCE_DISABLE)
-               cap->matches(cap, SCOPE_LOCAL_CPU);
-}
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-       MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
-       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
-       {},
-};
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
        },
 #endif
        {
-               .desc = "Speculative Store Bypass Disable",
+               .desc = "Spectre-v4",
                .capability = ARM64_SPECTRE_V4,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-               .matches = has_ssbd_mitigation,
-               .cpu_enable = cpu_enable_ssbd_mitigation,
-               .midr_range_list = arm64_ssb_cpus,
+               .matches = has_spectre_v4,
+               .cpu_enable = spectre_v4_enable_mitigation,
        },
 #ifdef CONFIG_ARM64_ERRATUM_1418040
        {
        {
        }
 };
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-               struct device_attribute *attr, char *buf)
-{
-       if (__ssb_safe)
-               return sprintf(buf, "Not affected\n");
-
-       switch (ssbd_state) {
-       case ARM64_SSBD_KERNEL:
-       case ARM64_SSBD_FORCE_ENABLE:
-               return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
-       }
-
-       return sprintf(buf, "Vulnerable\n");
-}
 
        update_mitigation_state(&spectre_v2_state, state);
 }
 
-/* Spectre v4 prctl */
-static void ssbd_ssbs_enable(struct task_struct *task)
+/*
+ * Spectre v4.
+ *
+ * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
+ * either:
+ *
+ * - Mitigated in hardware and listed in our "safe list".
+ * - Mitigated in hardware via PSTATE.SSBS.
+ * - Mitigated in software by firmware (sometimes referred to as SSBD).
+ *
+ * Wait, that doesn't sound so bad, does it? Keep reading...
+ *
+ * A major source of headaches is that the software mitigation is enabled on a
+ * per-task basis, but can also be forced on for the kernel, necessitating
+ * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
+ * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
+ * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
+ * so you can have systems that have both firmware and SSBS mitigations. This
+ * means we actually have to reject late onlining of CPUs with mitigations if
+ * all of the currently onlined CPUs are safelisted, as the mitigation tends to
+ * be opt-in for userspace. Yes, really, the cure is worse than the disease.
+ *
+ * The only good part is that if the firmware mitigation is present, then it is
+ * present for all CPUs, meaning we don't have to worry about late onlining of a
+ * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
+ *
+ * Give me a VAX-11/780 any day of the week...
+ */
+static enum mitigation_state spectre_v4_state;
+
+/* This is the per-cpu state tracking whether we need to talk to firmware */
+DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+enum spectre_v4_policy {
+       SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
+       SPECTRE_V4_POLICY_MITIGATION_ENABLED,
+       SPECTRE_V4_POLICY_MITIGATION_DISABLED,
+};
+
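+/* Zero-initialised, so the default policy is MITIGATION_DYNAMIC */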
+static enum spectre_v4_policy __read_mostly __spectre_v4_policy;
+
+static const struct spectre_v4_param {
+       const char              *str;
+       enum spectre_v4_policy  policy;
+} spectre_v4_params[] = {
+       { "force-on",   SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
+       { "force-off",  SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
+       { "kernel",     SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
+};
+static int __init parse_spectre_v4_param(char *str)
 {
-       u64 val = is_compat_thread(task_thread_info(task)) ?
-                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+       int i;
 
-       task_pt_regs(task)->pstate |= val;
-}
+       if (!str || !str[0])
+               return -EINVAL;
 
-static void ssbd_ssbs_disable(struct task_struct *task)
-{
-       u64 val = is_compat_thread(task_thread_info(task)) ?
-                 PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+       for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
+               const struct spectre_v4_param *param = &spectre_v4_params[i];
+
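+               /* Accept the parameter on a prefix match, as the old ssbd= parser did */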
+               if (strncmp(str, param->str, strlen(param->str)))
+                       continue;
 
-       task_pt_regs(task)->pstate &= ~val;
+               __spectre_v4_policy = param->policy;
+               return 0;
+       }
+
+       return -EINVAL;
 }
+early_param("ssbd", parse_spectre_v4_param);
 
 /*
- * prctl interface for SSBD
+ * Because this was all written in a rush by people working in different silos,
+ * we've ended up with multiple command line options to control the same thing.
+ * Wrap these up in some helpers, which prefer disabling the mitigation if faced
+ * with contradictory parameters. The mitigation is always either "off",
+ * "dynamic" or "on".
  */
-static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+static bool spectre_v4_mitigations_off(void)
 {
-       int state = arm64_get_ssbd_state();
+       bool ret = cpu_mitigations_off() ||
+                  __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;
 
-       /* Unsupported */
-       if (state == ARM64_SSBD_UNKNOWN)
-               return -ENODEV;
+       if (ret)
+               pr_info_once("spectre-v4 mitigation disabled by command-line option\n");
 
-       /* Treat the unaffected/mitigated state separately */
-       if (state == ARM64_SSBD_MITIGATED) {
-               switch (ctrl) {
-               case PR_SPEC_ENABLE:
-                       return -EPERM;
-               case PR_SPEC_DISABLE:
-               case PR_SPEC_FORCE_DISABLE:
-                       return 0;
-               }
+       return ret;
+}
+
+/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
+static bool spectre_v4_mitigations_dynamic(void)
+{
+       return !spectre_v4_mitigations_off() &&
+              __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
+}
+
+static bool spectre_v4_mitigations_on(void)
+{
+       return !spectre_v4_mitigations_off() &&
+              __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       switch (spectre_v4_state) {
+       case SPECTRE_UNAFFECTED:
+               return sprintf(buf, "Not affected\n");
+       case SPECTRE_MITIGATED:
+               return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+               return sprintf(buf, "Vulnerable\n");
        }
+}
+
+enum mitigation_state arm64_get_spectre_v4_state(void)
+{
+       return spectre_v4_state;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
+{
+       static const struct midr_range spectre_v4_safe_list[] = {
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+               MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+               MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+               { /* sentinel */ },
+       };
+
+       if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+               return SPECTRE_UNAFFECTED;
+
+       /* CPU feature detection has already run, so ARM64_SSBS is valid here */
+       if (this_cpu_has_cap(ARM64_SSBS))
+               return SPECTRE_MITIGATED;
+
+       return SPECTRE_VULNERABLE;
+}
+
+static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
+{
+       int ret;
+       struct arm_smccc_res res;
+
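+       /* Probe firmware for ARCH_WORKAROUND_2 (the SSBD firmware call) */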
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+                            ARM_SMCCC_ARCH_WORKAROUND_2, &res);
+
+       ret = res.a0;
+       switch (ret) {
+       case SMCCC_RET_SUCCESS:
+               return SPECTRE_MITIGATED;
+       case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+               fallthrough;
+       case SMCCC_RET_NOT_REQUIRED:
+               return SPECTRE_UNAFFECTED;
+       default:
+               fallthrough;
+       case SMCCC_RET_NOT_SUPPORTED:
+               return SPECTRE_VULNERABLE;
+       }
+}
+
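+/*
+ * The capability is detected unless this CPU is known-safe: both the
+ * mitigated and the vulnerable states need further handling.
+ */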
+bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
+{
+       enum mitigation_state state;
+
+       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+       state = spectre_v4_get_cpu_hw_mitigation_state();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v4_get_cpu_fw_mitigation_state();
+
+       return state != SPECTRE_UNAFFECTED;
+}
+
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
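+       /* Only emulate for the kernel; EL0 gets the usual undef SIGILL */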
+       if (user_mode(regs))
+               return 1;
+
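+       /*
+        * Emulate the trapped "MSR SSBS, #imm": update the saved PSTATE so
+        * that the new value takes effect on exception return, then skip the
+        * emulated instruction.
+        */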
+       if (instr & BIT(PSTATE_Imm_shift))
+               regs->pstate |= PSR_SSBS_BIT;
+       else
+               regs->pstate &= ~PSR_SSBS_BIT;
+
+       arm64_skip_faulting_instruction(regs, 4);
+       return 0;
+}
+
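+/* Match "MSR SSBS, #imm", wildcarding only the immediate (the SSBS value) */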
+static struct undef_hook ssbs_emulation_hook = {
+       .instr_mask     = ~(1U << PSTATE_Imm_shift),
+       .instr_val      = 0xd500401f | PSTATE_SSBS,
+       .fn             = ssbs_emulation_handler,
+};
+
+static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
+{
+       static bool undef_hook_registered = false;
+       static DEFINE_RAW_SPINLOCK(hook_lock);
+       enum mitigation_state state;
 
        /*
-        * Things are a bit backward here: the arm64 internal API
-        * *enables the mitigation* when the userspace API *disables
-        * speculation*. So much fun.
+        * If the system is mitigated but this CPU doesn't have SSBS, then
+        * we must be on the safelist and there's nothing more to do.
         */
+       state = spectre_v4_get_cpu_hw_mitigation_state();
+       if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
+               return state;
+
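+       /* Register the emulation hook exactly once, whichever CPU gets here first */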
+       raw_spin_lock(&hook_lock);
+       if (!undef_hook_registered) {
+               register_undef_hook(&ssbs_emulation_hook);
+               undef_hook_registered = true;
+       }
+       raw_spin_unlock(&hook_lock);
+
+       if (spectre_v4_mitigations_off()) {
+               sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+               asm volatile(SET_PSTATE_SSBS(1));
+               return SPECTRE_VULNERABLE;
+       }
+
+       /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
+       asm volatile(SET_PSTATE_SSBS(0));
+       return SPECTRE_MITIGATED;
+}
+
+/*
+ * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
+ * we fall through and check whether firmware needs to be called on this CPU.
+ */
+void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
+                                                 __le32 *origptr,
+                                                 __le32 *updptr, int nr_inst)
+{
+       BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+       if (spectre_v4_mitigations_off())
+               return;
+
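+       /* If every CPU implements SSBS, the firmware mitigation is never needed */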
+       if (cpus_have_final_cap(ARM64_SSBS))
+               return;
+
+       if (spectre_v4_mitigations_dynamic())
+               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
+/*
+ * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
+ * to call into firmware to adjust the mitigation state.
+ */
+void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
+                                                  __le32 *origptr,
+                                                  __le32 *updptr, int nr_inst)
+{
+       u32 insn;
+
+       BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */
+
+       switch (arm_smccc_1_1_get_conduit()) {
+       case SMCCC_CONDUIT_HVC:
+               insn = aarch64_insn_get_hvc_value();
+               break;
+       case SMCCC_CONDUIT_SMC:
+               insn = aarch64_insn_get_smc_value();
+               break;
+       default:
+               return;
+       }
+
+       *updptr = cpu_to_le32(insn);
+}
+
+static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
+{
+       enum mitigation_state state;
+
+       state = spectre_v4_get_cpu_fw_mitigation_state();
+       if (state != SPECTRE_MITIGATED)
+               return state;
+
+       if (spectre_v4_mitigations_off()) {
+               arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
+               return SPECTRE_VULNERABLE;
+       }
+
+       arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);
+
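+       /*
+        * Under the dynamic policy, flag this CPU so the entry/exit code
+        * toggles the mitigation via the firmware call.
+        */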
+       if (spectre_v4_mitigations_dynamic())
+               __this_cpu_write(arm64_ssbd_callback_required, 1);
+
+       return SPECTRE_MITIGATED;
+}
+
+void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
+{
+       enum mitigation_state state;
+
+       WARN_ON(preemptible());
+
+       state = spectre_v4_enable_hw_mitigation();
+       if (state == SPECTRE_VULNERABLE)
+               state = spectre_v4_enable_fw_mitigation();
+
+       update_mitigation_state(&spectre_v4_state, state);
+}
+
+static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
+{
+       u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+       if (state)
+               regs->pstate |= bit;
+       else
+               regs->pstate &= ~bit;
+}
+
+void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
+{
+       struct pt_regs *regs = task_pt_regs(tsk);
+       bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;
+
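+       /* SSBS stays clear (mitigation enabled) unless a case below applies */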
+       if (spectre_v4_mitigations_off())
+               ssbs = true;
+       else if (spectre_v4_mitigations_dynamic() && !kthread)
+               ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);
+
+       __update_pstate_ssbs(regs, ssbs);
+}
+
+/*
+ * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
+ * This is interesting because the "speculation disabled" behaviour can be
+ * configured so that it is preserved across exec(), which means that the
+ * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
+ * from userspace.
+ */
+static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
-               /* If speculation is force disabled, enable is not allowed */
-               if (state == ARM64_SSBD_FORCE_ENABLE ||
-                   task_spec_ssb_force_disable(task))
+               /* Enable speculation: disable mitigation */
+               /*
+                * Force disabled speculation prevents it from being
+                * re-enabled.
+                */
+               if (task_spec_ssb_force_disable(task))
+                       return -EPERM;
+
+               /*
+                * If the mitigation is forced on, then speculation is forced
+                * off and we again prevent it from being re-enabled.
+                */
+               if (spectre_v4_mitigations_on())
                        return -EPERM;
+
                task_clear_spec_ssb_disable(task);
                clear_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_enable(task);
-               break;
-       case PR_SPEC_DISABLE:
-               if (state == ARM64_SSBD_FORCE_DISABLE)
-                       return -EPERM;
-               task_set_spec_ssb_disable(task);
-               set_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_disable(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
-               if (state == ARM64_SSBD_FORCE_DISABLE)
+               /* Force disable speculation: force enable mitigation */
+               /*
+                * If the mitigation is forced off, then speculation is forced
+                * on and we prevent it from being disabled.
+                */
+               if (spectre_v4_mitigations_off())
                        return -EPERM;
-               task_set_spec_ssb_disable(task);
+
                task_set_spec_ssb_force_disable(task);
+               fallthrough;
+       case PR_SPEC_DISABLE:
+               /* Disable speculation: enable mitigation */
+               /* Same as PR_SPEC_FORCE_DISABLE */
+               if (spectre_v4_mitigations_off())
+                       return -EPERM;
+
+               task_set_spec_ssb_disable(task);
                set_tsk_thread_flag(task, TIF_SSBD);
-               ssbd_ssbs_disable(task);
                break;
        default:
                return -ERANGE;
        }
 
+       spectre_v4_enable_task_mitigation(task);
        return 0;
 }
 
 
 static int ssbd_prctl_get(struct task_struct *task)
 {
-       switch (arm64_get_ssbd_state()) {
-       case ARM64_SSBD_UNKNOWN:
-               return -ENODEV;
-       case ARM64_SSBD_FORCE_ENABLE:
-               return PR_SPEC_DISABLE;
-       case ARM64_SSBD_KERNEL:
-               if (task_spec_ssb_force_disable(task))
-                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
-               if (task_spec_ssb_disable(task))
-                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
-               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       case ARM64_SSBD_FORCE_DISABLE:
-               return PR_SPEC_ENABLE;
-       default:
+       switch (spectre_v4_state) {
+       case SPECTRE_UNAFFECTED:
                return PR_SPEC_NOT_AFFECTED;
+       case SPECTRE_MITIGATED:
+               if (spectre_v4_mitigations_on())
+                       return PR_SPEC_NOT_AFFECTED;
+
+               if (spectre_v4_mitigations_dynamic())
+                       break;
+
+               /* Mitigations are disabled, so we're vulnerable. */
+               fallthrough;
+       case SPECTRE_VULNERABLE:
+               fallthrough;
+       default:
+               return PR_SPEC_ENABLE;
        }
+
+       /* Check the mitigation state for this task */
+       if (task_spec_ssb_force_disable(task))
+               return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+
+       if (task_spec_ssb_disable(task))
+               return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+
+       return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)