(__wa && __wa->h) ? __wa->h : arch_timer_##h;           \
        })
 
-extern struct static_key_false arch_timer_read_ool_enabled;
-#define needs_unstable_timer_counter_workaround() \
-       static_branch_unlikely(&arch_timer_read_ool_enabled)
 #else
 #define has_erratum_handler(h)                    false
 #define erratum_handler(h)                        (arch_timer_##h)
-#define needs_unstable_timer_counter_workaround()  false
 #endif
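
For reference, the needs_unstable_timer_counter_workaround() key removed above follows the generic jump-label pattern from <linux/jump_label.h>; a minimal sketch with a hypothetical key and stub callees, not the driver's code:

  #include <linux/jump_label.h>

  /* hypothetical key, default off; branch is patched live once enabled */
  DEFINE_STATIC_KEY_FALSE(my_feature_enabled);

  static void do_workaround_read(void) { /* slow, erratum-safe path */ }
  static void do_default_read(void)    { /* fast default path */ }

  static void read_counter(void)
  {
          /* compiles to a patchable no-op branch until
           * static_branch_enable(&my_feature_enabled) runs */
          if (static_branch_unlikely(&my_feature_enabled))
                  do_workaround_read();
          else
                  do_default_read();
  }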
 
 enum arch_timer_erratum_match_type {
 
 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
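
With the key gone, the per-CPU timer_unstable_counter_workaround pointer kept above carries the whole decision on its own; roughly, assuming <linux/percpu.h> and a hypothetical descriptor type standing in for the real one:

  #include <linux/percpu.h>

  struct wa_desc { const char *desc; };  /* stand-in for the real struct */

  DEFINE_PER_CPU(const struct wa_desc *, my_wa);

  static void publish_wa(int cpu, const struct wa_desc *wa)
  {
          per_cpu(my_wa, cpu) = wa;      /* writer, as in the enable path */
  }

  static bool this_cpu_has_wa(void)
  {
          return __this_cpu_read(my_wa) != NULL;  /* NULL: no erratum here */
  }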
 
 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
                                                struct clock_event_device *clk)
                        per_cpu(timer_unstable_counter_workaround, i) = wa;
        }
 
-       /*
-        * Use the locked version, as we're called from the CPU
-        * hotplug framework. Otherwise, we end-up in deadlock-land.
-        */
-       static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
-
        /*
         * Don't use the vdso fastpath if errata require using the
         * out-of-line counter accessor. We may change our mind pretty
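
The comment deleted above is about lock ordering: static_branch_enable() takes cpus_read_lock() internally, so calling it from a CPU hotplug callback, where that lock is already held, would deadlock; the _cpuslocked variant exists for exactly that context. A sketch with the hypothetical key from the first example:

  static int my_starting_cpu(unsigned int cpu)
  {
          /* hotplug lock already held here; the plain variant
           * would try to take it again and deadlock */
          static_branch_enable_cpuslocked(&my_feature_enabled);
          return 0;
  }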
 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
                                            void *arg)
 {
-       const struct arch_timer_erratum_workaround *wa;
+       const struct arch_timer_erratum_workaround *wa, *__wa;
        ate_match_fn_t match_fn = NULL;
        bool local = false;
 
        if (!wa)
                return;
 
-       if (needs_unstable_timer_counter_workaround()) {
-               const struct arch_timer_erratum_workaround *__wa;
-               __wa = __this_cpu_read(timer_unstable_counter_workaround);
-               if (__wa && wa != __wa)
-                       pr_warn("Can't enable workaround for %s (clashes with %s\n)",
-                               wa->desc, __wa->desc);
+       __wa = __this_cpu_read(timer_unstable_counter_workaround);
+       if (__wa && wa != __wa)
+               pr_warn("Can't enable workaround for %s (clashes with %s)\n",
+                       wa->desc, __wa->desc);
 
-               if (__wa)
-                       return;
-       }
+       if (__wa)
+               return;
 
        arch_timer_enable_workaround(wa, local);
        pr_info("Enabling %s workaround for %s\n",