        rcu_sysrq_start();
        rcu_read_lock();
-       printk_prefer_direct_enter();
        /*
         * Raise the apparent loglevel to maximum so that the sysrq header
         * is shown to provide the user with positive feedback.  We do not
                pr_cont("\n");
                console_loglevel = orig_log_level;
        }
-       printk_prefer_direct_exit();
        rcu_read_unlock();
        rcu_sysrq_end();
 
 
 #define printk_deferred_enter __printk_safe_enter
 #define printk_deferred_exit __printk_safe_exit
 
-extern void printk_prefer_direct_enter(void);
-extern void printk_prefer_direct_exit(void);
-
 extern bool pr_flush(int timeout_ms, bool reset_on_progress);
 
 /*
 {
 }
 
-static inline void printk_prefer_direct_enter(void)
-{
-}
-
-static inline void printk_prefer_direct_exit(void)
-{
-}
-
 static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
 {
        return true;
 
         * complain:
         */
        if (sysctl_hung_task_warnings) {
-               printk_prefer_direct_enter();
-
                if (sysctl_hung_task_warnings > 0)
                        sysctl_hung_task_warnings--;
                pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
 
                if (sysctl_hung_task_all_cpu_backtrace)
                        hung_task_show_all_bt = true;
-
-               printk_prefer_direct_exit();
        }
 
        touch_nmi_watchdog();
        }
  unlock:
        rcu_read_unlock();
-       if (hung_task_show_lock) {
-               printk_prefer_direct_enter();
+       if (hung_task_show_lock)
                debug_show_all_locks();
-               printk_prefer_direct_exit();
-       }
 
        if (hung_task_show_all_bt) {
                hung_task_show_all_bt = false;
-               printk_prefer_direct_enter();
                trigger_all_cpu_backtrace();
-               printk_prefer_direct_exit();
        }
 
        if (hung_task_call_panic)
 
 {
        disable_trace_on_warning();
 
-       printk_prefer_direct_enter();
-
        if (file)
                pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
                        raw_smp_processor_id(), current->pid, file, line,
 
        /* Just a warning, don't kill lockdep. */
        add_taint(taint, LOCKDEP_STILL_OK);
-
-       printk_prefer_direct_exit();
 }
 
 #ifndef __WARN_FLAGS
 
 static DEFINE_MUTEX(syslog_lock);
 
 #ifdef CONFIG_PRINTK
-static atomic_t printk_prefer_direct = ATOMIC_INIT(0);
-
-/**
- * printk_prefer_direct_enter - cause printk() calls to attempt direct
- *                              printing to all enabled consoles
- *
- * Since it is not possible to call into the console printing code from any
- * context, there is no guarantee that direct printing will occur.
- *
- * This globally effects all printk() callers.
- *
- * Context: Any context.
- */
-void printk_prefer_direct_enter(void)
-{
-       atomic_inc(&printk_prefer_direct);
-}
-
-/**
- * printk_prefer_direct_exit - restore printk() behavior
- *
- * Context: Any context.
- */
-void printk_prefer_direct_exit(void)
-{
-       WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
-}
-
 DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* All 3 protected by @syslog_lock. */
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 
         * See Documentation/RCU/stallwarn.rst for info on how to debug
         * RCU CPU stall warnings.
         */
-       printk_prefer_direct_enter();
        trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
        pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
         */
        set_tsk_need_resched(current);
        set_preempt_need_resched();
-       printk_prefer_direct_exit();
 }
 
 static void check_cpu_stall(struct rcu_data *rdp)
 
        ret = run_cmd(reboot_cmd);
 
        if (ret) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly reboot: forcing the issue\n");
                emergency_sync();
                kernel_restart(NULL);
-               printk_prefer_direct_exit();
        }
 
        return ret;
        ret = run_cmd(poweroff_cmd);
 
        if (ret && force) {
-               printk_prefer_direct_enter();
                pr_warn("Failed to start orderly shutdown: forcing the issue\n");
 
                /*
                 */
                emergency_sync();
                kernel_power_off();
-               printk_prefer_direct_exit();
        }
 
        return ret;
  */
 static void hw_failure_emergency_poweroff_func(struct work_struct *work)
 {
-       printk_prefer_direct_enter();
-
        /*
         * We have reached here after the emergency shutdown waiting period has
         * expired. This means orderly_poweroff has not been able to shut off
         */
        pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
        emergency_restart();
-
-       printk_prefer_direct_exit();
 }
 
 static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
 {
        static atomic_t allow_proceed = ATOMIC_INIT(1);
 
-       printk_prefer_direct_enter();
-
        pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);
 
        /* Shutdown should be initiated only once. */
        if (!atomic_dec_and_test(&allow_proceed))
-               goto out;
+               return;
 
        /*
         * Queue a backup emergency shutdown in the event of
         */
        hw_failure_emergency_poweroff(ms_until_forced);
        orderly_poweroff(true);
-out:
-       printk_prefer_direct_exit();
 }
 EXPORT_SYMBOL_GPL(hw_protection_shutdown);
 
 
                /* Start period for the next softlockup warning. */
                update_report_ts();
 
-               printk_prefer_direct_enter();
-
                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-
-               printk_prefer_direct_exit();
        }
 
        return HRTIMER_RESTART;
 
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
-               printk_prefer_direct_enter();
-
                pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
                         this_cpu);
                print_modules();
                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");
 
-               printk_prefer_direct_exit();
-
                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }