kernel/watchdog: flush all printk nmi buffers when hardlockup detected
author    Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
          Thu, 31 Dec 2020 22:03:56 +0000 (22:03 +0000)
committer Johannes Weiner <hannes@cmpxchg.org>
          Thu, 31 Dec 2020 22:03:56 +0000 (22:03 +0000)
In NMI context printk() can save messages into per-cpu buffers and
schedule a flush via irq_work once IRQs are unblocked.  This means the
message about a hardlockup appears in the kernel log only when/if the
lockup is gone.
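
A rough userspace sketch of this deferral pattern (illustration only;
the names and buffer handling are made up, not the kernel's printk_safe
internals):

    #include <stdio.h>
    #include <string.h>

    #define SAFE_BUF_SIZE 1024

    static struct {
            char data[SAFE_BUF_SIZE];
            size_t len;
    } safe_buf;                     /* one such buffer per cpu in the kernel */

    /* "NMI context": no locks, no console, just a memcpy into the buffer */
    static void nmi_log(const char *msg)
    {
            size_t n = strlen(msg);

            if (safe_buf.len + n > SAFE_BUF_SIZE)
                    return;         /* sketch: drop on overflow */
            memcpy(safe_buf.data + safe_buf.len, msg, n);
            safe_buf.len += n;
            /* kernel: queue irq_work here; it runs once IRQs are unblocked */
    }

    /* the deferred flush: never runs while the cpu stays locked up */
    static void flush_log(void)
    {
            fwrite(safe_buf.data, 1, safe_buf.len, stdout);
            safe_buf.len = 0;
    }

    int main(void)
    {
            nmi_log("NMI watchdog: hard LOCKUP\n");
            flush_log();
            return 0;
    }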

The comment in irq_work_queue_on() states that remote IPIs aren't NMI
safe, thus printk() cannot schedule the flush work on another cpu.

This patch adds a simple atomic counter of detected hardlockups and
flushes all per-cpu printk buffers from softlockup watchdog context on
any other cpu when it sees this counter change.
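
A standalone C11 sketch of the counter/snapshot idiom used here
(illustration only): detectors bump a monotonic counter from NMI
context; the consumer remembers the last value it handled and flushes
only when the counter has moved on.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int hardlockup_detected;  /* bumped by the NMI callback */

    /* runs periodically in softlockup-watchdog context on every cpu */
    static void watchdog_tick(void)
    {
            static atomic_int flushed;      /* last counter value handled */
            int seen = atomic_load(&hardlockup_detected);

            if (seen != atomic_load(&flushed)) {
                    atomic_store(&flushed, seen);
                    /* stands in for printk_safe_flush() */
                    puts("flushing per-cpu printk buffers");
            }
    }

    int main(void)
    {
            watchdog_tick();                /* no lockup: no flush */
            atomic_fetch_add(&hardlockup_detected, 1);
            watchdog_tick();                /* counter moved: flush once */
            watchdog_tick();                /* caught up: nothing to do */
            return 0;
    }

Because the counter is never reset, any cpu can run the check without
coordination: whichever cpu ticks first does the flush, and the shared
snapshot keeps the flush from repeating on every tick.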

Link: http://lkml.kernel.org/r/158132813726.1980.17382047082627699898.stgit@buzz
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Dmitry Monakhov <dmtrmonakhov@yandex-team.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/nmi.h
kernel/watchdog.c
kernel/watchdog_hld.c

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 750c7f395ca907f772ad8e784e6df32e473ead72..0c2a1d915453d820f35594f49a9fd84d91322cb5 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -84,6 +84,7 @@ static inline void reset_hung_task_detector(void) { }
 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void hardlockup_detector_disable(void);
 extern unsigned int hardlockup_panic;
+extern atomic_t hardlockup_detected;
 #else
 static inline void hardlockup_detector_disable(void) {}
 #endif
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 71109065bd8ebf4ee946a902321b4cd8934a3d95..d8baf69aab4a764922f687893fa030141dace409 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -85,6 +85,25 @@ static int __init hardlockup_panic_setup(char *str)
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
 
+atomic_t hardlockup_detected = ATOMIC_INIT(0);
+
+static inline void flush_hardlockup_messages(void)
+{
+       static atomic_t flushed = ATOMIC_INIT(0);
+
+       /* flush messages from hard lockup detector */
+       if (atomic_read(&hardlockup_detected) != atomic_read(&flushed)) {
+               atomic_set(&flushed, atomic_read(&hardlockup_detected));
+               printk_safe_flush();
+       }
+}
+
+#else /* CONFIG_HARDLOCKUP_DETECTOR */
+
+static inline void flush_hardlockup_messages(void)
+{
+}
+
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /*
@@ -351,6 +370,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
+       flush_hardlockup_messages();
+
        /* kick the softlockup detector */
        if (completion_done(this_cpu_ptr(&softlockup_completion))) {
                reinit_completion(this_cpu_ptr(&softlockup_completion));
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582ca1cf352f006aa1fd5f689f8f5859..a546bc54f6ff0aebfff1f733ea52ef80a833ed8d 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -154,6 +154,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
 
                if (hardlockup_panic)
                        nmi_panic(regs, "Hard LOCKUP");
+               atomic_inc(&hardlockup_detected);
 
                __this_cpu_write(hard_watchdog_warn, true);
                return;