From 2d04e16c3a18a8b3c993e7c46844244d983ee18c Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 16 Dec 2020 17:32:25 -0800
Subject: [PATCH] clocksource: Retry clock read if long delays detected

When the clocksource watchdog marks a clock as unstable, this might be
due to that clock being unstable or it might be due to delays that
happen to occur between the reads of the two clocks.  Yes, interrupts
are disabled across those two reads, but there is no shortage of things
that can delay interrupts-disabled regions of code, ranging from SMI
handlers to vCPU preemption.  It would be good to have some indication
as to why the clock was marked unstable.

This commit therefore re-reads the watchdog clock on either side of the
read from the clock under test.  If the watchdog clock shows an
excessive time delta between its pair of reads, the reads are retried.
The maximum number of retries is specified by a new kernel boot
parameter clocksource.max_read_retries, which defaults to three, that
is, up to four reads: one initial read and up to three retries.  If
retries were required, a message is printed on the console.  If the
number of retries is exceeded, the clock under test will be marked
unstable.  However, the probability of this happening due to various
sorts of delays is quite small.  In addition, the reason (clock-read
delays) for the unstable marking will be apparent.

Cc: John Stultz
Cc: Thomas Gleixner
Cc: Stephen Boyd
Cc: Jonathan Corbet
Cc: Mark Rutland
Cc: Marc Zyngier
Cc: Andi Kleen
Reported-by: Chris Mason
[ paulmck: Per-clocksource retries per Neeraj Upadhyay feedback. ]
[ paulmck: Don't reset injectfail per Neeraj Upadhyay feedback. ]
Signed-off-by: Paul E. McKenney
---
 kernel/time/clocksource.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 4be4391aa72f..3f734c6b5ac4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -124,6 +124,7 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating);
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
+#define WATCHDOG_MAX_SKEW (NSEC_PER_SEC >> 6)
 
 static void clocksource_watchdog_work(struct work_struct *work)
 {
@@ -213,9 +214,10 @@ static void clocksource_watchdog_inject_delay(void)
 static void clocksource_watchdog(struct timer_list *unused)
 {
 	struct clocksource *cs;
-	u64 csnow, wdnow, cslast, wdlast, delta;
-	int64_t wd_nsec, cs_nsec;
+	u64 csnow, wdnow, wdagain, cslast, wdlast, delta;
+	int64_t wd_nsec, wdagain_nsec, wderr_nsec = 0, cs_nsec;
 	int next_cpu, reset_pending;
+	int nretries;
 
 	spin_lock(&watchdog_lock);
 	if (!watchdog_running)
@@ -224,6 +226,7 @@ static void clocksource_watchdog(struct timer_list *unused)
 	reset_pending = atomic_read(&watchdog_reset_pending);
 
 	list_for_each_entry(cs, &watchdog_list, wd_list) {
+		nretries = 0;
 
 		/* Clocksource already marked unstable? */
 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
@@ -232,11 +235,23 @@ static void clocksource_watchdog(struct timer_list *unused)
 			continue;
 		}
 
+retry:
 		local_irq_disable();
-		csnow = cs->read(cs);
-		clocksource_watchdog_inject_delay();
 		wdnow = watchdog->read(watchdog);
+		clocksource_watchdog_inject_delay();
+		csnow = cs->read(cs);
+		wdagain = watchdog->read(watchdog);
 		local_irq_enable();
+		delta = clocksource_delta(wdagain, wdnow, watchdog->mask);
+		wdagain_nsec = clocksource_cyc2ns(delta, watchdog->mult, watchdog->shift);
+		if (wdagain_nsec < 0 || wdagain_nsec > WATCHDOG_MAX_SKEW) {
+			wderr_nsec = wdagain_nsec;
+			if (nretries++ < max_read_retries)
+				goto retry;
+		}
+		if (nretries)
+			pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d\n",
+				smp_processor_id(), watchdog->name, wderr_nsec, nretries);
 
 		/* Clocksource initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
-- 
2.50.1
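
For readers who want to see the bounded-retry read-back pattern in isolation, a
minimal user-space sketch follows.  It reads a reference clock before and after
the clock under test and retries when the reference clock's own delta is too
large, which is the same idea the patch applies to the watchdog clocksource.
The names MAX_SKEW_NS, MAX_READ_RETRIES, and read_clock_checked(), and the use
of clock_gettime(), are stand-ins for illustration only; this is not the kernel
code and only approximates what the patch does.

/*
 * Standalone sketch of the bounded-retry read-back pattern: read a
 * reference clock, read the clock under test, read the reference clock
 * again, and retry if the two reference reads are too far apart.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_SKEW_NS		(1000000000LL >> 6)	/* ~15.6 ms, mirroring NSEC_PER_SEC >> 6 */
#define MAX_READ_RETRIES	3			/* one initial read plus up to three retries */

static int64_t read_ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns 0 and fills *cs_ns on success, -1 if every attempt was delayed. */
static int read_clock_checked(clockid_t ref, clockid_t cs, int64_t *cs_ns)
{
	int64_t ref_before, ref_after, delay_ns;
	int nretries;

	for (nretries = 0; nretries <= MAX_READ_RETRIES; nretries++) {
		ref_before = read_ns(ref);
		*cs_ns = read_ns(cs);
		ref_after = read_ns(ref);

		delay_ns = ref_after - ref_before;
		if (delay_ns >= 0 && delay_ns <= MAX_SKEW_NS) {
			if (nretries)
				fprintf(stderr, "read-back delay resolved after attempt %d\n",
					nretries);
			return 0;	/* reference reads were close enough together */
		}
	}
	return -1;	/* persistently delayed: caller should distrust this sample */
}

int main(void)
{
	int64_t now;

	if (!read_clock_checked(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC, &now))
		printf("clock under test read %lld ns\n", (long long)now);
	return 0;
}

The point of bracketing the clock under test with two reference-clock reads is
that any interrupts-disabled delay (SMI, vCPU preemption, and so on) shows up
in the reference clock's own delta, so a delayed sample can be retried rather
than being blamed on the clock being checked.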