 #include <linux/jiffies.h>
 #include <linux/time.h>
 
-/*
- * Structure holding internal timekeeping values.
- *
- * Note: wall_to_monotonic is what we need to add to xtime (or xtime
- * corrected for sub jiffie times) to get to monotonic time.
- * Monotonic is pegged at zero at system boot time, so
- * wall_to_monotonic will be negative, however, we will ALWAYS keep
- * the tv_nsec part positive so we can use the usual normalization.
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock:     Current clocksource used for timekeeping.
+ * @read:      Read function of @clock
+ * @mask:      Bitmask for two's complement subtraction of non-64-bit clocks
+ * @cycle_last: @clock cycle value at last update
+ * @mult:      NTP adjusted multiplier for scaled math conversion
+ * @shift:     Shift value for scaled math conversion
+ * @xtime_nsec: Shifted (fractional) nanoseconds offset for readout
+ * @base_mono:  ktime_t (nanoseconds) base time for readout
  *
- * wall_to_monotonic is moved after resume from suspend for the
- * monotonic time not to jump. To calculate the real boot time offset
- * we need to do offs_real - offs_boot.
+ * This struct has a size of 56 bytes on 64-bit. Together with a
+ * seqcount it occupies a single 64-byte cache line.
  *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
+ * The struct is separate from struct timekeeper as it is also used
+ * for a fast NMI safe accessor to clock monotonic.
  */
-struct timekeeper {
-       /* Current clocksource used for timekeeping. */
+struct tk_read_base {
        struct clocksource      *clock;
-       /* Read function of @clock */
        cycle_t                 (*read)(struct clocksource *cs);
-       /* Bitmask for two's complement subtraction of non 64bit counters */
        cycle_t                 mask;
-       /* Last cycle value */
        cycle_t                 cycle_last;
-       /* NTP adjusted clock multiplier */
        u32                     mult;
-       /* The shift value of the current clocksource. */
        u32                     shift;
-       /* Clock shifted nano seconds */
        u64                     xtime_nsec;
-
-       /* Monotonic base time */
        ktime_t                 base_mono;
+};
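A minimal sketch (hypothetical helper, not part of this patch) of why the
readout fields are split out: a seqcount-protected reader can compute a
monotonic timestamp from a tk_read_base snapshot alone, using the same
mult/shift scaled math as timekeeping_get_ns() further down.

/* Illustration only -- name and placement are hypothetical. */
static u64 tk_read_base_ns_sketch(struct tk_read_base *tkr, seqcount_t *seq)
{
        cycle_t now, delta;
        unsigned int start;
        u64 nsec;

        do {
                start = read_seqcount_begin(seq);
                /* sample the clocksource and the delta since the last update */
                now = tkr->read(tkr->clock);
                delta = clocksource_delta(now, tkr->cycle_last, tkr->mask);
                /* scaled math: (delta * mult + fractional nsec) >> shift */
                nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
                nsec += ktime_to_ns(tkr->base_mono);
        } while (read_seqcount_retry(seq, start));

        return nsec;
}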
 
-       /* Current CLOCK_REALTIME time in seconds */
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr:               The readout base structure
+ * @xtime_sec:         Current CLOCK_REALTIME time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real:         Offset clock monotonic -> clock realtime
+ * @offs_boot:         Offset clock monotonic -> clock boottime
+ * @offs_tai:          Offset clock monotonic -> clock tai
+ * @tai_offset:        The current UTC to TAI offset in seconds
+ * @base_raw:          Monotonic raw base time in ktime_t format
+ * @raw_time:          Monotonic raw base time in timespec64 format
+ * @cycle_interval:    Number of clock cycles in one NTP interval
+ * @xtime_interval:    Number of clock-shifted nanoseconds in one NTP
+ *                     interval.
+ * @xtime_remainder:   Shifted nanoseconds left over when rounding
+ *                     @cycle_interval
+ * @raw_interval:      Raw nanoseconds accumulated per NTP interval.
+ * @ntp_error:         Difference between accumulated time and NTP time in
+ *                     NTP-shifted nanoseconds.
+ * @ntp_error_shift:   Shift conversion between clock-shifted nanoseconds and
+ *                     NTP-shifted nanoseconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub-jiffie times)
+ * to get to monotonic time. Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative; however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. To calculate the real boot time offset
+ * we need to do offs_real - offs_boot.
+ *
+ * wall_to_monotonic is no longer the boot time; getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+       struct tk_read_base     tkr;
        u64                     xtime_sec;
-       /* CLOCK_REALTIME to CLOCK_MONOTONIC offset */
        struct timespec64       wall_to_monotonic;
-
-       /* Offset clock monotonic -> clock realtime */
        ktime_t                 offs_real;
-       /* Offset clock monotonic -> clock boottime */
        ktime_t                 offs_boot;
-       /* Offset clock monotonic -> clock tai */
        ktime_t                 offs_tai;
-
-       /* The current UTC to TAI offset in seconds */
        s32                     tai_offset;
-
-       /* Monotonic raw base time */
        ktime_t                 base_raw;
-
-       /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec64       raw_time;
 
-       /* Number of clock cycles in one NTP interval. */
+       /* The following members are for timekeeping internal use */
        cycle_t                 cycle_interval;
-       /* Number of clock shifted nano seconds in one NTP interval. */
        u64                     xtime_interval;
-       /* shifted nano seconds left over when rounding cycle_interval */
        s64                     xtime_remainder;
-       /* Raw nano seconds accumulated per NTP interval. */
        u32                     raw_interval;
-
-       /*
-        * Difference between accumulated time and NTP time in ntp
-        * shifted nano seconds.
-        */
        s64                     ntp_error;
-       /*
-        * Shift conversion between clock shifted nano seconds and
-        * ntp shifted nano seconds.
-        */
        u32                     ntp_error_shift;
 };
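A short illustration (hypothetical helper, not part of this patch) of how the
offsets documented above relate the clock bases; only the coarse base is
shown, the sub-interval nanoseconds from timekeeping_get_ns() come on top.

/*
 * CLOCK_REALTIME = CLOCK_MONOTONIC + offs_real
 * CLOCK_BOOTTIME = CLOCK_MONOTONIC + offs_boot
 * CLOCK_TAI      = CLOCK_MONOTONIC + offs_tai
 */
static inline ktime_t tk_realtime_base_sketch(struct timekeeper *tk)
{
        return ktime_add(tk->tkr.base_mono, tk->offs_real);
}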
 
 
 
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
-       while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
-               tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+       while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
+               tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
                tk->xtime_sec++;
        }
 }
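As a reading aid: xtime_nsec holds nanoseconds left-shifted by tkr.shift, so
"one second" in that representation is (u64)NSEC_PER_SEC << shift. The
invariant tk_normalize_xtime() re-establishes can be stated as a predicate
(hypothetical helper, for illustration only):

static inline bool tk_xtime_is_normalized(struct timekeeper *tk)
{
        return tk->tkr.xtime_nsec < ((u64)NSEC_PER_SEC << tk->tkr.shift);
}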
        struct timespec64 ts;
 
        ts.tv_sec = tk->xtime_sec;
-       ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+       ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
        return ts;
 }
 
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
        tk->xtime_sec = ts->tv_sec;
-       tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
+       tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 {
        tk->xtime_sec += ts->tv_sec;
-       tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+       tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
        tk_normalize_xtime(tk);
 }
 
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;
 
-       old_clock = tk->clock;
-       tk->clock = clock;
-       tk->read = clock->read;
-       tk->mask = clock->mask;
-       tk->cycle_last = tk->read(clock);
+       old_clock = tk->tkr.clock;
+       tk->tkr.clock = clock;
+       tk->tkr.read = clock->read;
+       tk->tkr.mask = clock->mask;
+       tk->tkr.cycle_last = tk->tkr.read(clock);
 
        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
-                       tk->xtime_nsec >>= -shift_change;
+                       tk->tkr.xtime_nsec >>= -shift_change;
                else
-                       tk->xtime_nsec <<= shift_change;
+                       tk->tkr.xtime_nsec <<= shift_change;
        }
-       tk->shift = clock->shift;
+       tk->tkr.shift = clock->shift;
 
        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
-       tk->mult = clock->mult;
+       tk->tkr.mult = clock->mult;
 }
 
 /* Timekeeper helper functions. */
        s64 nsec;
 
        /* read clocksource: */
-       cycle_now = tk->read(tk->clock);
+       cycle_now = tk->tkr.read(tk->tkr.clock);
 
        /* calculate the delta since the last update_wall_time: */
-       delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+       delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
-       nsec = delta * tk->mult + tk->xtime_nsec;
-       nsec >>= tk->shift;
+       nsec = delta * tk->tkr.mult + tk->tkr.xtime_nsec;
+       nsec >>= tk->tkr.shift;
 
        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + arch_gettimeoffset();
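A minimal sketch of the mult/shift scaled math used above (illustration only;
clocksource_cyc2ns() in the raw path below performs the same computation):
mult / 2^shift approximates nanoseconds per cycle, so the hot path needs no
division.

static inline u64 cyc2ns_sketch(u64 cycles, u32 mult, u32 shift)
{
        /* e.g. a 10 MHz clock is 100 ns/cycle: shift = 8, mult = 100 << 8 */
        return (cycles * mult) >> shift;
}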
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 {
-       struct clocksource *clock = tk->clock;
+       struct clocksource *clock = tk->tkr.clock;
        cycle_t cycle_now, delta;
        s64 nsec;
 
        /* read clocksource: */
-       cycle_now = tk->read(clock);
+       cycle_now = tk->tkr.read(clock);
 
        /* calculate the delta since the last update_wall_time: */
-       delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
+       delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
 
        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
        struct timespec xt;
 
        xt = tk_xtime(tk);
-       update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
-                           tk->cycle_last);
+       update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult,
+                           tk->tkr.cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
        * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
        * users are removed, this can be killed.
        */
-       remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
-       tk->xtime_nsec -= remainder;
-       tk->xtime_nsec += 1ULL << tk->shift;
+       remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
+       tk->tkr.xtime_nsec -= remainder;
+       tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
-       tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+       tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
 }
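A worked example of the fixup above, assuming shift == 8: if the
sub-nanosecond remainder is 0x2a, xtime_nsec is advanced by 0x100 - 0x2a =
0xd6 shifted units, rounding it up to the next whole nanosecond, and the same
amount (converted via ntp_error_shift) is subtracted from ntp_error so the
adjustment is accounted for rather than lost.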
 #else
 #define old_vsyscall_fixup(tk)
        nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
        nsec *= NSEC_PER_SEC;
        nsec += tk->wall_to_monotonic.tv_nsec;
-       tk->base_mono = ns_to_ktime(nsec);
+       tk->tkr.base_mono = ns_to_ktime(nsec);
 
        /* Update the monotonic raw base */
        tk->base_raw = timespec64_to_ktime(tk->raw_time);
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-       struct clocksource *clock = tk->clock;
+       struct clocksource *clock = tk->tkr.clock;
        cycle_t cycle_now, delta;
        s64 nsec;
 
-       cycle_now = tk->read(clock);
-       delta = clocksource_delta(cycle_now, tk->cycle_last, tk->mask);
-       tk->cycle_last = cycle_now;
+       cycle_now = tk->tkr.read(clock);
+       delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
+       tk->tkr.cycle_last = cycle_now;
 
-       tk->xtime_nsec += delta * tk->mult;
+       tk->tkr.xtime_nsec += delta * tk->tkr.mult;
 
        /* If arch requires, add in get_arch_timeoffset() */
-       tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
+       tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
 
        tk_normalize_xtime(tk);
 
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-               base = tk->base_mono;
+               base = tk->tkr.base_mono;
                nsecs = timekeeping_get_ns(tk);
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
-               base = ktime_add(tk->base_mono, *offset);
+               base = ktime_add(tk->tkr.base_mono, *offset);
                nsecs = timekeeping_get_ns(tk);
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
         */
        if (try_module_get(new->owner)) {
                if (!new->enable || new->enable(new) == 0) {
-                       old = tk->clock;
+                       old = tk->tkr.clock;
                        tk_setup_internals(tk, new);
                        if (old->disable)
                                old->disable(old);
 {
        struct timekeeper *tk = &tk_core.timekeeper;
 
-       if (tk->clock == clock)
+       if (tk->tkr.clock == clock)
                return 0;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
-       return tk->clock == clock ? 0 : -1;
+       return tk->tkr.clock == clock ? 0 : -1;
 }
 
 /**
        do {
                seq = read_seqcount_begin(&tk_core.seq);
 
-               ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+               ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
        do {
                seq = read_seqcount_begin(&tk_core.seq);
 
-               ret = tk->clock->max_idle_ns;
+               ret = tk->tkr.clock->max_idle_ns;
 
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
 static void timekeeping_resume(void)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
-       struct clocksource *clock = tk->clock;
+       struct clocksource *clock = tk->tkr.clock;
        unsigned long flags;
        struct timespec64 ts_new, ts_delta;
        struct timespec tmp;
         * The less preferred source will only be tried if there is no better
         * usable source. The rtc part is handled separately in rtc core code.
         */
-       cycle_now = tk->read(clock);
+       cycle_now = tk->tkr.read(clock);
        if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-               cycle_now > tk->cycle_last) {
+               cycle_now > tk->tkr.cycle_last) {
                u64 num, max = ULLONG_MAX;
                u32 mult = clock->mult;
                u32 shift = clock->shift;
                s64 nsec = 0;
 
-               cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
-                                               tk->mask);
+               cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
+                                               tk->tkr.mask);
 
                /*
                 * "cycle_delta * mutl" may cause 64 bits overflow, if the
                __timekeeping_inject_sleeptime(tk, &ts_delta);
 
        /* Re-base the last cycle value */
-       tk->cycle_last = cycle_now;
+       tk->tkr.cycle_last = cycle_now;
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
                }
        }
 
-       if (unlikely(tk->clock->maxadj &&
-               (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
+       if (unlikely(tk->tkr.clock->maxadj &&
+               (tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
                printk_deferred_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
-                       tk->clock->name, (long)tk->mult + adj,
-                       (long)tk->clock->mult + tk->clock->maxadj);
+                       tk->tkr.clock->name, (long)tk->tkr.mult + adj,
+                       (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
        }
        /*
         * So the following can be confusing.
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
-       tk->mult += adj;
+       tk->tkr.mult += adj;
        tk->xtime_interval += interval;
-       tk->xtime_nsec -= offset;
+       tk->tkr.xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
 
 out_adjust:
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
-       if (unlikely((s64)tk->xtime_nsec < 0)) {
-               s64 neg = -(s64)tk->xtime_nsec;
-               tk->xtime_nsec = 0;
+       if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
+               s64 neg = -(s64)tk->tkr.xtime_nsec;
+               tk->tkr.xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }
 
  */
 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
 {
-       u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+       u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
        unsigned int clock_set = 0;
 
-       while (tk->xtime_nsec >= nsecps) {
+       while (tk->tkr.xtime_nsec >= nsecps) {
                int leap;
 
-               tk->xtime_nsec -= nsecps;
+               tk->tkr.xtime_nsec -= nsecps;
                tk->xtime_sec++;
 
                /* Figure out if its a leap sec and apply if needed */
 
        /* Accumulate one shifted interval */
        offset -= interval;
-       tk->cycle_last += interval;
+       tk->tkr.cycle_last += interval;
 
-       tk->xtime_nsec += tk->xtime_interval << shift;
+       tk->tkr.xtime_nsec += tk->xtime_interval << shift;
        *clock_set |= accumulate_nsecs_to_secs(tk);
 
        /* Accumulate raw time */
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = real_tk->cycle_interval;
 #else
-       offset = clocksource_delta(tk->read(tk->clock), tk->cycle_last,
-                                  tk->mask);
+       offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
+                                  tk->tkr.cycle_last, tk->tkr.mask);
 #endif
 
        /* Check if there's really nothing to do */
        do {
                seq = read_seqcount_begin(&tk_core.seq);
 
-               base = tk->base_mono;
-               nsecs = tk->xtime_nsec >> tk->shift;
+               base = tk->tkr.base_mono;
+               nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
 
                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
        do {
                seq = read_seqcount_begin(&tk_core.seq);
 
-               base = tk->base_mono;
+               base = tk->tkr.base_mono;
                nsecs = timekeeping_get_ns(tk);
 
                *offs_real = tk->offs_real;