 static u64 read_internal_timer(const struct cyclecounter *cc)
 {
-       struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+       struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
+       struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
 
        return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
 }
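
The double container_of() above works because the cyclecounter is now nested
one level deeper. As a sketch reconstructed only from the fields this patch
touches (not the complete definitions), the new layout is:

        struct mlx5_timer {
                struct cyclecounter     cycles;
                struct timecounter      tc;
                u32                     nominal_c_mult;
                unsigned long           overflow_period;
                struct delayed_work     overflow_work;
        };

        struct mlx5_clock {
                struct mlx5_nb          pps_nb;
                seqlock_t               lock;
                struct ptp_clock_info   ptp_info;
                struct mlx5_pps         pps_info;
                struct mlx5_timer       timer;
                /* ... */
        };

so cc resolves to the timer first, and the timer's containing clock still
leads back to the owning mlx5_core_dev.
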
 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
 {
        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
        struct mlx5_clock *clock = &mdev->clock;
+       struct mlx5_timer *timer;
        u32 sign;
 
        if (!clock_info)
                return;
 
        sign = smp_load_acquire(&clock_info->sign);
        smp_store_mb(clock_info->sign,
                     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
 
-       clock_info->cycles = clock->tc.cycle_last;
-       clock_info->mult   = clock->cycles.mult;
-       clock_info->nsec   = clock->tc.nsec;
-       clock_info->frac   = clock->tc.frac;
+       timer = &clock->timer;
+       clock_info->cycles = timer->tc.cycle_last;
+       clock_info->mult   = timer->cycles.mult;
+       clock_info->nsec   = timer->tc.nsec;
+       clock_info->frac   = timer->tc.frac;
 
        smp_store_release(&clock_info->sign,
                          sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
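
sign acts as a seqlock-style generation counter shared with userspace via the
mlx5_ib clock-info page: smp_store_mb() publishes a value with the UPDATING
bit set before the fields change, and the closing smp_store_release() lands on
the next clean value. A reader is expected to loop roughly as below (a
hypothetical sketch, assuming MLX5_IB_CLOCK_INFO_KERNEL_UPDATING is bit 0;
userspace would use equivalent barriers):

        do {
                seq = smp_load_acquire(&clock_info->sign);
                /* copy cycles, mult, nsec, frac ... */
                smp_rmb();
        } while ((seq & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING) ||
                 seq != READ_ONCE(clock_info->sign));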
 
 static void mlx5_timestamp_overflow(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5_core_dev *mdev;
+       struct mlx5_timer *timer;
        struct mlx5_clock *clock;
        unsigned long flags;
 
-       clock = container_of(dwork, struct mlx5_clock, overflow_work);
+       timer = container_of(dwork, struct mlx5_timer, overflow_work);
+       clock = container_of(timer, struct mlx5_clock, timer);
        mdev = container_of(clock, struct mlx5_core_dev, clock);
+
        write_seqlock_irqsave(&clock->lock, flags);
-       timecounter_read(&clock->tc);
+       timecounter_read(&timer->tc);
        mlx5_update_clock_info_page(mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
-       schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+       schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
 }
 
 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
 {
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+       struct mlx5_timer *timer = &clock->timer;
        u64 ns = timespec64_to_ns(ts);
        struct mlx5_core_dev *mdev;
        unsigned long flags;
 
        mdev = container_of(clock, struct mlx5_core_dev, clock);
        write_seqlock_irqsave(&clock->lock, flags);
-       timecounter_init(&clock->tc, &clock->cycles, ns);
+       timecounter_init(&timer->tc, &timer->cycles, ns);
        mlx5_update_clock_info_page(mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
 
        return 0;
 }
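
timecounter_init() latches the current cycle count as tc.cycle_last and
installs ns as tc.nsec, so the call re-bases the software clock at "now = ns"
and every later timecounter_cyc2time() extrapolates from that point:

        /* after the call: time(c) = ns + cyc2ns(c - tc.cycle_last) */

That is why settime needs no explicit hardware read, unlike gettimex below.
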
 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
                              struct ptp_system_timestamp *sts)
 {
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+       struct mlx5_timer *timer = &clock->timer;
        struct mlx5_core_dev *mdev;
        unsigned long flags;
        u64 cycles, ns;
 
        mdev = container_of(clock, struct mlx5_core_dev, clock);
        write_seqlock_irqsave(&clock->lock, flags);
        cycles = mlx5_read_internal_timer(mdev, sts);
-       ns = timecounter_cyc2time(&clock->tc, cycles);
+       ns = timecounter_cyc2time(&timer->tc, cycles);
        write_sequnlock_irqrestore(&clock->lock, flags);
 
        *ts = ns_to_timespec64(ns);
 
        return 0;
 }
 
 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+       struct mlx5_timer *timer = &clock->timer;
        struct mlx5_core_dev *mdev;
        unsigned long flags;
 
        mdev = container_of(clock, struct mlx5_core_dev, clock);
        write_seqlock_irqsave(&clock->lock, flags);
-       timecounter_adjtime(&clock->tc, delta);
+       timecounter_adjtime(&timer->tc, delta);
        mlx5_update_clock_info_page(mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
 
        return 0;
 }
 
 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+       struct mlx5_timer *timer = &clock->timer;
        struct mlx5_core_dev *mdev;
        unsigned long flags;
        int neg_adj = 0;
        u32 diff;
        u64 adj;
 
-
        if (delta < 0) {
                neg_adj = 1;
                delta = -delta;
        }
 
-       adj = clock->nominal_c_mult;
+       adj = timer->nominal_c_mult;
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);
 
        mdev = container_of(clock, struct mlx5_core_dev, clock);
        write_seqlock_irqsave(&clock->lock, flags);
-       timecounter_read(&clock->tc);
-       clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
-                                      clock->nominal_c_mult + diff;
+       timecounter_read(&timer->tc);
+       timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
+                                      timer->nominal_c_mult + diff;
        mlx5_update_clock_info_page(mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
 
        return 0;
 }
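
delta is in parts per billion, so the multiplier moves by
nominal_c_mult * |delta| / 10^9 around its nominal value. With made-up numbers
for illustration: nominal_c_mult = 2^24 and delta = +100 ppb give
adj = 16777216 * 100 = 1677721600 and diff = 1; since 1/2^24 is about 60 ppb,
the realized step is coarser than the request because the integer division
truncates. Calling timecounter_read() first folds the time already elapsed
into tc.nsec at the old rate, so the new mult only applies from this point on.
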
 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                                  struct ptp_clock_request *rq,
                                  int on)
 {
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
+       struct mlx5_timer *timer = &clock->timer;
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
                ns = timespec64_to_ns(&ts);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                write_seqlock_irqsave(&clock->lock, flags);
-               nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+               nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
                nsec_delta = ns - nsec_now;
-               cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
-                                        clock->cycles.mult);
+               cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+                                        timer->cycles.mult);
                write_sequnlock_irqrestore(&clock->lock, flags);
                time_stamp = cycles_now + cycles_delta;
                field_select = MLX5_MTPPS_FS_PIN_MODE |
 
 static int mlx5_pps_event(struct notifier_block *nb,
                           unsigned long type, void *data)
 {
        struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+       struct mlx5_timer *timer = &clock->timer;
        struct ptp_clock_event ptp_event;
        u64 cycles_now, cycles_delta;
        u64 nsec_now, nsec_delta, ns;
                ts.tv_nsec = 0;
                ns = timespec64_to_ns(&ts);
                write_seqlock_irqsave(&clock->lock, flags);
-               nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+               nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
                nsec_delta = ns - nsec_now;
-               cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
-                                        clock->cycles.mult);
+               cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+                                        timer->cycles.mult);
                clock->pps_info.start[pin] = cycles_now + cycles_delta;
                write_sequnlock_irqrestore(&clock->lock, flags);
                schedule_work(&clock->pps_info.out_work);
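
Both cycles_delta computations (here and in the perout path above) invert the
cyclecounter's forward conversion. Since ns = (cycles * mult) >> shift, a
nanosecond delta maps back to cycles as

        cycles_delta = (nsec_delta << shift) / mult

which is exactly what the div64_u64() evaluates. One constraint worth keeping
in mind: nsec_delta << shift must still fit in 64 bits, which bounds how far
into the future a pulse can be programmed.
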
 static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
 {
        struct mlx5_clock *clock = &mdev->clock;
+       struct mlx5_timer *timer = &clock->timer;
        u32 dev_freq;
 
        dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
-       clock->cycles.read = read_internal_timer;
-       clock->cycles.shift = MLX5_CYCLES_SHIFT;
-       clock->cycles.mult = clocksource_khz2mult(dev_freq,
-                                                 clock->cycles.shift);
-       clock->nominal_c_mult = clock->cycles.mult;
-       clock->cycles.mask = CLOCKSOURCE_MASK(41);
-
-       timecounter_init(&clock->tc, &clock->cycles,
+       timer->cycles.read = read_internal_timer;
+       timer->cycles.shift = MLX5_CYCLES_SHIFT;
+       timer->cycles.mult = clocksource_khz2mult(dev_freq,
+                                                 timer->cycles.shift);
+       timer->nominal_c_mult = timer->cycles.mult;
+       timer->cycles.mask = CLOCKSOURCE_MASK(41);
+
+       timecounter_init(&timer->tc, &timer->cycles,
                         ktime_to_ns(ktime_get_real()));
 }
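
clocksource_khz2mult() chooses mult so that (cycles * mult) >> shift yields
nanoseconds for a counter ticking at dev_freq kHz, i.e. roughly
mult = (10^6 << shift) / dev_freq. As an illustration with a round number (not
a real device value): a 1 GHz counter gives dev_freq = 1000000 and
mult = 1 << MLX5_CYCLES_SHIFT, one nanosecond per cycle. CLOCKSOURCE_MASK(41)
matches the 41 valid bits of the hardware free-running counter, and
read_internal_timer from the first hunk is installed as the read callback.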
 
 static void mlx5_init_overflow_period(struct mlx5_clock *clock)
 {
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+       struct mlx5_timer *timer = &clock->timer;
        u64 overflow_cycles;
        u64 frac = 0;
        u64 ns;
 
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least twice every wrap around.
         * The period is calculated as the minimum between max HW cycles count
         * (The clock source mask) and max amount of cycles that can be
         * multiplied by clock multiplier where the result doesn't exceed
         * 64bits.
         */
-       overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
-       overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+       overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
+       overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
 
-       ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
+       ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
                                 frac, &frac);
        do_div(ns, NSEC_PER_SEC / HZ);
-       clock->overflow_period = ns;
+       timer->overflow_period = ns;
 
-       INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
-       if (clock->overflow_period)
-               schedule_delayed_work(&clock->overflow_work, 0);
+       INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+       if (timer->overflow_period)
+               schedule_delayed_work(&timer->overflow_work, 0);
        else
                mlx5_core_warn(mdev,
                               "invalid overflow period, overflow_work is not scheduled\n");
 
        if (clock_info)
-               clock_info->overflow_period = clock->overflow_period;
+               clock_info->overflow_period = timer->overflow_period;
 }
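
The two bounds on overflow_cycles serve different purposes:
div64_u64(~0ULL >> 1, mult) is the largest cycle delta whose cyc2ns() product
still fits in 64 bits, while div_u64(mask, 3) makes the watchdog fire several
times per hardware wrap, so a rollover of the 41-bit counter cannot be missed
between two timecounter_read() calls. Continuing the 1 GHz illustration from
above: the counter wraps after 2^41 ns, roughly 37 minutes, so the work runs
at most every ~12 minutes (sooner if the 64-bit product bound is smaller).
do_div(ns, NSEC_PER_SEC / HZ) converts that period from nanoseconds into the
jiffies that schedule_delayed_work() expects, and mlx5_timestamp_overflow
above re-arms itself with the same period.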
 
 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
 {
        struct mlx5_clock *clock = &mdev->clock;
        struct mlx5_ib_clock_info *info;
+       struct mlx5_timer *timer;
 
        mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
        if (!mdev->clock_info) {
        }
 
        info = mdev->clock_info;
-
-       info->nsec = clock->tc.nsec;
-       info->cycles = clock->tc.cycle_last;
-       info->mask = clock->cycles.mask;
-       info->mult = clock->nominal_c_mult;
-       info->shift = clock->cycles.shift;
-       info->frac = clock->tc.frac;
+       timer = &clock->timer;
+
+       info->nsec = timer->tc.nsec;
+       info->cycles = timer->tc.cycle_last;
+       info->mask = timer->cycles.mask;
+       info->mult = timer->nominal_c_mult;
+       info->shift = timer->cycles.shift;
+       info->frac = timer->tc.frac;
 }
 
 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
        }
 
        cancel_work_sync(&clock->pps_info.out_work);
-       cancel_delayed_work_sync(&clock->overflow_work);
+       cancel_delayed_work_sync(&clock->timer.overflow_work);
 
        if (mdev->clock_info) {
                free_page((unsigned long)mdev->clock_info);