{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
-       hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_TAI,
+       hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ ... @@
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_tai_ns();
+       u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
@@ ... @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
-       qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
+       qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
 
        if (opt)
                err = fq_change(sch, opt, extack);
@@ ... @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
-       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
+       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
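
For reference, here is a minimal userspace sketch (not part of the patch) that reads the two clock bases involved, assuming a Linux system whose clock_gettime() supports CLOCK_TAI (3.10+). In-kernel, ktime_get_ns() returns CLOCK_MONOTONIC time and ktime_get_tai_ns() returns CLOCK_TAI time, so the hunks above move the pacing hrtimer, the fq watchdog, fq_dequeue()'s now and the dumped time_next_delayed_flow onto the monotonic base together.

/*
 * Userspace sketch, not from the patch: compare the two clock bases.
 * CLOCK_MONOTONIC counts from an arbitrary point near boot and never
 * steps; CLOCK_TAI follows International Atomic Time (realtime plus
 * the kernel's TAI offset).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec tai, mono;

        clock_gettime(CLOCK_TAI, &tai);        /* base of ktime_get_tai_ns() */
        clock_gettime(CLOCK_MONOTONIC, &mono); /* base of ktime_get_ns()     */

        printf("CLOCK_TAI       %lld.%09ld\n", (long long)tai.tv_sec, tai.tv_nsec);
        printf("CLOCK_MONOTONIC %lld.%09ld\n", (long long)mono.tv_sec, mono.tv_nsec);
        return 0;
}

The two readings differ wildly, so a stored timestamp only stays meaningful when compared against a "now" taken from the same base; keeping TCP pacing and sch_fq on a single clock is what makes deltas such as q->time_next_delayed_flow - ktime_get_ns() above well defined.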