        if (!bfqg_stats_waiting(stats))
                return;
 
-       now = ktime_get_ns();
+       now = blk_time_get_ns();
        if (now > stats->start_group_wait_time)
                bfq_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
                return;
        if (bfqg == curr_bfqg)
                return;
-       stats->start_group_wait_time = ktime_get_ns();
+       stats->start_group_wait_time = blk_time_get_ns();
        bfqg_stats_mark_waiting(stats);
 }
 
        if (!bfqg_stats_empty(stats))
                return;
 
-       now = ktime_get_ns();
+       now = blk_time_get_ns();
        if (now > stats->start_empty_time)
                bfq_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        if (bfqg_stats_empty(stats))
                return;
 
-       stats->start_empty_time = ktime_get_ns();
+       stats->start_empty_time = blk_time_get_ns();
        bfqg_stats_mark_empty(stats);
 }
 
        struct bfqg_stats *stats = &bfqg->stats;
 
        if (bfqg_stats_idling(stats)) {
-               u64 now = ktime_get_ns();
+               u64 now = blk_time_get_ns();
 
                if (now > stats->start_idle_time)
                        bfq_stat_add(&stats->idle_time,
 {
        struct bfqg_stats *stats = &bfqg->stats;
 
-       stats->start_idle_time = ktime_get_ns();
+       stats->start_idle_time = blk_time_get_ns();
        bfqg_stats_mark_idling(stats);
 }
 
                                  u64 io_start_time_ns, blk_opf_t opf)
 {
        struct bfqg_stats *stats = &bfqg->stats;
-       u64 now = ktime_get_ns();
+       u64 now = blk_time_get_ns();
 
        if (now > io_start_time_ns)
                blkg_rwstat_add(&stats->service_time, opf,
 
 
        rq = rq_entry_fifo(bfqq->fifo.next);
 
-       if (rq == last || ktime_get_ns() < rq->fifo_time)
+       if (rq == last || blk_time_get_ns() < rq->fifo_time)
                return NULL;
 
        bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
                 * bfq_bfqq_update_budg_for_activation for
                 * details on the usage of the next variable.
                 */
-               arrived_in_time =  ktime_get_ns() <=
+               arrived_in_time =  blk_time_get_ns() <=
                        bfqq->ttime.last_end_request +
                        bfqd->bfq_slice_idle * 3;
        unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
        struct request *next_rq, *prev;
        unsigned int old_wr_coeff = bfqq->wr_coeff;
        bool interactive = false;
-       u64 now_ns = ktime_get_ns();
+       u64 now_ns = blk_time_get_ns();
 
        bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
        bfqq->queued[rq_is_sync(rq)]++;
                      bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) &&
                    time_is_before_eq_jiffies(bfqq->decrease_time_jif +
                                              msecs_to_jiffies(10))) {
-                       bfqd->last_empty_occupied_ns = ktime_get_ns();
+                       bfqd->last_empty_occupied_ns = blk_time_get_ns();
                        /*
                         * Start the state machine for measuring the
                         * total service time of rq: setting
        else
                timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
 
-       bfqd->last_budget_start = ktime_get();
+       bfqd->last_budget_start = blk_time_get();
 
        bfqq->budget_timeout = jiffies +
                bfqd->bfq_timeout * timeout_coeff;
        else if (bfqq->wr_coeff > 1)
                sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
 
-       bfqd->last_idling_start = ktime_get();
+       bfqd->last_idling_start = blk_time_get();
        bfqd->last_idling_start_jiffies = jiffies;
 
        hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
                                       struct request *rq)
 {
        if (rq != NULL) { /* new rq dispatch now, reset accordingly */
-               bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
+               bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns();
                bfqd->peak_rate_samples = 1;
                bfqd->sequential_samples = 0;
                bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
  */
 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
 {
-       u64 now_ns = ktime_get_ns();
+       u64 now_ns = blk_time_get_ns();
 
        if (bfqd->peak_rate_samples == 0) { /* first dispatch */
                bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
        if (compensate)
                delta_ktime = bfqd->last_idling_start;
        else
-               delta_ktime = ktime_get();
+               delta_ktime = blk_time_get();
        delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
        delta_usecs = ktime_to_us(delta_ktime);
 
                          struct bfq_io_cq *bic, pid_t pid, int is_sync,
                          unsigned int act_idx)
 {
-       u64 now_ns = ktime_get_ns();
+       u64 now_ns = blk_time_get_ns();
 
        bfqq->actuator_idx = act_idx;
        RB_CLEAR_NODE(&bfqq->entity.rb_node);
         */
        if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
                return;
-       elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+       elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request;
        elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
 
        ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
        bfq_add_request(rq);
        idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
 
-       rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+       rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
        list_add_tail(&rq->queuelist, &bfqq->fifo);
 
        bfq_rq_enqueued(bfqd, bfqq, rq);
                bfq_weights_tree_remove(bfqq);
        }
 
-       now_ns = ktime_get_ns();
+       now_ns = blk_time_get_ns();
 
        bfqq->ttime.last_end_request = now_ns;
 
 static void bfq_update_inject_limit(struct bfq_data *bfqd,
                                    struct bfq_queue *bfqq)
 {
-       u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+       u64 tot_time_ns = blk_time_get_ns() - bfqd->last_empty_occupied_ns;
        unsigned int old_limit = bfqq->inject_limit;
 
        if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
 
 {
        unsigned long pflags;
        bool clamp;
-       u64 now = ktime_to_ns(ktime_get());
+       u64 now = blk_time_get_ns();
        u64 exp;
        u64 delay_nsec = 0;
        int tok;
 
        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
-                     ktime_get_ns() - rq->start_time_ns);
+                     blk_time_get_ns() - rq->start_time_ns);
        part_stat_unlock();
 }
 
 
 
        /* step up/down based on the vrate */
        vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
-       now_ns = ktime_get_ns();
+       now_ns = blk_time_get_ns();
 
        if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
                if (!ioc->autop_too_fast_at)
        unsigned seq;
        u64 vrate;
 
-       now->now_ns = ktime_get();
+       now->now_ns = blk_time_get_ns();
        now->now = ktime_to_us(now->now_ns);
        vrate = atomic64_read(&ioc->vtime_rate);
 
                return;
        }
 
-       on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
+       on_q_ns = blk_time_get_ns() - rq->alloc_time_ns;
        rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
        size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
 
        ioc->vtime_base_rate = VTIME_PER_USEC;
        atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
        seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
-       ioc->period_at = ktime_to_us(ktime_get());
+       ioc->period_at = ktime_to_us(blk_time_get());
        atomic64_set(&ioc->cur_period, 0);
        atomic_set(&ioc->hweight_gen, 0);
 
 
        if (!iolat->blkiolat->enabled)
                return;
 
-       now = ktime_to_ns(ktime_get());
+       now = blk_time_get_ns();
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
-       u64 now = ktime_to_ns(ktime_get());
+       u64 now = blk_time_get_ns();
 
        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = iolat_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-       u64 now = ktime_to_ns(ktime_get());
+       u64 now = blk_time_get_ns();
        int cpu;
 
        if (blk_queue_nonrot(blkg->q))
 
        RB_CLEAR_NODE(&rq->rb_node);
        rq->tag = BLK_MQ_NO_TAG;
        rq->internal_tag = BLK_MQ_NO_TAG;
-       rq->start_time_ns = ktime_get_ns();
+       rq->start_time_ns = blk_time_get_ns();
        rq->part = NULL;
        blk_crypto_rq_set_defaults(rq);
 }
 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
 {
        if (blk_mq_need_time_stamp(rq))
-               rq->start_time_ns = ktime_get_ns();
+               rq->start_time_ns = blk_time_get_ns();
        else
                rq->start_time_ns = 0;
 
 
        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
-               alloc_time_ns = ktime_get_ns();
+               alloc_time_ns = blk_time_get_ns();
 
        if (data->cmd_flags & REQ_NOWAIT)
                data->flags |= BLK_MQ_REQ_NOWAIT;
 
        /* alloc_time includes depth and tag waits */
        if (blk_queue_rq_alloc_time(q))
-               alloc_time_ns = ktime_get_ns();
+               alloc_time_ns = blk_time_get_ns();
 
        /*
         * If the tag allocator sleeps we could get an allocation for a
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        if (blk_mq_need_time_stamp(rq))
-               __blk_mq_end_request_acct(rq, ktime_get_ns());
+               __blk_mq_end_request_acct(rq, blk_time_get_ns());
 
        blk_mq_finish_request(rq);
 
        u64 now = 0;
 
        if (iob->need_ts)
-               now = ktime_get_ns();
+               now = blk_time_get_ns();
 
        while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
                prefetch(rq->bio);
 
        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
            !blk_rq_is_passthrough(rq)) {
-               rq->io_start_time_ns = ktime_get_ns();
+               rq->io_start_time_ns = blk_time_get_ns();
                rq->stats_sectors = blk_rq_sectors(rq);
                rq->rq_flags |= RQF_STATS;
                rq_qos_issue(q, rq);
        blk_mq_run_dispatch_ops(q,
                        ret = blk_mq_request_issue_directly(rq, true));
        if (ret)
-               blk_account_io_done(rq, ktime_get_ns());
+               blk_account_io_done(rq, blk_time_get_ns());
        return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
        time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
        ret = tg->latency_target == DFL_LATENCY_TARGET ||
              tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
-             (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
+             (blk_time_get_ns() >> 10) - tg->last_finish_time > time ||
              tg->avg_idletime > tg->idletime_threshold ||
              (tg->latency_target && tg->bio_cnt &&
                tg->bad_bio_cnt * 5 < tg->bio_cnt);
        if (last_finish_time == 0)
                return;
 
-       now = ktime_get_ns() >> 10;
+       now = blk_time_get_ns() >> 10;
        if (now <= last_finish_time ||
            last_finish_time == tg->checked_last_finish_time)
                return;
        if (!tg->td->limit_valid[LIMIT_LOW])
                return;
 
-       finish_time_ns = ktime_get_ns();
+       finish_time_ns = blk_time_get_ns();
        tg->last_finish_time = finish_time_ns >> 10;
 
        start_time = bio_issue_time(&bio->bi_issue) >> 10;
 
 #include "blk-wbt.h"
 #include "blk-rq-qos.h"
 #include "elevator.h"
+#include "blk.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/wbt.h>
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
 {
-       u64 now, issue = READ_ONCE(rwb->sync_issue);
+       u64 issue = READ_ONCE(rwb->sync_issue);
 
        if (!issue || !rwb->sync_cookie)
                return 0;
 
-       now = ktime_to_ns(ktime_get());
-       return now - issue;
+       return blk_time_get_ns() - issue;
 }
 
 static inline unsigned int wbt_inflight(struct rq_wb *rwb)
 
 
 #include <linux/blk-crypto.h>
 #include <linux/memblock.h>    /* for max_pfn/max_low_pfn */
+#include <linux/timekeeping.h>
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 
        return atomic_read(&req->ref);
 }
 
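+/*
+ * Thin wrappers around ktime_get_ns()/ktime_get(). Block layer callers go
+ * through these helpers so that all time stamping in the block layer is
+ * funnelled through a single place.
+ */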
+static inline u64 blk_time_get_ns(void)
+{
+       return ktime_get_ns();
+}
+
+static inline ktime_t blk_time_get(void)
+{
+       return ns_to_ktime(blk_time_get_ns());
+}
+
 /*
  * From most significant bit:
  * 1 bit: reserved for other usage, see below
 {
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
-                       (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
+                       (blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
 }