return NULL;
 }
+EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 
 /*
  * If @new_blkg is %NULL, this function tries to allocate a new one as
                        return blkg;
        }
 }
-EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 
         */
        create_io_context(GFP_ATOMIC, q->node);
 
-       if (blk_throtl_bio(q, bio))
-               return false;   /* throttled, will be resubmitted later */
+       if (!blkcg_bio_issue_check(q, bio))
+               return false;   /* throttled, will be resubmitted later */
 
        trace_block_bio_queue(q, bio);
        return true;
 
        return pd_to_blkg(&tg->pd);
 }
 
-static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
-{
-       return blkg_to_tg(td->queue->root_blkg);
-}
-
 /**
  * sq_to_tg - return the throtl_grp the specified service queue belongs to
  * @sq: the throtl_service_queue of interest
        }
 }
 
-static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
-                                          struct blkcg *blkcg)
-{
-       return blkg_to_tg(blkg_lookup(blkcg, td->queue));
-}
-
-static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
-                                                 struct blkcg *blkcg)
-{
-       struct request_queue *q = td->queue;
-       struct throtl_grp *tg = NULL;
-
-       /*
-        * This is the common case when there are no blkcgs.  Avoid lookup
-        * in this case
-        */
-       if (blkcg == &blkcg_root) {
-               tg = td_root_tg(td);
-       } else {
-               struct blkcg_gq *blkg;
-
-               blkg = blkg_lookup_create(blkcg, q);
-
-               /* if %NULL and @q is alive, fall back to root_tg */
-               if (!IS_ERR(blkg))
-                       tg = blkg_to_tg(blkg);
-               else
-                       tg = td_root_tg(td);
-       }
-
-       return tg;
-}
-
 static struct throtl_grp *
 throtl_rb_first(struct throtl_service_queue *parent_sq)
 {
        .pd_reset_stats_fn      = throtl_pd_reset_stats,
 };
 
-bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
+bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                   struct bio *bio)
 {
-       struct throtl_data *td = q->td;
        struct throtl_qnode *qn = NULL;
-       struct throtl_grp *tg;
+       struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
        struct throtl_service_queue *sq;
        bool rw = bio_data_dir(bio);
-       struct blkcg *blkcg;
        bool throttled = false;
 
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
        /* see throtl_charge_bio() */
-       if (bio->bi_rw & REQ_THROTTLED)
+       if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
                goto out;
 
-       /*
-        * A throtl_grp pointer retrieved under rcu can be used to access
-        * basic fields like stats and io rates. If a group has no rules,
-        * just update the dispatch stats in lockless manner and return.
-        */
-       rcu_read_lock();
-       blkcg = bio_blkcg(bio);
-       tg = throtl_lookup_tg(td, blkcg);
-       if (tg) {
-               if (!tg->has_rules[rw]) {
-                       throtl_update_dispatch_stats(tg_to_blkg(tg),
-                                       bio->bi_iter.bi_size, bio->bi_rw);
-                       goto out_unlock_rcu;
-               }
-       }
-
-       /*
-        * Either group has not been allocated yet or it is not an unlimited
-        * IO group
-        */
        spin_lock_irq(q->queue_lock);
 
        if (unlikely(blk_queue_bypass(q)))
                goto out_unlock;
 
-       tg = throtl_lookup_create_tg(td, blkcg);
        sq = &tg->service_queue;
 
        while (true) {
 
 out_unlock:
        spin_unlock_irq(q->queue_lock);
-out_unlock_rcu:
-       rcu_read_unlock();
 out:
        /*
         * As multiple blk-throtls may stack in the same issue path, we
 
  * Internal throttling interface
  */
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
-{
-       return false;
-}
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
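For reference, a minimal sketch of the contract the reworked blk_throtl_bio()
now expects from its caller (example_throttle() is a made-up helper, not part
of the patch): the caller holds rcu_read_lock(), resolves the blkg beforehand,
may pass NULL to fall back to q->root_blkg, and treats a true return as "the
bio was throttled and taken over by blk-throttle":

static bool example_throttle(struct request_queue *q, struct bio *bio)
{
        struct blkcg_gq *blkg;
        bool throttled;

        rcu_read_lock();
        blkg = blkg_lookup(bio_blkcg(bio), q);          /* may be NULL */
        throttled = blk_throtl_bio(q, blkg, bio);       /* NULL -> root_blkg */
        rcu_read_unlock();

        return throttled;
}

This is essentially what the new blkcg_bio_issue_check() helper in
blk-cgroup.h (further down) does, minus the on-demand blkg creation.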
 
        cfqg_stats_reset(&cfqg->dead_stats);
 }
 
-/*
- * Search for the cfq group current task belongs to. request_queue lock must
- * be held.
- */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
-                                               struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+                                        struct blkcg *blkcg)
 {
-       struct request_queue *q = cfqd->queue;
-       struct cfq_group *cfqg = NULL;
-
-       /* avoid lookup for the common case where there's no blkcg */
-       if (blkcg == &blkcg_root) {
-               cfqg = cfqd->root_group;
-       } else {
-               struct blkcg_gq *blkg;
-
-               blkg = blkg_lookup_create(blkcg, q);
-               if (!IS_ERR(blkg))
-                       cfqg = blkg_to_cfqg(blkg);
-       }
+       struct blkcg_gq *blkg;
 
-       return cfqg;
+       blkg = blkg_lookup(blkcg, cfqd->queue);
+       if (likely(blkg))
+               return blkg_to_cfqg(blkg);
+       return NULL;
 }
 
 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
        { }     /* terminate */
 };
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
-                                               struct blkcg *blkcg)
+static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
+                                        struct blkcg *blkcg)
 {
        return cfqd->root_group;
 }
        struct cfq_group *cfqg;
 
        rcu_read_lock();
-       cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
+       cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
        if (!cfqg) {
                cfqq = &cfqd->oom_cfqq;
                goto out;
 
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
-       blkg = blkg_lookup_create(blkcg, q);
-       if (unlikely(IS_ERR(blkg)))
+       blkg = blkg_lookup(blkcg, q);
+       if (unlikely(!blkg))
                goto root_rl;
 
        blkg_get(blkg);
        u64_stats_update_end(&to->syncp);
 }
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                          struct bio *bio);
+#else
+static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
+                                 struct bio *bio) { return false; }
+#endif
+
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+                                        struct bio *bio)
+{
+       struct blkcg *blkcg;
+       struct blkcg_gq *blkg;
+       bool throtl = false;
+
+       rcu_read_lock();
+       blkcg = bio_blkcg(bio);
+
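+       /* try the lockless lookup first; create under queue_lock if missing */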
+       blkg = blkg_lookup(blkcg, q);
+       if (unlikely(!blkg)) {
+               spin_lock_irq(q->queue_lock);
+               blkg = blkg_lookup_create(blkcg, q);
+               if (IS_ERR(blkg))
+                       blkg = NULL;
+               spin_unlock_irq(q->queue_lock);
+       }
+
+       throtl = blk_throtl_bio(q, blkg, bio);
+
+       rcu_read_unlock();
+       return !throtl;
+}
+
 #else  /* CONFIG_BLK_CGROUP */
 
 struct blkcg {
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
+static inline bool blkcg_bio_issue_check(struct request_queue *q,
+                                        struct bio *bio) { return true; }
+
 #define blk_queue_for_each_rl(rl, q)   \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
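
As a rough sketch of the !CONFIG_BLK_CGROUP side (example_without_blkcg() is a
hypothetical function, not part of the patch): blkcg_bio_issue_check() compiles
down to a stub that always lets the bio through, and blk_queue_for_each_rl()
degenerates to a single iteration over q->root_rl:

static void example_without_blkcg(struct request_queue *q, struct bio *bio)
{
        struct request_list *rl;

        /* the stub above always returns true, so this is never taken */
        if (!blkcg_bio_issue_check(q, bio))
                return;

        /* visits only &q->root_rl when blkcg is disabled */
        blk_queue_for_each_rl(rl, q)
                ;       /* per-request_list bookkeeping would go here */
}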