bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 
        /* If tg->bps = -1, then BW is unlimited */
-       if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
+       if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
+           tg->flags & THROTL_TG_CANCELING) {
                if (wait)
                        *wait = 0;
                return true;
        return false;
 }
 
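+/*
+ * blk_throtl_cancel_bios - dispatch all bios currently held back by blk-throttle
+ * @q: request queue whose throttled bios should be dispatched
+ *
+ * Mark every throttle group of @q as THROTL_TG_CANCELING so that
+ * tg_may_dispatch() no longer applies any limit to it, then kick the
+ * pending timers so that every queued bio is dispatched.
+ */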
+void blk_throtl_cancel_bios(struct request_queue *q)
+{
+       struct cgroup_subsys_state *pos_css;
+       struct blkcg_gq *blkg;
+
+       spin_lock_irq(&q->queue_lock);
+       /*
+        * queue_lock is held, so the rcu lock is technically not needed
+        * here. However, take it anyway to emphasize that the following
+        * path needs RCU protection and to avoid a warning from lockdep.
+        */
+       rcu_read_lock();
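+       /* Mark every throttle group on this queue and kick its pending timer */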
+       blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
+               struct throtl_grp *tg = blkg_to_tg(blkg);
+               struct throtl_service_queue *sq = &tg->service_queue;
+
+               /*
+                * Set the flag to make sure throtl_pending_timer_fn() won't
+                * stop until all throttled bios are dispatched.
+                */
+               tg->flags |= THROTL_TG_CANCELING;
+               /*
+                * Update disptime after setting the above flag to make sure
+                * throtl_select_dispatch() won't exit without dispatching.
+                */
+               tg_update_disptime(tg);
+
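+               /* Kick the pending timer so the queued bios are dispatched promptly */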
+               throtl_schedule_pending_timer(sq, jiffies + 1);
+       }
+       rcu_read_unlock();
+       spin_unlock_irq(&q->queue_lock);
+}
+
 static bool throtl_can_upgrade(struct throtl_data *td,
        struct throtl_grp *this_tg)
 {
 
        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
        THROTL_TG_HAS_IOPS_LIMIT = 1 << 2,      /* tg has iops limit */
+       THROTL_TG_CANCELING     = 1 << 3,       /* starting to cancel throttled bios */
 };
 
 enum {
 static inline void blk_throtl_exit(struct request_queue *q) { }
 static inline void blk_throtl_register_queue(struct request_queue *q) { }
 static inline bool blk_throtl_bio(struct bio *bio) { return false; }
+static inline void blk_throtl_cancel_bios(struct request_queue *q) { }
 #else /* CONFIG_BLK_DEV_THROTTLING */
 int blk_throtl_init(struct request_queue *q);
 void blk_throtl_exit(struct request_queue *q);
 void blk_throtl_register_queue(struct request_queue *q);
 bool __blk_throtl_bio(struct bio *bio);
+void blk_throtl_cancel_bios(struct request_queue *q);
 static inline bool blk_throtl_bio(struct bio *bio)
 {
        struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
 
 #include <linux/pm_runtime.h>
 #include <linux/badblocks.h>
 #include <linux/part_stat.h>
+#include "blk-throttle.h"
 
 #include "blk.h"
 #include "blk-mq-sched.h"
 
        blk_mq_freeze_queue_wait(q);
 
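+       /* Make blk-throttle dispatch any bios it is still holding back */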
+       blk_throtl_cancel_bios(disk->queue);
+
        blk_sync_queue(q);
        blk_flush_integrity();
        /*