        unsigned long last_check_time;
 
+       /* When did we last dispatch a bio in each direction */
+       unsigned long last_dispatch_time[2];
+
        /* When did we start a new slice */
        unsigned long slice_start[2];
        unsigned long slice_end[2];
 
 static void throtl_pd_online(struct blkg_policy_data *pd)
 {
+       struct throtl_grp *tg = pd_to_tg(pd);
+
        /*
         * We don't want new groups to escape the limits of its ancestors.
         * Update has_rules[] after a new group is brought online.
         */
-       tg_update_has_rules(pd_to_tg(pd));
+       tg_update_has_rules(tg);
+       tg->last_dispatch_time[READ] = jiffies;
+       tg->last_dispatch_time[WRITE] = jiffies;
 }
 
 static void blk_throtl_update_limit_valid(struct throtl_data *td)
        if (write_limit && sq->nr_queued[WRITE] &&
            (!read_limit || sq->nr_queued[READ]))
                return true;
+
+       if (time_after_eq(jiffies,
+            tg->last_dispatch_time[READ] + tg->td->throtl_slice) &&
+           time_after_eq(jiffies,
+            tg->last_dispatch_time[WRITE] + tg->td->throtl_slice))
+               return true;
        return false;
 }
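
The check above relies on the kernel's wraparound-safe time_after_eq() to decide that a group has gone idle: both directions must be at least one throtl_slice past their last dispatch. A minimal user-space sketch of just that test (time_after_eq, READ/WRITE, the tg_model struct and tg_idle() are all modelled locally here for illustration and are not kernel code):

#include <stdio.h>
#include <stdbool.h>

/* User-space stand-in for the kernel's wraparound-safe comparison. */
#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

enum { READ = 0, WRITE = 1 };

struct tg_model {
        unsigned long last_dispatch_time[2];    /* jiffies of last dispatch, per direction */
        unsigned long throtl_slice;             /* slice length in jiffies */
};

/* Idle means: neither direction has dispatched a bio for at least one slice. */
static bool tg_idle(const struct tg_model *tg, unsigned long now)
{
        return time_after_eq(now, tg->last_dispatch_time[READ] + tg->throtl_slice) &&
               time_after_eq(now, tg->last_dispatch_time[WRITE] + tg->throtl_slice);
}

int main(void)
{
        struct tg_model tg = { .last_dispatch_time = { 100, 100 }, .throtl_slice = 25 };

        printf("t=110: idle=%d\n", tg_idle(&tg, 110));  /* 0: still within a slice */
        printf("t=130: idle=%d\n", tg_idle(&tg, 130));  /* 1: a full slice has passed */
        return 0;
}

In the patch, a group that passes this test no longer has to reach its low limit before throtl_tg_can_upgrade() lets the queue move up to LIMIT_MAX.
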
 
        struct throtl_data *td = tg->td;
        unsigned long now = jiffies;
 
+       if (time_after_eq(now, tg->last_dispatch_time[READ] +
+                                       td->throtl_slice) &&
+           time_after_eq(now, tg->last_dispatch_time[WRITE] +
+                                       td->throtl_slice))
+               return false;
        /*
         * If cgroup is below low limit, consider downgrade and throttle other
         * cgroups
 
 again:
        while (true) {
+               tg->last_dispatch_time[rw] = jiffies;
                if (tg->last_low_overflow_time[rw] == 0)
                        tg->last_low_overflow_time[rw] = jiffies;
                throtl_downgrade_check(tg);
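
Taken together, the hunks form one mechanism: the throttling loop in blk_throtl_bio() (last hunk) stamps tg->last_dispatch_time[rw] with the current jiffies each time it handles a bio for the group, throtl_pd_online() seeds both timestamps with jiffies (presumably so a freshly onlined group is not treated as idle before it has had a chance to issue any I/O), and the upgrade/downgrade paths compare those timestamps against throtl_slice, as in the tg_idle() sketch above. The net effect is that a cgroup that has stopped submitting I/O for at least one slice in both directions neither blocks an upgrade to LIMIT_MAX nor triggers a downgrade back to LIMIT_LOW.
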