debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
        debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+       debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
        debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
 
        debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
+       PN(sysctl_sched_idle_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
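
Given the "  .%-40s: %Ld.%06ld" format used by PN() above, the new field shows up in the sched debug header alongside the other granularity tunables; with the 750000ULL default (and no boot-time rescaling of this particular knob) the line would read approximately:

  .sysctl_sched_idle_min_granularity       : 0.750000
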
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
 unsigned int sysctl_sched_min_granularity                      = 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity    = 750000ULL;
 
+/*
+ * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
+ * Applies only when SCHED_IDLE tasks compete with normal tasks.
+ *
+ * (default: 0.75 msec)
+ */
+unsigned int sysctl_sched_idle_min_granularity                 = 750000ULL;
+
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
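
To see why a separate floor for idle entities is useful, plug in stock defaults on a single-CPU machine (where the boot-time tunable scaling factor is 1): sysctl_sched_latency is 6ms, a nice-0 task has load weight 1024, and a SCHED_IDLE task has WEIGHT_IDLEPRIO = 3. With one of each runnable, the idle entity's weight-proportional share of the period is roughly 6ms * 3/1027 ≈ 17.5us, far below either floor. Under BASE_SLICE that used to be rounded up to the shared 0.75ms minimum; after this patch the idle entity is clamped against sysctl_sched_idle_min_granularity instead, so the idle floor can be lowered on its own to get a competing normal task back on the CPU sooner, without shrinking the slices of normal tasks.
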
                return sysctl_sched_latency;
 }
 
+static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
+
 /*
  * We calculate the wall-time slice from the period by taking a part
  * proportional to the weight.
  *
  * s = p*P[w/rw]
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        unsigned int nr_running = cfs_rq->nr_running;
+       struct sched_entity *init_se = se;
+       unsigned int min_gran;
        u64 slice;
 
        if (sched_feat(ALT_PERIOD))
                nr_running = rq_of(cfs_rq)->cfs.h_nr_running;

        slice = __sched_period(nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;
+               struct cfs_rq *qcfs_rq;
 
-               cfs_rq = cfs_rq_of(se);
-               load = &cfs_rq->load;
+               qcfs_rq = cfs_rq_of(se);
+               load = &qcfs_rq->load;
 
                if (unlikely(!se->on_rq)) {
-                       lw = cfs_rq->load;
+                       lw = qcfs_rq->load;
 
                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
 
-       if (sched_feat(BASE_SLICE))
-               slice = max(slice, (u64)sysctl_sched_min_granularity);
+       if (sched_feat(BASE_SLICE)) {
+               if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
+                       min_gran = sysctl_sched_idle_min_granularity;
+               else
+                       min_gran = sysctl_sched_min_granularity;
+
+               slice = max_t(u64, slice, min_gran);
+       }
 
        return slice;
 }
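
As a sanity check on that arithmetic, here is a deliberately simplified userspace model of the slice computation, not kernel code: a single runqueue with no cgroup nesting, ALT_PERIOD ignored, and the period fixed at sysctl_sched_latency (valid while nr_running <= sched_nr_latency). The constant and helper names are mine; the weights (1024 and 3) and the 750000ULL floors mirror the kernel defaults quoted in this patch:

#include <stdio.h>

#define SCHED_LATENCY_NS	6000000ULL	/* sysctl_sched_latency default */
#define MIN_GRAN_NS		750000ULL	/* sysctl_sched_min_granularity */
#define IDLE_MIN_GRAN_NS	750000ULL	/* sysctl_sched_idle_min_granularity */

/* s = p*P[w/rw], then raised to the per-class floor as in BASE_SLICE */
static unsigned long long model_slice(unsigned long long w,
				      unsigned long long rw, int idle)
{
	unsigned long long slice = SCHED_LATENCY_NS * w / rw;
	unsigned long long floor = idle ? IDLE_MIN_GRAN_NS : MIN_GRAN_NS;

	return slice > floor ? slice : floor;
}

int main(void)
{
	/* one nice-0 task (weight 1024) sharing a CPU with one SCHED_IDLE task (weight 3) */
	unsigned long long rw = 1024 + 3;

	printf("normal: %llu ns\n", model_slice(1024, rw, 0));	/* ~5982473, no clamping */
	printf("idle:   %llu ns\n", model_slice(3, rw, 1));	/* 17526, clamped to 750000 */
	return 0;
}

Lowering IDLE_MIN_GRAN_NS in the model (or writing a smaller value to idle_min_granularity_ns on a live system) shrinks only the idle task's clamped slice, which is the point of the separate tunable.
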
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_idle_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern int sysctl_resched_latency_warn_ms;
 extern int sysctl_resched_latency_warn_once;