#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_nr_latency;
+extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
+
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+               struct file *file, void __user *buffer, size_t *length,
+               loff_t *ppos);
 #endif
 
 extern unsigned int sysctl_sched_compat_yield;
 
 #define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
-       PN(sysctl_sched_nr_latency);
+       PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        PN(sysctl_sched_batch_wakeup_granularity);
        PN(sysctl_sched_child_runs_first);
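
For reference: PN() pairs with SPLIT_NS(), defined elsewhere in
sched_debug.c, to print a nanosecond tunable as milliseconds with six
fractional digits.  A minimal userspace sketch of the same formatting,
assuming SPLIT_NS() splits on 10^6 the way nsec_high()/nsec_low() do:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ns = 20000000ULL;    /* sysctl_sched_latency */

            /* mirrors SEQ_printf(m, "  .%-40s: %Ld.%06ld", #x, SPLIT_NS(x)) */
            printf("  .%-40s: %llu.%06llu\n", "sysctl_sched_latency",
                   ns / 1000000, ns % 1000000);
            return 0;
    }

This prints the default latency as "20.000000", i.e. in msec.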
 
 const_debug unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
+ * Minimal preemption granularity for CPU-bound tasks:
+ * (default: 1 msec, units: nanoseconds)
  */
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+const_debug unsigned int sysctl_sched_min_granularity = 1000000ULL;
 
 /*
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * Derived value, not separately tunable: kept at
+ * sysctl_sched_latency / sysctl_sched_min_granularity (rounded up)
+ * by sched_nr_latency_handler().  Default: 20000000 / 1000000 = 20.
  */
-const_debug unsigned int sysctl_sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 20;
+
+/*
+ * After fork, the child runs first by default.  If set to 0, the
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * sys_sched_yield() compat mode
  */
 
 /*
  * Scheduling class statistics methods:
  */
 
+#ifdef CONFIG_SCHED_DEBUG
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+       if (ret || !write)
+               return ret;
+
+       sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
+                                       sysctl_sched_min_granularity);
+
+       return 0;
+}
+#endif
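
The handler re-derives sched_nr_latency whenever either tunable is
written, rounding up so that a coarse granularity can never drive it to
zero.  A standalone userspace sketch of the arithmetic, with
DIV_ROUND_UP spelled out as the usual ceiling-division macro from
<linux/kernel.h>:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int latency = 20000000;        /* default: 20 msec */
            unsigned int granularity = 3000000;     /* say, 3 msec was just written */

            /* what sched_nr_latency_handler() recomputes on a successful write */
            unsigned int nr_latency = DIV_ROUND_UP(latency, granularity);

            printf("sched_nr_latency = %u\n", nr_latency);  /* ceil(20/3) = 7 */
            return 0;
    }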
 
 /*
  * The idea is to set a period in which each task runs once.
  *
  * When there are too many tasks (sched_nr_latency) we have to stretch
  * this period because otherwise the slices get too small.
  *
  * p = (nr <= nl) ? l : l*nr/nl
  */
 static u64 __sched_period(unsigned long nr_running)
 {
        u64 period = sysctl_sched_latency;
-       unsigned long nr_latency = sysctl_sched_nr_latency;
+       unsigned long nr_latency = sched_nr_latency;
 
        if (unlikely(nr_running > nr_latency)) {
                period *= nr_running;
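
With sched_nr_latency or fewer runnable tasks, every task gets a slice
of the fixed 20 msec period; beyond that, the period stretches so no
slice shrinks below the minimum granularity.  A userspace sketch of the
computation, assuming the tail of this hunk (elided above) divides the
scaled period by nr_latency, i.e. the l*nr/nl case from the comment:

    #include <stdio.h>

    typedef unsigned long long u64;

    static u64 sched_period(unsigned long nr_running, unsigned long nr_latency)
    {
            u64 period = 20000000ULL;       /* sysctl_sched_latency */

            if (nr_running > nr_latency) {
                    period *= nr_running;
                    period /= nr_latency;   /* stands in for do_div() */
            }
            return period;
    }

    int main(void)
    {
            printf("%llu\n", sched_period(10, 20));  /* 20000000: all fit */
            printf("%llu\n", sched_period(40, 20));  /* 40000000: 1 msec each */
            return 0;
    }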
 
 #ifdef CONFIG_SCHED_DEBUG
        {
                .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sched_nr_latency",
-               .data           = &sysctl_sched_nr_latency,
+               .procname       = "sched_min_granularity_ns",
+               .data           = &sysctl_sched_min_granularity,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec,
+               .proc_handler   = &sched_nr_latency_handler,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &min_sched_granularity_ns,
+               .extra2         = &max_sched_granularity_ns,
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sched_latency_ns",
                .data           = &sysctl_sched_latency,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
+               .proc_handler   = &sched_nr_latency_handler,
                .strategy       = &sysctl_intvec,
                .extra1         = &min_sched_granularity_ns,
                .extra2         = &max_sched_granularity_ns,
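
Assuming these entries sit in the kernel/ sysctl table alongside the
other sched_* tunables, the new knob appears as
/proc/sys/kernel/sched_min_granularity_ns, and every successful write
to it (or to sched_latency_ns) passes through sched_nr_latency_handler().
A small usage sketch, to be run as root:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/proc/sys/kernel/sched_min_granularity_ns";
            unsigned int val;
            FILE *f = fopen(path, "r+");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%u", &val) == 1)
                    printf("old: %u ns\n", val);

            rewind(f);
            /* 2 msec; the handler re-derives sched_nr_latency = ceil(20/2) = 10 */
            fprintf(f, "2000000\n");
            fclose(f);
            return 0;
    }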