        SCHED_FEAT_TREE_AVG             = 4,
        SCHED_FEAT_APPROX_AVG           = 8,
        SCHED_FEAT_WAKEUP_PREEMPT       = 16,
+       SCHED_FEAT_PREEMPT_RESTRICT     = 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_TREE_AVG             *0 |
                SCHED_FEAT_APPROX_AVG           *0 |
-               SCHED_FEAT_WAKEUP_PREEMPT       *1;
+               SCHED_FEAT_WAKEUP_PREEMPT       *1 |
+               SCHED_FEAT_PREEMPT_RESTRICT     *1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
 
 
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
+               se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);
 
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-       if (delta_exec > ideal_runtime)
+       if (delta_exec > ideal_runtime ||
+                       (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
                resched_task(rq_of(cfs_rq)->curr);
+       curr->peer_preempt = 0;
 }
 
 static void
                if (unlikely(se->load.weight != NICE_0_LOAD))
                        gran = calc_delta_fair(gran, &se->load);
 
-               if (delta > gran)
-                       resched_task(curr);
+               if (delta > gran) {
+                       int now = !sched_feat(PREEMPT_RESTRICT);
+
+                       if (now || p->prio < curr->prio || !se->peer_preempt++)
+                               resched_task(curr);
+               }
        }
 }
 
        check_spread(cfs_rq, curr);
        __enqueue_entity(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
+       se->peer_preempt = 0;
        resched_task(rq->curr);
 }