void set_user_nice(struct task_struct *p, long nice)
 {
        bool queued, running;
-       int old_prio;
-       struct rq_flags rf;
        struct rq *rq;
+       int old_prio;
 
        if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
                return;
         * We have to be careful, if called from sys_setpriority(),
         * the task might be in the middle of scheduling on another CPU.
         */
-       rq = task_rq_lock(p, &rf);
+       /*
+        * Scope-based guard: acquires the task's rq lock here and releases
+        * it automatically on every return path below, which is why the
+        * old out_unlock label and explicit task_rq_unlock() are gone.
+        */
+       CLASS(task_rq_lock, rq_guard)(p);
+       rq = rq_guard.rq;
+
        update_rq_clock(rq);
 
        /*
         */
        if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
-               goto out_unlock;
+               return; /* guard drops the rq lock */
        }
+
        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
         * lowered its priority, then reschedule its CPU:
         */
        p->sched_class->prio_changed(rq, p, old_prio);
-
-out_unlock:
-       task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
 
 
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+/*
+ * Lock guard wrapping task_rq_lock()/task_rq_unlock(), instantiated as
+ * CLASS(task_rq_lock, name)(p).  On construction it locks via
+ * task_rq_lock(p, &rf), caching the locked runqueue in _T->rq and the
+ * saved flags in _T->rf; task_rq_unlock() runs automatically when the
+ * guard goes out of scope.
+ */
+DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
+                   _T->rq = task_rq_lock(_T->lock, &_T->rf),
+                   task_rq_unlock(_T->rq, _T->lock, &_T->rf),
+                   struct rq *rq; struct rq_flags rf)
+
 static inline void
 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
        __acquires(rq->lock)