Merge branch 'for-6.15-fixes' into for-6.16
author Tejun Heo <tj@kernel.org>
Tue, 22 Apr 2025 19:29:23 +0000 (09:29 -1000)
committer Tejun Heo <tj@kernel.org>
Tue, 22 Apr 2025 19:29:44 +0000 (09:29 -1000)
a11d6784d731 ("sched_ext: Fix missing rq lock in scx_bpf_cpuperf_set()")
added a call to scx_ops_error(), which was renamed to scx_error() in
for-6.16. Fix it up.
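The conflict resolution is mechanical and is visible in the scx_bpf_cpuperf_set() hunk below: the call added in for-6.15-fixes as scx_ops_error("Invalid target CPU %d", cpu) becomes scx_error("Invalid target CPU %d", cpu) to match the for-6.16 rename.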

kernel/sched/ext.c
kernel/sched/ext_idle.c

diff --cc kernel/sched/ext.c
index bb0873411d798fe98be9a0d8676c1eb30d1078ee,ac79067dc87e65cf1c3f1fc49ac4fa4e1c0addbf..a175b622716ceea9eb006efcc505ef559b28779a
@@@ -3440,13 -3477,13 +3473,13 @@@ static void handle_hotplug(struct rq *r
                scx_idle_update_selcpu_topology(&scx_ops);
  
        if (online && SCX_HAS_OP(cpu_online))
-               SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
+               SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, rq, cpu);
        else if (!online && SCX_HAS_OP(cpu_offline))
-               SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, cpu);
+               SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_offline, rq, cpu);
        else
 -              scx_ops_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
 -                           "cpu %d going %s, exiting scheduler", cpu,
 -                           online ? "online" : "offline");
 +              scx_exit(SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG,
 +                       "cpu %d going %s, exiting scheduler", cpu,
 +                       online ? "online" : "offline");
  }
  
  void scx_rq_activate(struct rq *rq)
@@@ -3661,11 -3699,12 +3694,12 @@@ static int scx_init_task(struct task_st
        return 0;
  }
  
 -static void scx_ops_enable_task(struct task_struct *p)
 +static void scx_enable_task(struct task_struct *p)
  {
+       struct rq *rq = task_rq(p);
        u32 weight;
  
-       lockdep_assert_rq_held(task_rq(p));
+       lockdep_assert_rq_held(rq);
  
        /*
         * Set the weight before calling ops.enable() so that the scheduler
         * doesn't see a stale value if they inspect the task struct.
         */
        scx_set_task_state(p, SCX_TASK_ENABLED);
  
        if (SCX_HAS_OP(set_weight))
-               SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight);
+               SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, rq, p, p->scx.weight);
  }
  
 -static void scx_ops_disable_task(struct task_struct *p)
 +static void scx_disable_task(struct task_struct *p)
  {
-       lockdep_assert_rq_held(task_rq(p));
+       struct rq *rq = task_rq(p);
+       lockdep_assert_rq_held(rq);
        WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
  
        if (SCX_HAS_OP(disable))
@@@ -7052,13 -7113,32 +7088,32 @@@ __bpf_kfunc void scx_bpf_cpuperf_set(s3
        }
  
        if (ops_cpu_valid(cpu, NULL)) {
-               struct rq *rq = cpu_rq(cpu);
+               struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
+               struct rq_flags rf;
+               /*
+                * When called with an rq lock held, restrict the operation
+                * to the corresponding CPU to prevent ABBA deadlocks.
+                */
+               if (locked_rq && rq != locked_rq) {
 -                      scx_ops_error("Invalid target CPU %d", cpu);
++                      scx_error("Invalid target CPU %d", cpu);
+                       return;
+               }
+               /*
+                * If no rq lock is held, allow to operate on any CPU by
+                * acquiring the corresponding rq lock.
+                */
+               if (!locked_rq) {
+                       rq_lock_irqsave(rq, &rf);
+                       update_rq_clock(rq);
+               }
  
                rq->scx.cpuperf_target = perf;
+               cpufreq_update_util(rq, 0);
  
-               rcu_read_lock_sched_notrace();
-               cpufreq_update_util(cpu_rq(cpu), 0);
-               rcu_read_unlock_sched_notrace();
+               if (!locked_rq)
+                       rq_unlock_irqrestore(rq, &rf);
        }
  }
  
kernel/sched/ext_idle.c: Simple merge
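For readers outside the kernel tree, the scx_bpf_cpuperf_set() hunk above implements a lock-or-restrict pattern: if the caller already holds an rq lock, the target CPU must be the one whose lock is held, since taking a second rq lock could deadlock ABBA-style; if no rq lock is held, the function acquires the target CPU's lock itself. Below is a minimal, self-contained userspace sketch of that pattern, with pthread mutexes standing in for rq locks and a thread-local pointer standing in for scx_locked_rq(); all names here (cpuperf_set, locked_rq, runqueues) are illustrative stand-ins, not kernel APIs.

/*
 * Minimal userspace analogue of the locking rules in the
 * scx_bpf_cpuperf_set() hunk above. Illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	pthread_mutex_t lock;
	unsigned int cpuperf_target;
};

static struct rq runqueues[NR_CPUS];

/* Which rq lock, if any, the current thread already holds. */
static __thread struct rq *locked_rq;

static void cpuperf_set(int cpu, unsigned int perf)
{
	struct rq *rq = &runqueues[cpu];

	/*
	 * Holding one rq lock while blocking on another invites ABBA
	 * deadlock: thread A holds rq0 and waits for rq1 while thread B
	 * holds rq1 and waits for rq0. So a caller that already holds a
	 * lock may only target the matching CPU.
	 */
	if (locked_rq && rq != locked_rq) {
		fprintf(stderr, "invalid target CPU %d\n", cpu);
		return;
	}

	/* No lock held: safe to take the target CPU's lock ourselves. */
	if (!locked_rq)
		pthread_mutex_lock(&rq->lock);

	rq->cpuperf_target = perf;

	if (!locked_rq)
		pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&runqueues[i].lock, NULL);

	cpuperf_set(1, 512);	/* no lock held: takes and drops rq1's lock */

	pthread_mutex_lock(&runqueues[0].lock);
	locked_rq = &runqueues[0];
	cpuperf_set(2, 1024);	/* rejected: another CPU's rq lock is held */
	cpuperf_set(0, 1024);	/* allowed: operates under the held lock */
	locked_rq = NULL;
	pthread_mutex_unlock(&runqueues[0].lock);

	return 0;
}

Compiles with cc -pthread; the rejected call exercises the error path that the kernel hunk reports via scx_error().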