sched/core: Rename schedutil_cpu_util() and allow rest of the kernel to use it
author		Viresh Kumar <viresh.kumar@linaro.org>		Tue, 8 Dec 2020 04:16:56 +0000 (09:46 +0530)
committer	Peter Zijlstra <peterz@infradead.org>		Thu, 14 Jan 2021 10:20:09 +0000 (11:20 +0100)
There is nothing schedutil-specific in schedutil_cpu_util(), so rename
it to effective_cpu_util(). Also create and expose another wrapper,
sched_cpu_util(), which can be used by other parts of the kernel, like
the thermal core (that will be done in a later commit).

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lkml.kernel.org/r/db011961fb3bb8bef1c0eda5cd64564637d3ef31.1607400596.git.viresh.kumar@linaro.org
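
For illustration only: with this patch applied, code outside the scheduler
could estimate a CPU's load roughly as in the sketch below. cpu_busy_pct()
is a hypothetical helper, not part of this patch, and passing
arch_scale_cpu_capacity() as the 'max' argument is an assumption modeled
on how sugov_get_util() obtains it.

	#include <linux/sched.h>
	#include <linux/sched/topology.h>

	/* Hypothetical helper: per-CPU load as a percentage of capacity. */
	static unsigned long cpu_busy_pct(int cpu)
	{
		unsigned long max = arch_scale_cpu_capacity(cpu);

		/* sched_cpu_util() clamps its result to 'max'. */
		return (100 * sched_cpu_util(cpu, max)) / max;
	}
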
include/linux/sched.h
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e3a5eeec509a193b5f2f3d07348a68f188a6f18..31169e70476afd995f4f67483b8929742d98fa58 100644
@@ -1968,6 +1968,11 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #define TASK_SIZE_OF(tsk)      TASK_SIZE
 #endif
 
+#ifdef CONFIG_SMP
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu, unsigned long max);
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_RSEQ
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d89d682d533704d535319083bbe813c8e60bd401..4fe4cbf0bf08a0c0a0585a1ba9565d18163c4f87 100644
@@ -5683,8 +5683,8 @@ struct task_struct *idle_task(int cpu)
  * based on the task model parameters and gives the minimal utilization
  * required to meet deadlines.
  */
-unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
-                                unsigned long max, enum schedutil_type type,
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+                                unsigned long max, enum cpu_util_type type,
                                 struct task_struct *p)
 {
        unsigned long dl_util, util, irq;
@@ -5768,6 +5768,12 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 
        return min(max, util);
 }
+
+unsigned long sched_cpu_util(int cpu, unsigned long max)
+{
+       return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
+                                 ENERGY_UTIL, NULL);
+}
 #endif /* CONFIG_SMP */
 
 /**
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 1dfa69246485861ec5062439af008eded07ad455..41e498b0008a6c415d7e8ebd3d5babf65f3aa0d6 100644
@@ -178,7 +178,7 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 
        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);
-       sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+       sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
                                          FREQUENCY_UTIL, NULL);
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 04a3ce20da671e456905fe56a7496b255c2c4d3b..39c5bda90bd4d6ad916b387fb83fe125ee47771d 100644
@@ -6543,7 +6543,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
                 * is already enough to scale the EM reported power
                 * consumption at the (eventually clamped) cpu_capacity.
                 */
-               sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap,
+               sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
                                               ENERGY_UTIL, NULL);
 
                /*
@@ -6553,7 +6553,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
                 * NOTE: in case RT tasks are running, by default the
                 * FREQUENCY_UTIL's utilization can be max OPP.
                 */
-               cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap,
+               cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
                                              FREQUENCY_UTIL, tsk);
                max_util = max(max_util, cpu_util);
        }
@@ -6651,7 +6651,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                         * IOW, placing the task there would make the CPU
                         * overutilized. Take uclamp into account to see how
                         * much capacity we can get out of the CPU; this is
-                        * aligned with schedutil_cpu_util().
+                        * aligned with sched_cpu_util().
                         */
                        util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
                        if (!fits_capacity(util, cpu_cap))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 242d4c5a5efc4120f506a132601857bf1b42170a..045b01064c1ebf540c10c082ea1256808d70a003 100644
@@ -2559,22 +2559,22 @@ static inline unsigned long capacity_orig_of(int cpu)
 }
 
 /**
- * enum schedutil_type - CPU utilization type
+ * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:    Utilization used to select frequency
  * @ENERGY_UTIL:       Utilization used during energy calculation
  *
  * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
  * need to be aggregated differently depending on the usage made of them. This
- * enum is used within schedutil_freq_util() to differentiate the types of
+ * enum is used within effective_cpu_util() to differentiate the types of
  * utilization expected by the callers, and adjust the aggregation accordingly.
  */
-enum schedutil_type {
+enum cpu_util_type {
        FREQUENCY_UTIL,
        ENERGY_UTIL,
 };
 
-unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
-                                unsigned long max, enum schedutil_type type,
+unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
+                                unsigned long max, enum cpu_util_type type,
                                 struct task_struct *p);
 
 static inline unsigned long cpu_bw_dl(struct rq *rq)
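
For illustration only (not part of the patch): the two cpu_util_type values
make effective_cpu_util() aggregate the same inputs differently, matching
the callers changed above (sugov_get_util() passes FREQUENCY_UTIL,
compute_energy() passes both). A minimal sketch, assuming scheduler-internal
context where cpu_rq() and cpu_util_cfs() are available; show_util_types()
is a hypothetical function:

	/* Hypothetical: contrast the two utilization types for one CPU. */
	static void show_util_types(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long max = arch_scale_cpu_capacity(cpu);
		unsigned long f_util, e_util;

		/* Utilization used to select a frequency. */
		f_util = effective_cpu_util(cpu, cpu_util_cfs(rq), max,
					    FREQUENCY_UTIL, NULL);

		/* Utilization used during energy calculation. */
		e_util = effective_cpu_util(cpu, cpu_util_cfs(rq), max,
					    ENERGY_UTIL, NULL);
	}
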