sched: Move the loadavg code to a more obvious location
author    Atish Patra <atish.patra@oracle.com>
          Fri, 23 Jun 2017 19:32:57 +0000 (13:32 -0600)
committer Allen Pais <allen.pais@oracle.com>
          Tue, 18 Jul 2017 12:25:00 +0000 (17:55 +0530)
A previous commit f33dfff75d968 ("sched/fair: Rewrite runnable load
and utilization average tracking") introduced a regression in the
global load average reported by uptime(1): the active load average
computation must be invoked periodically to fold each runqueue's
delta into the global task count, and that periodic call went
missing.

Use the following upstream commit 3289bdb42 to fix this instead of a
quick fix.
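
For reference, the mechanism being restored works as follows: each
runqueue tracks its active task count (running plus uninterruptible),
and the scheduler tick must periodically fold the per-runqueue delta
into a global counter that the avenrun code samples. Below is a
minimal userspace sketch of that folding step; calc_global_load_tick()
and calc_load_tasks are the names used by the patch, while the
simplified struct rq and everything in main() are illustrative only.

  #include <stdatomic.h>
  #include <stdio.h>

  /* Global count sampled by the loadavg code (calc_load_tasks upstream). */
  static atomic_long calc_load_tasks;

  struct rq {
          long nr_running;
          long nr_uninterruptible;
          long calc_load_active;  /* last value folded into the global count */
  };

  /*
   * Fold this runqueue's change in active tasks into the global counter.
   * When this is never called (the regression), calc_load_tasks goes
   * stale and the reported load average barely moves.
   */
  static void calc_global_load_tick(struct rq *rq)
  {
          long nr_active = rq->nr_running + rq->nr_uninterruptible;
          long delta = nr_active - rq->calc_load_active;

          if (delta) {
                  rq->calc_load_active = nr_active;
                  atomic_fetch_add(&calc_load_tasks, delta);
          }
  }

  int main(void)
  {
          struct rq rq = { .nr_running = 129 };

          calc_global_load_tick(&rq);  /* scheduler_tick() does this upstream */
          printf("active tasks: %ld\n", atomic_load(&calc_load_tasks));
          return 0;
  }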

Before the fix:

         procs_
    when running  load average
======== ======= =================
13:32:46       1  0.65, 0.22, 0.08
13:33:47     129  0.78, 0.33, 0.12
13:34:47     129  0.74, 0.41, 0.16
13:35:47     129  0.60, 0.42, 0.18
13:36:47     129  0.77, 0.49, 0.22
13:37:47     129  0.78, 0.55, 0.26

After the fix:

         procs_
    when running  load average
======== ======= =================
19:46:35       1  0.58, 0.38, 0.16
19:47:35     129  74.02, 21.09, 7.27
19:48:35     129  103.16, 39.08, 14.31
19:49:35     129  114.25, 53.95, 20.98
19:52:36     257  172.40, 97.26, 42.96
19:53:37     257  221.54, 124.95, 55.87
19:54:37     257  237.13, 147.05, 67.80
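
The post-fix climb is the fixed-point exponential average catching up
with 129 runnable tasks: every LOAD_FREQ (5 seconds) the kernel applies
load = load*exp + active*(1 - exp) to each of the three averages. Below
is a rough userspace model of the one-minute figure; the constants
match include/linux/sched.h of this era and the update step is modeled
on the kernel's calc_load(), but this is a sketch, not the kernel code.

  #include <stdio.h>

  #define FSHIFT    11                 /* bits of fixed-point precision */
  #define FIXED_1   (1 << FSHIFT)      /* 1.0 in fixed point */
  #define EXP_1     1884               /* 1/exp(5sec/1min) in fixed point */
  #define LOAD_FREQ 5                  /* avenrun updates every 5 seconds */

  /* One decay/ramp step, modeled on the kernel's calc_load(). */
  static unsigned long
  calc_load(unsigned long load, unsigned long exp, unsigned long active)
  {
          load *= exp;
          load += active * (FIXED_1 - exp);
          load += 1UL << (FSHIFT - 1);  /* round to nearest */
          return load >> FSHIFT;
  }

  int main(void)
  {
          unsigned long avenrun = 0;             /* 1-minute average */
          unsigned long active = 129 * FIXED_1;  /* 129 runnable tasks */
          int sec;

          for (sec = LOAD_FREQ; sec <= 180; sec += LOAD_FREQ) {
                  avenrun = calc_load(avenrun, EXP_1, active);
                  if (sec % 60 == 0)  /* print once per minute */
                          printf("t=%3ds load1=%lu.%02lu\n", sec,
                                 avenrun >> FSHIFT,
                                 ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
          }
          return 0;
  }

With active held at 129 tasks the modeled value converges toward
129.00, which is the trend visible in the one-minute column above.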

Original upstream commit message:
I could not find the loadavg code.. turns out it was hidden in a file
called proc.c. It further got mingled up with the cruft per rq load
indexes (which we really want to get rid of).

Move the per rq load indexes into the fair.c load-balance code (that's
the only thing that uses them) and rename proc.c to loadavg.c so we
can find it again.

Orabug: 26266279

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
[ Did minor cleanups to the code. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 3289bdb429884c0279bf9ab72dff7b934f19dfc6)

Conflicts:

kernel/sched/fair.c
kernel/sched/loadavg.c
kernel/sched/sched.h

Signed-off-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: Atish Patra <atish.patra@oracle.com>
Signed-off-by: Allen Pais <allen.pais@oracle.com>
Reviewed-by: Dhaval Giani <dhaval.giani@oracle.com>
include/linux/sched.h
kernel/sched/Makefile
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/loadavg.c [moved from kernel/sched/proc.c with 94% similarity]
kernel/sched/sched.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 16a61ab87c8444a9dc1fec0d744c20ef0c4cdd33..94f9bfdd72712a94f7689c7a51fbc3b96793d80b 100644
@@ -174,7 +174,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
 extern void calc_global_load(unsigned long ticks);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void update_cpu_load_nohz(void);
+#else
+static inline void update_cpu_load_nohz(void) { }
+#endif
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 46be8702487561cd88a7895fea8c6401d72e9ce6..67687973ce80d63d3f52698fb4b738b76964b896 100644
@@ -11,7 +11,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
-obj-y += core.o proc.o clock.o cputime.o
+obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 92ca59411c6ff1c3baa2ea3181ac09a4ec082031..a564e3c90bcf2ba45b723ef16f26df870b21e2c5 100644
@@ -2423,9 +2423,9 @@ unsigned long nr_iowait_cpu(int cpu)
 
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
-       struct rq *this = this_rq();
-       *nr_waiters = atomic_read(&this->nr_iowait);
-       *load = this->cpu_load[0];
+       struct rq *rq = this_rq();
+       *nr_waiters = atomic_read(&rq->nr_iowait);
+       *load = rq->cpu_load[0];
 }
 
 #ifdef CONFIG_SMP
@@ -2523,6 +2523,7 @@ void scheduler_tick(void)
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_cpu_load_active(rq);
+       calc_global_load_tick(rq);
        raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e94d6a6c1c88b3b7ba816122dedf76498d463b20..3a7f8a4e66de160333a4e7bc2ddac41fb76f5a1c 100644
@@ -4311,7 +4311,7 @@ static unsigned long weighted_cpuload(const int cpu)
  */
 static void update_idle_cpu_load(struct rq *this_rq)
 {
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
        unsigned long load = weighted_cpuload(cpu_of(this_rq));
        unsigned long pending_updates;
 
@@ -4333,7 +4333,7 @@ static void update_idle_cpu_load(struct rq *this_rq)
 void update_cpu_load_nohz(void)
 {
        struct rq *this_rq = this_rq();
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
        unsigned long pending_updates;
 
        if (curr_jiffies == this_rq->last_load_update_tick)
diff --git a/kernel/sched/proc.c b/kernel/sched/loadavg.c
similarity index 94%
rename from kernel/sched/proc.c
rename to kernel/sched/loadavg.c
index a5d8787f00569da249f1eb5b421330f17eae8411..2b07e90e229ec567bfeee476ca460cebd8239887 100644
@@ -1,7 +1,9 @@
 /*
- *  kernel/sched/proc.c
+ * kernel/sched/loadavg.c
  *
- *  Kernel load calculations, forked from sched/core.c
+ * This file contains the magic bits required to compute the global loadavg
+ * figure. Its a silly number but people think its important. We go through
+ * great pains to make it work on big machines and tickless kernels.
  */
 
 #include <linux/export.h>
@@ -81,7 +83,7 @@ long calc_load_fold_active(struct rq *this_rq)
        long nr_active, delta = 0;
 
        nr_active = this_rq->nr_running;
-       nr_active += (long) this_rq->nr_uninterruptible;
+       nr_active += (long)this_rq->nr_uninterruptible;
 
        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
@@ -186,6 +188,7 @@ void calc_load_enter_idle(void)
        delta = calc_load_fold_active(this_rq);
        if (delta) {
                int idx = calc_load_write_idx();
+
                atomic_long_add(delta, &calc_load_idle[idx]);
        }
 }
@@ -241,18 +244,20 @@ fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
 {
        unsigned long result = 1UL << frac_bits;
 
-       if (n) for (;;) {
-               if (n & 1) {
-                       result *= x;
-                       result += 1UL << (frac_bits - 1);
-                       result >>= frac_bits;
+       if (n) {
+               for (;;) {
+                       if (n & 1) {
+                               result *= x;
+                               result += 1UL << (frac_bits - 1);
+                               result >>= frac_bits;
+                       }
+                       n >>= 1;
+                       if (!n)
+                               break;
+                       x *= x;
+                       x += 1UL << (frac_bits - 1);
+                       x >>= frac_bits;
                }
-               n >>= 1;
-               if (!n)
-                       break;
-               x *= x;
-               x += 1UL << (frac_bits - 1);
-               x >>= frac_bits;
        }
 
        return result;
@@ -285,7 +290,6 @@ static unsigned long
 calc_load_n(unsigned long load, unsigned long exp,
            unsigned long active, unsigned int n)
 {
-
        return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
 }
 
@@ -339,6 +343,8 @@ static inline void calc_global_nohz(void) { }
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
  * CPUs have updated calc_load_tasks.
+ *
+ * Called from the global timer code.
  */
 void calc_global_load(unsigned long ticks)
 {
@@ -370,10 +376,10 @@ void calc_global_load(unsigned long ticks)
 }
 
 /*
- * Called from update_cpu_load() to periodically update this CPU's
+ * Called from scheduler_tick() to periodically update this CPU's
  * active count.
  */
-static void calc_load_account_active(struct rq *this_rq)
+void calc_global_load_tick(struct rq *this_rq)
 {
        long delta;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7962f5f58009037a9e09c7dd39ded47fcb6be97..b5116cc2665526ff49f1ecad6b7f105e89937eb7 100644
@@ -27,8 +27,14 @@ extern __read_mostly int scheduler_running;
 extern unsigned long calc_load_update;
 extern atomic_long_t calc_load_tasks;
 
+extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq);
+
+#ifdef CONFIG_SMP
 extern void update_cpu_load_active(struct rq *this_rq);
+#else
+static inline void update_cpu_load_active(struct rq *this_rq) { }
+#endif
 
 /*
  * Helpers for converting nanosecond timing to jiffy resolution