diff -Naur linux-4.2/include/linux/sched/sysctl.h linux-4.2_/include/linux/sched/sysctl.h
--- linux-4.2/include/linux/sched/sysctl.h 2015-08-30 11:34:09.000000000 -0700
+++ linux-4.2_/include/linux/sched/sysctl.h 2016-02-22 12:33:07.000000000 -0800
@@ -39,6 +39,7 @@
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_weighted_cpuload_uses_rla;
 
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
diff -Naur linux-4.2/kernel/sched/fair.c linux-4.2_/kernel/sched/fair.c
--- linux-4.2/kernel/sched/fair.c 2015-08-30 11:34:09.000000000 -0700
+++ linux-4.2_/kernel/sched/fair.c 2016-02-22 16:03:25.443801709 -0800
@@ -35,6 +35,11 @@
 
 #include "sched.h"
 
+#ifdef CONFIG_SMP
+/* weighted_cpuload(): 1 = PELT runnable load avg, 0 = raw rq->load.weight */
+unsigned int sysctl_sched_weighted_cpuload_uses_rla = 1;
+#endif
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -4517,7 +4522,9 @@
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
-	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
+	if (sysctl_sched_weighted_cpuload_uses_rla)
+		return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
+	return cpu_rq(cpu)->load.weight;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
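
For intuition about what the toggle trades off: cfs_rq_runnable_load_avg() returns
the PELT runnable load average, which decays past contributions geometrically
(y^32 = 1/2, so a contribution loses half its weight every ~32ms), while
rq->load.weight is the instantaneous sum of the weights of the entities currently
on the runqueue and jumps the moment a task enqueues or dequeues. The toy
user-space model below is only a sketch under those assumptions, not the kernel's
fixed-point PELT code: it uses 1ms steps and a plain exponentially weighted
average in place of PELT's per-1024us segment accounting, and the bursty load
pattern is invented for illustration.

/* toy_pelt.c -- illustrative only: contrast an instantaneous load signal
 * with a PELT-style geometrically decayed average.
 * Assumed simplifications (mine, not from the patch): 1 ms periods and
 * y = 0.5^(1/32), mirroring PELT's y^32 = 1/2 half-life of 32 periods.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* per-period decay factor */
	double avg = 0.0;			/* PELT-style average */

	for (int ms = 0; ms < 128; ms++) {
		/* bursty instantaneous load: weight 1024 (one nice-0 task)
		 * for 8 ms out of every 32 ms, otherwise idle */
		double inst = (ms % 32 < 8) ? 1024.0 : 0.0;

		/* decay the history, then accumulate this period */
		avg = avg * y + inst * (1.0 - y);

		if (ms % 8 == 0)
			printf("t=%3d ms  inst=%6.0f  avg=%7.1f\n",
			       ms, inst, avg);
	}
	return 0;
}

Build with "cc -o toy_pelt toy_pelt.c -lm". The printed average lags well behind
the instantaneous signal, which is exactly the behaviour the sysctl lets you
switch off when you want load balancing to see raw runqueue weight instead.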
diff -Naur linux-4.2/kernel/sysctl.c linux-4.2_/kernel/sysctl.c
--- linux-4.2/kernel/sysctl.c 2015-08-30 11:34:09.000000000 -0700
+++ linux-4.2_/kernel/sysctl.c 2016-02-22 12:33:17.000000000 -0800
@@ -349,6 +349,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sched_weighted_cpuload_uses_rla",
+		.data		= &sysctl_sched_weighted_cpuload_uses_rla,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_NUMA_BALANCING
 	{
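
Since the entry is registered in kern_table, it should surface as
/proc/sys/kernel/sched_weighted_cpuload_uses_rla once the patch is applied,
so it can be flipped at runtime with sysctl(8), e.g.
"sysctl -w kernel.sched_weighted_cpuload_uses_rla=0", or with a plain write to
the proc file. A minimal C helper, assuming that path (the file name rla_toggle.c
is mine):

/* rla_toggle.c -- read or set the patched-in scheduler knob.
 * Path assumes the sysctl lands in kern_table, hence /proc/sys/kernel/.
 * Usage: rla_toggle        -> print current value
 *        rla_toggle 0|1    -> set value (needs root)
 */
#include <stdio.h>
#include <stdlib.h>

#define KNOB "/proc/sys/kernel/sched_weighted_cpuload_uses_rla"

int main(int argc, char **argv)
{
	FILE *f;

	if (argc > 1) {			/* set */
		f = fopen(KNOB, "w");
		if (!f || fprintf(f, "%d\n", atoi(argv[1])) < 0) {
			perror(KNOB);
			return 1;
		}
		fclose(f);
	} else {			/* get */
		char buf[32];

		f = fopen(KNOB, "r");
		if (!f || !fgets(buf, sizeof(buf), f)) {
			perror(KNOB);
			return 1;
		}
		printf("%s", buf);
		fclose(f);
	}
	return 0;
}

Because the handler is plain proc_dointvec, any integer is accepted; only the
zero/nonzero distinction matters to weighted_cpuload(), so 0 and 1 are the only
values worth writing.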