author     Steve Muckle        2016-03-22 01:21:08 +0100
committer  Ingo Molnar         2016-04-23 14:20:36 +0200
commit     41e0d37f7ac81297c07ba311e4ad39465b8c8295 (patch)
tree       85885c97c1ff6450aee7132b6964bc12cfae53f5 /kernel/sched/fair.c
parent     sched/fair: Move cpufreq hook to update_cfs_rq_load_avg() (diff)
sched/fair: Do not call cpufreq hook unless util changed
There's no reason to call the cpufreq hook if the root cfs_rq utilization has not been modified.

Signed-off-by: Steve Muckle <smuckle@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Michael Turquette <mturquette@baylibre.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/1458606068-7476-2-git-send-email-smuckle@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6df80d47a525..81552819444c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2879,20 +2879,21 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
 	struct rq *rq = rq_of(cfs_rq);
-	int decayed, removed = 0;
+	int decayed, removed_load = 0, removed_util = 0;
 	int cpu = cpu_of(rq);
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
-		removed = 1;
+		removed_load = 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
 		sa->util_avg = max_t(long, sa->util_avg - r, 0);
 		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+		removed_util = 1;
 	}
 
 	decayed = __update_load_avg(now, cpu, sa,
@@ -2903,7 +2904,8 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
+	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq &&
+	    (decayed || removed_util)) {
 		unsigned long max = rq->cpu_capacity_orig;
 
 		/*
@@ -2926,7 +2928,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 			    min(sa->util_avg, max), max);
 	}
 
-	return decayed || removed;
+	return decayed || removed_load;
 }
 
 /* Update task and its cfs_rq load average */
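
Stripped of the kernel specifics, the patch reduces to one pattern: track the load and utilization removals with separate flags, gate the cpufreq notification on utilization changes only, and keep reporting load changes to the caller. Below is a minimal, self-contained C sketch of that pattern, not kernel code: struct avg, update_avg() and cpufreq_hook() are hypothetical stand-ins for struct sched_avg, update_cfs_rq_load_avg() and the cpufreq utilization hook, and the decay/removal arithmetic is simplified.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical per-queue averages, loosely mirroring struct sched_avg. */
struct avg {
	long load_avg;
	long util_avg;
};

/* Stand-in for the cpufreq utilization hook. */
static void cpufreq_hook(const struct avg *sa)
{
	printf("cpufreq notified: util_avg=%ld\n", sa->util_avg);
}

static int update_avg(struct avg *sa, long removed_load_val,
		      long removed_util_val, bool decayed)
{
	int removed_load = 0, removed_util = 0;

	if (removed_load_val) {
		sa->load_avg -= removed_load_val;
		removed_load = 1;
	}

	if (removed_util_val) {
		sa->util_avg -= removed_util_val;
		removed_util = 1;	/* the flag the patch adds */
	}

	/* Only bother cpufreq when the utilization it consumes changed. */
	if (decayed || removed_util)
		cpufreq_hook(sa);

	/* Callers of this function care about load changes, not util. */
	return decayed || removed_load;
}

int main(void)
{
	struct avg sa = { .load_avg = 1024, .util_avg = 512 };

	/* Load removed but util untouched: no cpufreq notification. */
	update_avg(&sa, 100, 0, false);

	/* Util removed: cpufreq gets notified. */
	update_avg(&sa, 0, 50, false);
	return 0;
}

The asymmetry between the two conditions appears deliberate: the return value feeds scheduler paths that consume load averages, while the hook feeds frequency selection, which only consumes utilization, so each consumer reacts only to the signal it actually reads.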