author		Peter Zijlstra		2009-06-03 15:41:20 +0200
committer	Ingo Molnar		2009-08-02 14:26:07 +0200
commit		e709715915d69b6a929d77e7652c9c3fea61c317 (patch)
tree		69ed9845b4b4c412cc17d59469e46ebb41de0c31 /kernel/sched.c
parent		sched: Fix cgroup smp fairness (diff)
sched: Optimize unused cgroup configuration
When cgroup group scheduling is built in, skip some code paths if no
cgroups (other than the root group) are configured.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
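The fast path added below hinges on a single predicate, root_task_group_empty().
As a rough sketch of what such a helper can look like, assuming (as the
kernel/sched.c of this era does) that the root task group keeps its child
groups on a list; this is not a verbatim copy of the kernel code:

	/*
	 * Sketch of the predicate the patch relies on: with no child
	 * groups, only the root cgroup exists and the shares machinery
	 * has nothing to rebalance. Assumes a root_task_group with a
	 * 'children' list_head, per the scheduler code of this period.
	 */
	static inline int root_task_group_empty(void)
	{
		return list_empty(&root_task_group.children);
	}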
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 26976cd8be0f..ca1f76ba7773 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1629,8 +1629,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 
 static void update_shares(struct sched_domain *sd)
 {
-	u64 now = cpu_clock(raw_smp_processor_id());
-	s64 elapsed = now - sd->last_update;
+	s64 elapsed;
+	u64 now;
+
+	if (root_task_group_empty())
+		return;
+
+	now = cpu_clock(raw_smp_processor_id());
+	elapsed = now - sd->last_update;
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
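The reshuffled prologue above keeps the existing rate limit intact: the clock
is sampled only once the fast path has been ruled out, and the shares walk
still runs at most once per sysctl_sched_shares_ratelimit nanoseconds per
domain. A standalone sketch of that pattern, with all names illustrative and
only the structure mirroring the hunk:

	/*
	 * Rate-limit pattern from the hunk above, in isolation: rule
	 * out the cheap fast path first, sample the clock once, and do
	 * the expensive work only if enough time has elapsed since the
	 * last run. 'ratelimit_ns' stands in for
	 * sysctl_sched_shares_ratelimit; its value is caller-supplied
	 * here, not the kernel default.
	 */
	#include <stdint.h>

	struct throttled {
		uint64_t last_update;	/* ns timestamp of last real update */
	};

	static void maybe_update(struct throttled *t, uint64_t now_ns,
				 int64_t ratelimit_ns, void (*work)(void))
	{
		int64_t elapsed = (int64_t)(now_ns - t->last_update);

		if (elapsed >= ratelimit_ns) {
			t->last_update = now_ns;
			work();		/* the expensive part, e.g. the tg walk */
		}
	}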
@@ -1640,6 +1646,9 @@ static void update_shares(struct sched_domain *sd)
 
 static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 {
+	if (root_task_group_empty())
+		return;
+
 	spin_unlock(&rq->lock);
 	update_shares(sd);
 	spin_lock(&rq->lock);
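A design note on this hunk: update_shares() must not be entered with
rq->lock held (the shares walk ends up taking other runqueues' locks), which
is why the wrapper drops and retakes the lock around the call. The new early
return therefore saves not only the walk itself but also a pointless
unlock/lock pair on a hot load-balancing path. The lock-ordering rationale is
inferred from the surrounding scheduler code, not spelled out in the commit
message.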
@@ -1647,6 +1656,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
+	if (root_task_group_empty())
+		return;
+
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
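What the early return in update_h_load() skips is a full task-group tree
walk. A simplified sketch of a walker in the style of walk_tg_tree(),
recursive here for clarity; the visitor typedef and the down/up shape follow
the kernel code of this era, while the rest is illustrative:

	/*
	 * Simplified task-group tree walk: invoke 'down' on each group
	 * on the way down and 'up' on the way back up. tg_nop() is the
	 * no-op visitor the kernel pairs with tg_load_down(). The field
	 * names on struct task_group ('children', 'siblings') are
	 * assumptions based on the scheduler code of this period.
	 */
	typedef int (*tg_visitor)(struct task_group *, void *);

	static int walk_tg_tree_sketch(struct task_group *tg,
				       tg_visitor down, tg_visitor up,
				       void *data)
	{
		struct task_group *child;
		int ret;

		ret = down(tg, data);
		if (ret)
			return ret;

		list_for_each_entry(child, &tg->children, siblings) {
			ret = walk_tg_tree_sketch(child, down, up, data);
			if (ret)
				return ret;
		}

		return up(tg, data);
	}

With only the root group configured, the walk degenerates to a single
down/up pair on the root, which is pure overhead; hence the early return.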