author    Peter Zijlstra    2012-02-20 21:49:09 +0100
committer Ingo Molnar       2012-03-01 13:08:37 +0100
commit    367456c756a6b84f493ca9cc5b17b1f5d38ef466 (patch)
tree      0e95a2fa5cb25ea14e2841d84d4d2410ff383e33 /kernel/sched/sched.h
parent    sched: Rename load-balancing fields (diff)
sched: Ditch per cgroup task lists for load-balancing
Per cgroup load-balance has numerous problems, chief amongst them that there is no real sane order in them. So stop pretending it makes sense and enqueue all tasks on a single list.

This also allows us to more easily fix the forward-progress issue uncovered by the lock-break stuff. Rotate the list on failure to migrate and limit the total iterations to nr_running (which, with the lock being released, isn't strictly accurate but is close enough).

Also add a filter that skips very light tasks on the first attempt around the list; this attempts to avoid shooting whole cgroups around without affecting overall balance.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-tx8yqydc7eimgq7i4rkc3a4g@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
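
The scheme is small enough to model outside the kernel. The sketch below is illustrative userspace C, not the commit's code (the real walk is the move_tasks() loop in kernel/sched/fair.c): one task list is traversed at most nr_running times, very light tasks are skipped until balancing has already failed, and every task that is not moved is rotated to the tail so a later attempt resumes past it. The list helpers mimic <linux/list.h>; the pinned flag and the load threshold of 16 are assumptions made for this example.

/*
 * Userspace model of the single-list balancing walk described above.
 * Illustrative only: the threshold and pinned logic stand in for
 * task_h_load() and can_migrate_task().
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n); list_add_tail(n, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct task {
	const char *name;
	unsigned long load;	/* stands in for task_h_load(p) */
	int pinned;		/* stands in for a can_migrate_task() failure */
	struct list_head node;	/* links the task on the one per-rq list */
};

/*
 * One balancing attempt: cap iterations at nr_running, skip very light
 * tasks unless balancing has already failed, and rotate every task we
 * do not move to the tail so the next attempt resumes past it.
 */
static unsigned long pull_tasks(struct list_head *tasks, int nr_running,
				unsigned long imbalance, int balance_failed)
{
	unsigned long moved = 0;
	int loop = 0;

	while (!list_empty(tasks) && moved < imbalance) {
		struct task *p = container_of(tasks->next, struct task, node);

		if (++loop > nr_running)	/* seen roughly every task */
			break;

		if (p->load < 16 && !balance_failed)	/* too light, first try */
			goto next;

		if (p->pinned)			/* cannot migrate this one */
			goto next;

		list_del(&p->node);		/* "migrate" it away */
		moved += p->load;
		printf("moved %s (load %lu)\n", p->name, p->load);
		continue;
next:
		list_move_tail(&p->node, tasks);	/* rotate for fwd progress */
	}
	return moved;
}

int main(void)
{
	struct task ts[] = {
		{ "light",    2, 0, { 0 } },
		{ "heavy",  512, 0, { 0 } },
		{ "pinned", 256, 1, { 0 } },
		{ "medium", 300, 0, { 0 } },
	};
	struct list_head cfs_tasks;
	size_t i;

	list_init(&cfs_tasks);
	for (i = 0; i < sizeof(ts) / sizeof(ts[0]); i++)
		list_add_tail(&ts[i].node, &cfs_tasks);

	printf("total moved: %lu\n", pull_tasks(&cfs_tasks, 4, 600, 0));
	return 0;
}

Against this sample list, "heavy" and "medium" are pulled while "light" is filtered on the first lap and "pinned" is rotated past, so a cgroup full of tiny tasks is left alone unless balancing keeps failing.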
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c0660a1a0cd1..753bdd567416 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -212,9 +212,6 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 
-	struct list_head tasks;
-	struct list_head *balance_iterator;
-
 	/*
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
@@ -242,11 +239,6 @@ struct cfs_rq {
 
 #ifdef CONFIG_SMP
 	/*
-	 * the part of load.weight contributed by tasks
-	 */
-	unsigned long task_weight;
-
-	/*
 	 * h_load = weight * f(tg)
 	 *
 	 * Where f(tg) is the recursive weight fraction assigned to
@@ -420,6 +412,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	struct list_head cfs_tasks;
+
 	u64 rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;