author		Matt Fleming	2016-09-21 15:38:13 +0200
committer	Ingo Molnar	2017-01-14 11:29:35 +0100
commit		cb42c9a3ebbbb23448c3f9a25417fae6309b1a92 (patch)
tree		aa786c889a2e6c42889096ed54daba878285e866	/kernel/sched/core.c
parent		sched/core: Add missing update_rq_clock() call in set_user_nice() (diff)
sched/core: Add debugging code to catch missing update_rq_clock() calls
There are no diagnostic checks for figuring out when we've accidentally
missed update_rq_clock() calls. Let's add some by piggybacking on the
rq_*pin_lock() wrappers.

The idea behind the diagnostic checks is that upon pinning the rq lock,
the rq clock should be updated, via update_rq_clock(), before anybody
reads the clock with rq_clock() or rq_clock_task(). The exception to
this rule is when updates have explicitly been disabled with the
rq_clock_skip_update() optimisation.

There are some functions that only unpin the rq lock in order to grab
some other lock and avoid deadlock. In that case we don't need to update
the clock again and the previous diagnostic state can be carried over in
rq_repin_lock() by saving the state in the rq_flags context.

Since this patch adds a new clock update flag alongside the skip flags
that already live in rq::clock_skip_update, that field has been renamed
to rq::clock_update_flags. An attempt has been made to keep the flag
manipulation code small and fast since it's used in the heart of the
__schedule() fast path.

For the !CONFIG_SCHED_DEBUG case the only object code change (other
than addresses) is the following change to reset RQCF_ACT_SKIP inside
of __schedule():

-       c7 83 38 09 00 00 00    movl   $0x0,0x938(%rbx)
-       00 00 00
+       83 a3 38 09 00 00 fc    andl   $0xfffffffc,0x938(%rbx)

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@unitn.it>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Yuyang Du <yuyang.du@intel.com>
Link: http://lkml.kernel.org/r/20160921133813.31976-8-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
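The diffstat below is limited to kernel/sched/core.c; the flag
definitions and the pin/unpin/repin wrappers the changelog refers to
live in the commit's kernel/sched/sched.h hunks, which this page does
not show. A rough sketch of that half of the scheme, reconstructed from
the changelog (the bodies below are an approximation, not the verbatim
patch):

	/* Bits in rq::clock_update_flags (was rq::clock_skip_update): */
	#define RQCF_REQ_SKIP	0x01	/* skip requested via rq_clock_skip_update() */
	#define RQCF_ACT_SKIP	0x02	/* request promoted to "active" in __schedule() */
	#define RQCF_UPDATED	0x04	/* debug: update_rq_clock() seen since last pin */

	struct rq_flags {
		unsigned long flags;
		struct pin_cookie cookie;
	#ifdef CONFIG_SCHED_DEBUG
		/*
		 * Stash of the debug state, so that a matched unpin/repin
		 * pair (done only to take some other lock) doesn't force a
		 * second clock update.
		 */
		unsigned int clock_update_flags;
	#endif
	};

	static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
	{
		rf->cookie = lockdep_pin_lock(&rq->lock);
	#ifdef CONFIG_SCHED_DEBUG
		/* A fresh pin expects a fresh update_rq_clock() call. */
		rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
		rf->clock_update_flags = 0;
	#endif
	}

	static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
	{
	#ifdef CONFIG_SCHED_DEBUG
		if (rq->clock_update_flags > RQCF_ACT_SKIP)
			rf->clock_update_flags = RQCF_UPDATED;
	#endif
		lockdep_unpin_lock(&rq->lock, rf->cookie);
	}

	static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
	{
		lockdep_repin_lock(&rq->lock, rf->cookie);
	#ifdef CONFIG_SCHED_DEBUG
		/* Carry the previously observed state over the unpin. */
		rq->clock_update_flags |= rf->clock_update_flags;
	#endif
	}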
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2338927773a..a129b34b8206 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -102,9 +102,12 @@ void update_rq_clock(struct rq *rq)
 
 	lockdep_assert_held(&rq->lock);
 
-	if (rq->clock_skip_update & RQCF_ACT_SKIP)
+	if (rq->clock_update_flags & RQCF_ACT_SKIP)
 		return;
 
+#ifdef CONFIG_SCHED_DEBUG
+	rq->clock_update_flags |= RQCF_UPDATED;
+#endif
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
 	if (delta < 0)
 		return;
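The RQCF_UPDATED bit set under CONFIG_SCHED_DEBUG above is the producer
half of the check; the consumer half sits in the sched.h clock
accessors, which warn when the clock is read without either an update
or an active skip. Roughly (again a sketch, not the verbatim hunk):

	static inline void assert_clock_updated(struct rq *rq)
	{
		/*
		 * Neither RQCF_UPDATED nor RQCF_ACT_SKIP set means someone
		 * pinned the lock and read the clock without calling
		 * update_rq_clock() -- exactly the bug being hunted.
		 */
		SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
	}

	static inline u64 rq_clock(struct rq *rq)
	{
		lockdep_assert_held(&rq->lock);
		assert_clock_updated(rq);
		return rq->clock;
	}

	static inline u64 rq_clock_task(struct rq *rq)
	{
		lockdep_assert_held(&rq->lock);
		assert_clock_updated(rq);
		return rq->clock_task;
	}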
@@ -2889,7 +2892,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 		rq->prev_mm = oldmm;
 	}
 
-	rq->clock_skip_update = 0;
+	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
 
 	/*
 	 * Since the runqueue lock will be released by the next
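Note that the clear is now a mask rather than a plain zeroing: the
field also carries the debug-only RQCF_UPDATED bit, which should
survive so that later rq_clock() reads under the same pin don't warn.
This is the andl $0xfffffffc quoted in the changelog. The request side
it pairs with is the rq_clock_skip_update() helper mentioned above,
which after the rename plausibly reads:

	static inline void rq_clock_skip_update(struct rq *rq, bool skip)
	{
		lockdep_assert_held(&rq->lock);
		if (skip)
			rq->clock_update_flags |= RQCF_REQ_SKIP;
		else
			rq->clock_update_flags &= ~RQCF_REQ_SKIP;
	}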
@@ -3364,7 +3367,7 @@ static void __sched notrace __schedule(bool preempt)
 	raw_spin_lock(&rq->lock);
 	rq_pin_lock(rq, &rf);
 
-	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+	rq->clock_update_flags <<= 1; /* promote REQ to ACT */
 
 	switch_count = &prev->nivcsw;
 	if (!preempt && prev->state) {
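The left shift is the "small and fast" flag manipulation the changelog
mentions: with RQCF_REQ_SKIP == 0x01 and RQCF_ACT_SKIP == 0x02, a
single shift promotes a requested skip into an active one. A standalone
toy program walking through the bit states (the flag values come from
the commit; everything else here is purely illustrative):

	#include <assert.h>

	#define RQCF_REQ_SKIP	0x01
	#define RQCF_ACT_SKIP	0x02
	#define RQCF_UPDATED	0x04

	int main(void)
	{
		unsigned int flags = RQCF_REQ_SKIP;	/* a skip was requested */

		flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP); /* rq_pin_lock(): drop UPDATED */
		flags <<= 1;				/* promote REQ to ACT */
		assert(flags == RQCF_ACT_SKIP);

		/* update_rq_clock() now returns early via the RQCF_ACT_SKIP test. */
		flags &= ~(RQCF_ACT_SKIP | RQCF_REQ_SKIP); /* context_switch() clears both */
		assert(flags == 0);
		return 0;
	}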
@@ -3405,7 +3408,7 @@ static void __sched notrace __schedule(bool preempt)
 		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next, &rf); /* unlocks the rq */
 	} else {
-		rq->clock_skip_update = 0;
+		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
 		rq_unpin_lock(rq, &rf);
 		raw_spin_unlock_irq(&rq->lock);
 	}