From 9c368b5b6eccce1cbd7f68142106b3b4ddb1c5b5 Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Fri, 12 Sep 2014 09:12:15 -0400
Subject: sched, time: Fix lock inversion in thread_group_cputime()

The sig->stats_lock nests inside the tasklist_lock and the
sighand->siglock in __exit_signal and wait_task_zombie.

However, both of those locks can be taken from irq context,
which means we need to use the interrupt safe variant of
read_seqbegin_or_lock. This blocks interrupts when the "lock"
branch is taken (seq is odd), preventing the lock inversion.

On the first (lockless) pass through the loop, irqs are not
blocked.

Reported-by: Stanislaw Gruszka
Signed-off-by: Rik van Riel
Signed-off-by: Peter Zijlstra (Intel)
Cc: prarit@redhat.com
Cc: oleg@redhat.com
Cc: Linus Torvalds
Link: http://lkml.kernel.org/r/1410527535-9814-3-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/cputime.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 2b57031afc19..64492dff8a81 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -289,13 +289,14 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	cputime_t utime, stime;
 	struct task_struct *t;
 	unsigned int seq, nextseq;
+	unsigned long flags;
 
 	rcu_read_lock();
 	/* Attempt a lockless read on the first round. */
 	nextseq = 0;
 	do {
 		seq = nextseq;
-		read_seqbegin_or_lock(&sig->stats_lock, &seq);
+		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 		times->utime = sig->utime;
 		times->stime = sig->stime;
 		times->sum_exec_runtime = sig->sum_sched_runtime;
@@ -309,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 		/* If lockless access failed, take the lock. */
 		nextseq = 1;
 	} while (need_seqretry(&sig->stats_lock, seq));
-	done_seqretry(&sig->stats_lock, seq);
+	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
 	rcu_read_unlock();
 }
 
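For reference, the two interrupt-safe helpers this patch switches to behave
roughly as follows. This is a paraphrased sketch of the include/linux/seqlock.h
helpers, not verbatim kernel source: on an even sequence number the reader
takes the lockless path and irqs stay enabled; once a failed lockless read
forces an odd sequence number on the retry pass, the reader takes the lock
with irqs disabled, which is what prevents the inversion described above.

/* Paraphrased sketch of the _irqsave seqlock helpers, not verbatim source. */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* even: lockless pass, irqs left enabled */
		*seq = read_seqbegin(lock);
	else			/* odd: retry pass, lock taken with irqs off */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)		/* only the locked (odd) pass holds the lock */
		read_sequnlock_excl_irqrestore(lock, flags);
}

Because flags is only meaningful when the odd (locked) pass ran,
done_seqretry_irqrestore() restores irqs only in that case; a read that
succeeds locklessly exits the loop with irqs never having been disabled,
matching the changelog's note about the first pass.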