author:    Frederic Weisbecker  2009-03-15 22:10:37 +0100
committer: Ingo Molnar          2009-03-16 09:13:16 +0100
commit:    5be71b61f17b0e3bc8ad0b1a1b7b53ab7d574ebb
tree:      7ab4c915871b464a08de974570900381b169e034
parent:    tracing/syscalls: various cleanups
tracing/syscalls: protect thread flag toggling from races
Impact: fix syscall tracer enable/disable race

The current thread flag toggling is racy, as shown in the following
scenario:

- task A is the last user of syscall tracing; it releases
  TIF_SYSCALL_FTRACE on each task

- at the same time, task B starts syscall tracing: refcount == 0, so
  it sets up TIF_SYSCALL_FTRACE on each task

The effect of the mixup is unpredictable, so this fix adds a mutex to
{start,stop}_ftrace_syscalls().

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <1237151439-6755-3-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
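The locking pattern the patch adopts can be sketched outside the kernel.
Below is a minimal userspace analogue, with a pthread mutex standing in
for the kernel's DEFINE_MUTEX; trace_refcount, trace_lock and the two
helpers are illustrative names, not kernel symbols. The point is that
the counter check and the flag walk become one critical section, so a
first start and a last stop can never interleave:

#include <pthread.h>
#include <stdio.h>

static int trace_refcount;			/* plain int: the mutex serializes it */
static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_flag_on_all_tasks(void)   { puts("flags set"); }
static void clear_flag_on_all_tasks(void) { puts("flags cleared"); }

void start_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Only the first user walks the task list; the check and the
	 * walk happen under the lock, so no concurrent stop_tracing()
	 * can interleave with them. */
	if (++trace_refcount == 1)
		set_flag_on_all_tasks();
	pthread_mutex_unlock(&trace_lock);
}

void stop_tracing(void)
{
	pthread_mutex_lock(&trace_lock);
	/* Only the last user clears the flags, again under the lock. */
	if (--trace_refcount == 0)
		clear_flag_on_all_tasks();
	pthread_mutex_unlock(&trace_lock);
}

Once the mutex serializes both transitions, the counter itself no longer
needs to be atomic, which is why the patch also demotes refcount from
atomic_t to a plain int.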
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--  kernel/trace/trace_syscalls.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 26f9a8679d3d..a2a3af29c943 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -5,7 +5,11 @@
 #include "trace_output.h"
 #include "trace.h"
 
-static atomic_t refcount;
+/* Keep a counter of the syscall tracing users */
+static int refcount;
+
+/* Prevent from races on thread flags toggling */
+static DEFINE_MUTEX(syscall_trace_lock);
 
 /* Option to display the parameters types */
 enum {
@@ -96,9 +100,11 @@ void start_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* Don't enable the flag on the tasks twice */
-	if (atomic_inc_return(&refcount) != 1)
-		return;
+	if (++refcount != 1)
+		goto unlock;
 
 	arch_init_ftrace_syscalls();
 	read_lock_irqsave(&tasklist_lock, flags);
@@ -108,6 +114,9 @@ void start_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void stop_ftrace_syscalls(void)
@@ -115,9 +124,11 @@ void stop_ftrace_syscalls(void)
 	unsigned long flags;
 	struct task_struct *g, *t;
 
+	mutex_lock(&syscall_trace_lock);
+
 	/* There are perhaps still some users */
-	if (atomic_dec_return(&refcount))
-		return;
+	if (--refcount)
+		goto unlock;
 
 	read_lock_irqsave(&tasklist_lock, flags);
 
@@ -126,6 +137,9 @@ void stop_ftrace_syscalls(void)
 	} while_each_thread(g, t);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+	mutex_unlock(&syscall_trace_lock);
 }
 
 void ftrace_syscall_enter(struct pt_regs *regs)
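For contrast, here is a small, deliberately racy userspace model of the
pre-patch behaviour (hypothetical names; not kernel code). The counter
update is atomic, but the counter check plus the walk over all "tasks"
is not one atomic unit, so a last stop and a first start may clear and
set the flags concurrently:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define NTASKS 100000

static atomic_int refcount;
static int flag[NTASKS];	/* stands in for per-task TIF_SYSCALL_FTRACE */

/* Pre-patch shape: atomic counter, but the flag walk is unserialized.
 * The concurrent plain writes to flag[] are the bug being modelled. */
static void *stopper(void *unused)
{
	(void)unused;
	if (atomic_fetch_sub(&refcount, 1) == 1)	/* we were the last user */
		for (int i = 0; i < NTASKS; i++)
			flag[i] = 0;			/* clear on each "task" */
	return NULL;
}

static void *starter(void *unused)
{
	(void)unused;
	if (atomic_fetch_add(&refcount, 1) == 0)	/* we are the first user */
		for (int i = 0; i < NTASKS; i++)
			flag[i] = 1;			/* set on each "task" */
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int set = 0;

	atomic_store(&refcount, 1);			/* task A holds the only reference */
	for (int i = 0; i < NTASKS; i++)
		flag[i] = 1;

	pthread_create(&a, NULL, stopper, NULL);	/* A: last user stops  */
	pthread_create(&b, NULL, starter, NULL);	/* B: new user starts  */
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	for (int i = 0; i < NTASKS; i++)
		set += flag[i];
	printf("refcount=%d, flags set: %d/%d\n",
	       atomic_load(&refcount), set, NTASKS);
	return 0;
}

Depending on scheduling, a run can finish with refcount == 1 yet only
part of the flags set: the unpredictable mixup the changelog describes.
Serializing the whole check-and-walk with the mutex closes that window.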