author    Jason Baron     2010-10-01 23:23:48 +0200
committer Steven Rostedt  2010-10-28 15:17:40 +0200
commit    91bad2f8d3057482b9afb599f14421b007136960 (patch)
tree      ea5e09e74107593dcfc192c65c1395ed22674df4 /kernel
parent    jump label: Fix module __init section race (diff)
jump label: Fix deadlock b/w jump_label_mutex vs. text_mutex
register_kprobe() downs the 'text_mutex' and then calls
jump_label_text_reserved(), which downs the 'jump_label_mutex'.
However, the jump label code takes those mutexes in the reverse
order.

Fix by requiring the caller of jump_label_text_reserved() to do
the jump label locking via the newly added: jump_label_lock(),
jump_label_unlock(). Currently, kprobes is the only user
of jump_label_text_reserved().

Reported-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Jason Baron <jbaron@redhat.com>
LKML-Reference: <759032c48d5e30c27f0bba003d09bffa8e9f28bb.1285965957.git.jbaron@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
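The bug is the classic ABBA pattern: the jump label code takes jump_label_mutex and then text_mutex, while register_kprobe() took text_mutex first and only reached jump_label_mutex inside jump_label_text_reserved(). The patch makes both paths take jump_label_mutex first. The following is a minimal, illustrative user-space sketch of that ordering rule using pthread mutexes; it is not kernel code, the two thread functions merely mirror the two kernel paths, and the lock names are reused only for readability (build with -pthread).

/*
 * Hypothetical sketch: both paths acquire the locks in the same
 * order (jump_label_mutex before text_mutex), so no ABBA deadlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t jump_label_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t text_mutex       = PTHREAD_MUTEX_INITIALIZER;

/* Models the jump label update path: jump_label_mutex, then text_mutex. */
static void *update_path(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&jump_label_mutex);
	pthread_mutex_lock(&text_mutex);         /* text patching step */
	puts("update: patched jump site");
	pthread_mutex_unlock(&text_mutex);
	pthread_mutex_unlock(&jump_label_mutex);
	return NULL;
}

/* Models register_kprobe() after the fix: same order as above. */
static void *kprobe_path(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&jump_label_mutex);   /* jump_label_lock()   */
	pthread_mutex_lock(&text_mutex);
	puts("kprobe: checked reserved text");
	pthread_mutex_unlock(&text_mutex);
	pthread_mutex_unlock(&jump_label_mutex); /* jump_label_unlock() */
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, update_path, NULL);
	pthread_create(&b, NULL, kprobe_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}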
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/jump_label.c  33
-rw-r--r--  kernel/kprobes.c      6
2 files changed, 27 insertions, 12 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index be9e105345eb..12cce78e9568 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -39,6 +39,16 @@ struct jump_label_module_entry {
struct module *mod;
};
+void jump_label_lock(void)
+{
+ mutex_lock(&jump_label_mutex);
+}
+
+void jump_label_unlock(void)
+{
+ mutex_unlock(&jump_label_mutex);
+}
+
static int jump_label_cmp(const void *a, const void *b)
{
const struct jump_entry *jea = a;
@@ -152,7 +162,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
struct jump_label_module_entry *e_module;
int count;
- mutex_lock(&jump_label_mutex);
+ jump_label_lock();
entry = get_jump_label_entry((jump_label_t)key);
if (entry) {
count = entry->nr_entries;
@@ -175,7 +185,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
}
}
}
- mutex_unlock(&jump_label_mutex);
+ jump_label_unlock();
}
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
@@ -232,6 +242,7 @@ out:
* overlaps with any of the jump label patch addresses. Code
* that wants to modify kernel text should first verify that
* it does not overlap with any of the jump label addresses.
+ * Caller must hold jump_label_mutex.
*
* returns 1 if there is an overlap, 0 otherwise
*/
@@ -242,7 +253,6 @@ int jump_label_text_reserved(void *start, void *end)
struct jump_entry *iter_stop = __start___jump_table;
int conflict = 0;
- mutex_lock(&jump_label_mutex);
iter = iter_start;
while (iter < iter_stop) {
if (addr_conflict(iter, start, end)) {
@@ -257,7 +267,6 @@ int jump_label_text_reserved(void *start, void *end)
conflict = module_conflict(start, end);
#endif
out:
- mutex_unlock(&jump_label_mutex);
return conflict;
}
@@ -268,7 +277,7 @@ static __init int init_jump_label(void)
struct jump_entry *iter_stop = __stop___jump_table;
struct jump_entry *iter;
- mutex_lock(&jump_label_mutex);
+ jump_label_lock();
ret = build_jump_label_hashtable(__start___jump_table,
__stop___jump_table);
iter = iter_start;
@@ -276,7 +285,7 @@ static __init int init_jump_label(void)
arch_jump_label_text_poke_early(iter->code);
iter++;
}
- mutex_unlock(&jump_label_mutex);
+ jump_label_unlock();
return ret;
}
early_initcall(init_jump_label);
@@ -409,21 +418,21 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
switch (val) {
case MODULE_STATE_COMING:
- mutex_lock(&jump_label_mutex);
+ jump_label_lock();
ret = add_jump_label_module(mod);
if (ret)
remove_jump_label_module(mod);
- mutex_unlock(&jump_label_mutex);
+ jump_label_unlock();
break;
case MODULE_STATE_GOING:
- mutex_lock(&jump_label_mutex);
+ jump_label_lock();
remove_jump_label_module(mod);
- mutex_unlock(&jump_label_mutex);
+ jump_label_unlock();
break;
case MODULE_STATE_LIVE:
- mutex_lock(&jump_label_mutex);
+ jump_label_lock();
remove_jump_label_module_init(mod);
- mutex_unlock(&jump_label_mutex);
+ jump_label_unlock();
break;
}
return ret;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 99865c33a60d..9437e14f36bd 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1146,13 +1146,16 @@ int __kprobes register_kprobe(struct kprobe *p)
return ret;
preempt_disable();
+ jump_label_lock();
if (!kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr) ||
ftrace_text_reserved(p->addr, p->addr) ||
jump_label_text_reserved(p->addr, p->addr)) {
preempt_enable();
+ jump_label_unlock();
return -EINVAL;
}
+ jump_label_unlock();
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
@@ -1187,6 +1190,8 @@ int __kprobes register_kprobe(struct kprobe *p)
INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
+ jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
get_online_cpus(); /* For avoiding text_mutex deadlock. */
mutex_lock(&text_mutex);
@@ -1214,6 +1219,7 @@ int __kprobes register_kprobe(struct kprobe *p)
out:
mutex_unlock(&text_mutex);
put_online_cpus();
+ jump_label_unlock();
mutex_unlock(&kprobe_mutex);
if (probed_mod)