 arch/i386/mm/pageattr.c |  4 ++++
 drivers/char/sysrq.c    | 19 +++++++++++++++++++
 include/linux/mm.h      |  4 ++++
 kernel/exit.c           |  5 +++++
 kernel/sched.c          |  1 +
 mm/page_alloc.c         |  3 +++
 mm/slab.c               |  1 +
 7 files changed, 37 insertions(+), 0 deletions(-)
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index c30a16df6440..e8a53552b13d 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -222,6 +222,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
+ if (!enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page+numpages));
+
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
*/
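The added call asserts that no mutex currently recorded as held lives inside the address range being unmapped. A minimal userspace sketch of that range check follows; the struct and function names are made up for illustration, this is not the kernel's mutex_debug_check_no_locks_freed() implementation:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical record of one currently-held lock. */
struct held_lock {
	const void *addr;	/* address of the lock object */
	const char *name;	/* symbolic name, used in the report */
};

/* Complain about every held lock whose object sits in [from, to). */
static void check_no_locks_freed(const struct held_lock *held, int nr_held,
				 const void *from, const void *to)
{
	uintptr_t start = (uintptr_t)from, end = (uintptr_t)to;
	int i;

	for (i = 0; i < nr_held; i++) {
		uintptr_t p = (uintptr_t)held[i].addr;

		if (p >= start && p < end)
			printf("BUG: lock '%s' held while its memory [%p..%p) is freed\n",
			       held[i].name, from, to);
	}
}

int main(void)
{
	static int fake_lock;	/* stand-in for a mutex object */
	struct held_lock held[] = { { &fake_lock, "fake_mutex" } };

	/* Freeing the 4096-byte block that contains fake_lock should warn. */
	check_no_locks_freed(held, 1, &fake_lock, (const char *)&fake_lock + 4096);
	return 0;
}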
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 145275ebdd7e..5765f672e853 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -153,6 +153,21 @@ static struct sysrq_key_op sysrq_mountro_op = {
/* END SYNC SYSRQ HANDLERS BLOCK */
+#ifdef CONFIG_DEBUG_MUTEXES
+
+static void
+sysrq_handle_showlocks(int key, struct pt_regs *pt_regs, struct tty_struct *tty)
+{
+ mutex_debug_show_all_locks();
+}
+
+static struct sysrq_key_op sysrq_showlocks_op = {
+ .handler = sysrq_handle_showlocks,
+ .help_msg = "show-all-locks(D)",
+ .action_msg = "Show Locks Held",
+};
+
+#endif
/* SHOW SYSRQ HANDLERS BLOCK */
@@ -294,7 +309,11 @@ static struct sysrq_key_op *sysrq_key_table[SYSRQ_KEY_TABLE_LENGTH] = {
#else
/* c */ NULL,
#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+/* d */ &sysrq_showlocks_op,
+#else
/* d */ NULL,
+#endif
/* e */ &sysrq_term_op,
/* f */ &sysrq_moom_op,
/* g */ NULL,
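With CONFIG_DEBUG_MUTEXES enabled, this hooks the otherwise unused 'd' key: Alt-SysRq-D on the console, or writing 'd' to /proc/sysrq-trigger where sysrq is enabled, prints the "Show Locks Held" action message and then dumps every mutex currently recorded as held via mutex_debug_show_all_locks().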
diff --git a/include/linux/mm.h b/include/linux/mm.h
index df80e63903b5..3f1fafc0245e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -13,6 +13,7 @@
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
struct mempolicy;
struct anon_vma;
@@ -1024,6 +1025,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!PageHighMem(page) && !enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page + numpages));
}
#endif
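This hunk touches the inline stub of kernel_map_pages() used when CONFIG_DEBUG_PAGEALLOC is not set, so the freed-range check is made even on kernels without page allocation debugging. Highmem pages are skipped because they have no permanent kernel mapping, so page_address() would not give a usable range for them.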
diff --git a/kernel/exit.c b/kernel/exit.c
index caceabf3f230..309a46fa16f8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -869,6 +870,10 @@ fastcall NORET_TYPE void do_exit(long code)
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+ /*
+ * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+ */
+ mutex_debug_check_no_locks_held(tsk);
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
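The same idea applied to task exit, again as a made-up userspace model rather than the kernel's mutex_debug_check_no_locks_held(): a task that exits while still owning a mutex leaves that mutex locked forever, so the debug code reports it as a bug.

#include <stdio.h>

/* Hypothetical per-task debug state. */
struct task_model {
	const char *comm;	/* task name */
	int nr_locks_held;	/* number of mutexes recorded as held */
};

/* Report a task that is exiting with locks still held. */
static void check_no_locks_held(const struct task_model *tsk)
{
	if (tsk->nr_locks_held)
		printf("BUG: %s exiting with %d lock(s) still held\n",
		       tsk->comm, tsk->nr_locks_held);
}

int main(void)
{
	struct task_model t = { "worker/0", 1 };

	check_no_locks_held(&t);	/* would print the BUG line */
	return 0;
}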
diff --git a/kernel/sched.c b/kernel/sched.c
index 92733091154c..34a945bcc022 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,6 +4386,7 @@ void show_state(void)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ mutex_debug_show_all_locks();
}
/**
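show_state() is also what the SysRq-T handler uses for its task dump, so that dump now ends with the same listing of held mutexes.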
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0e84924171b..a5e6891f7bb6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -415,6 +415,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
int reserved = 0;
arch_free_page(page, order);
+ if (!PageHighMem(page))
+ mutex_debug_check_no_locks_freed(page_address(page),
+ page_address(page+(1<<order)));
#ifndef CONFIG_MMU
for (i = 1 ; i < (1 << order) ; ++i)
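__free_pages_ok() releases a block of 1 << order contiguous pages, so the check covers the whole block, from page_address(page) up to page_address(page + (1 << order)); highmem pages are skipped for the same reason as in the kernel_map_pages() hunks above.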
diff --git a/mm/slab.c b/mm/slab.c
index 1c46c6383552..33aab345cd4a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3071,6 +3071,7 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = page_get_cache(virt_to_page(objp));
+ mutex_debug_check_no_locks_freed(objp, objp+obj_reallen(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
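Here the checked range is the object being kfree()d, from objp to objp + obj_reallen(c), and the check is placed before __cache_free() so that any complaint is raised while the object is still intact.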