author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-08-11 09:30:21 +0200
committer Ingo Molnar <mingo@elte.hu>              2008-08-11 09:30:21 +0200
commit    64aa348edc617dea17bbd01ddee4e47886d5ec8c (patch)
tree      002b5fa796aff225d0cb9d0c32bb3ba96da6eaaf /kernel/lockdep.c
parent    lockdep: change scheduler annotation (diff)
lockdep: lock_set_subclass - reset a held lock's subclass
This can be used to reset a held lock's subclass, for arbitrary-depth
iterated data structures such as trees or lists which have per-node
locks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
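For context, a sketch of the usage pattern this enables: a hand-over-hand
traversal takes each next per-node lock one subclass deeper (so lockdep
tolerates the momentary double hold), then resets the surviving lock back to
subclass 0 once the previous lock is dropped, keeping the annotation depth
bounded no matter how long the list is. The struct node and walk() below are
hypothetical; spin_lock_nested(), SINGLE_DEPTH_NESTING, _THIS_IP_ and the new
lock_set_subclass() are the kernel interfaces involved:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical list with one lock per node. */
struct node {
        spinlock_t lock;
        struct node *next;
};

static void walk(struct node *head)
{
        struct node *cur = head, *next;

        spin_lock(&cur->lock);
        while ((next = cur->next) != NULL) {
                /*
                 * Take the next node's lock one subclass deeper, so
                 * lockdep does not flag the brief double hold as
                 * recursive locking of the same class:
                 */
                spin_lock_nested(&next->lock, SINGLE_DEPTH_NESTING);
                spin_unlock(&cur->lock);
                /*
                 * Only one node lock is held again - fold its
                 * annotation back to subclass 0 so the subclass
                 * depth stays bounded across the whole walk:
                 */
                lock_set_subclass(&next->lock.dep_map, 0, _THIS_IP_);
                cur = next;
        }
        spin_unlock(&cur->lock);
}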
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 69 +++
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6999e64fc248..e14d383dcb0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
         return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+                    unsigned int subclass, unsigned long ip)
+{
+        struct task_struct *curr = current;
+        struct held_lock *hlock, *prev_hlock;
+        struct lock_class *class;
+        unsigned int depth;
+        int i;
+
+        depth = curr->lockdep_depth;
+        if (DEBUG_LOCKS_WARN_ON(!depth))
+                return 0;
+
+        prev_hlock = NULL;
+        for (i = depth-1; i >= 0; i--) {
+                hlock = curr->held_locks + i;
+                /*
+                 * We must not cross into another context:
+                 */
+                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                        break;
+                if (hlock->instance == lock)
+                        goto found_it;
+                prev_hlock = hlock;
+        }
+        return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+        class = register_lock_class(lock, subclass, 0);
+        hlock->class = class;
+
+        curr->lockdep_depth = i;
+        curr->curr_chain_key = hlock->prev_chain_key;
+
+        for (; i < depth; i++) {
+                hlock = curr->held_locks + i;
+                if (!__lock_acquire(hlock->instance,
+                                hlock->class->subclass, hlock->trylock,
+                                hlock->read, hlock->check, hlock->hardirqs_off,
+                                hlock->acquire_ip))
+                        return 0;
+        }
+
+        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+                return 0;
+        return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+                  unsigned int subclass, unsigned long ip)
+{
+        unsigned long flags;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        current->lockdep_recursion = 1;
+        check_flags(flags);
+        if (__lock_set_subclass(lock, subclass, ip))
+                check_chain_key(current);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
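
Note that the diffstat above is filtered to kernel/lockdep.c; since
lock_set_subclass() is exported for use elsewhere, the commit also has to
expose it in include/linux/lockdep.h. A sketch of what that declaration pair
plausibly looks like, assuming the CONFIG_LOCKDEP on/off split lockdep.h uses
for its other entry points (the stub discards its arguments so callers need
no dep_map field without lockdep):

#ifdef CONFIG_LOCKDEP
extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
                              unsigned long ip);
#else
/* No-op when lockdep is compiled out. */
# define lock_set_subclass(lock, subclass, ip)  do { } while (0)
#endif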