Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--  kernel/audit_tree.c  31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index c3b5fcb8eca4..2fa8d61b6fd2 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -172,27 +172,18 @@ static unsigned long inode_to_key(const struct inode *inode)
/*
* Function to return search key in our hash from chunk. Key 0 is special and
* should never be present in the hash.
- *
- * Must be called with chunk->mark.lock held to protect from connector
- * becoming NULL.
*/
-static unsigned long __chunk_to_key(struct audit_chunk *chunk)
+static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
- if (!chunk->mark.connector)
+ /*
+ * We have a reference to the mark so it should be attached to a
+ * connector.
+ */
+ if (WARN_ON_ONCE(!chunk->mark.connector))
return 0;
return (unsigned long)chunk->mark.connector->inode;
}

-static unsigned long chunk_to_key(struct audit_chunk *chunk)
-{
- unsigned long key;
-
- spin_lock(&chunk->mark.lock);
- key = __chunk_to_key(chunk);
- spin_unlock(&chunk->mark.lock);
- return key;
-}
-
static inline struct list_head *chunk_hash(unsigned long key)
{
unsigned long n = key / L1_CACHE_BYTES;
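The hunk above drops the spin_lock-protected chunk_to_key() wrapper and renames __chunk_to_key() to chunk_to_key(): every remaining caller either holds a reference to the mark or the group's mark_mutex, so chunk->mark.connector can no longer become NULL underneath it, and the old NULL check is demoted to a WARN_ON_ONCE(). A minimal caller sketch of that assumption (the example_lookup() helper is hypothetical, not part of the patch):

/*
 * Hypothetical illustration, not from the patch: with the group's
 * mark_mutex held and the mark still attached, the connector (and the
 * inode it points to) cannot be torn down, so chunk_to_key() may be
 * called without taking chunk->mark.lock.
 */
static void example_lookup(struct audit_chunk *chunk)
{
	unsigned long key;

	mutex_lock(&chunk->mark.group->mark_mutex);
	if (chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
		key = chunk_to_key(chunk);
		/* ... e.g. walk chunk_hash(key) under hash_lock ... */
	}
	mutex_unlock(&chunk->mark.group->mark_mutex);
}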
@@ -202,7 +193,7 @@ static inline struct list_head *chunk_hash(unsigned long key)
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- unsigned long key = __chunk_to_key(chunk);
+ unsigned long key = chunk_to_key(chunk);
struct list_head *list;

if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
@@ -263,6 +254,10 @@ static void untag_chunk(struct node *p)

mutex_lock(&entry->group->mark_mutex);
spin_lock(&entry->lock);
+ /*
+ * mark_mutex protects mark from getting detached and thus also from
+ * mark->connector->inode getting NULL.
+ */
if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
spin_unlock(&entry->lock);
mutex_unlock(&entry->group->mark_mutex);
@@ -423,6 +418,10 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)

mutex_lock(&old_entry->group->mark_mutex);
spin_lock(&old_entry->lock);
+ /*
+ * mark_mutex protects mark from getting detached and thus also from
+ * mark->connector->inode getting NULL.
+ */
if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
/* old_entry is being shot, lets just lie */
spin_unlock(&old_entry->lock);
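Both untag_chunk() and tag_chunk() above follow the same revalidation pattern that the new comments document. A condensed sketch of that ordering (error-path details elided, using the names from the hunks above):

/*
 * Condensed sketch of the pattern used above: take group->mark_mutex
 * first (it prevents the mark from being detached), then the mark's
 * spinlock, and re-check FSNOTIFY_MARK_FLAG_ATTACHED before relying on
 * mark->connector->inode.
 */
mutex_lock(&entry->group->mark_mutex);
spin_lock(&entry->lock);
if (!(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
	/* mark is being detached; unwind in reverse order and bail */
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	return;
}
/* safe: connector and its inode stay valid until mark_mutex is dropped */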