
[5/6] audit: Make hash table insertion safe against concurrent lookups

Message ID 20180628164014.4925-6-jack@suse.cz (mailing list archive)
State New, archived

Commit Message

Jan Kara June 28, 2018, 4:40 p.m. UTC
Currently, the audit tree code does not make sure that when a chunk is
inserted into the hash table, it is fully initialized. So in theory (in
practice only on DEC Alpha) a user of RCU lookup could see an uninitialized
structure in the hash table and crash. Add appropriate barriers between
initialization of the structure and its insertion into the hash table.

Signed-off-by: Jan Kara <jack@suse.cz>
---
 kernel/audit_tree.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)
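
The ordering the patch relies on can be condensed to the following
writer/reader pairing. This is a sketch assembled from the hunks below
(identifiers are taken from the patch; locking and error handling are
elided), not additional code:

	/* Writer (create_chunk(), under hash_lock): initialize first ... */
	chunk->key = inode_to_key(inode);
	/*
	 * ... then make sure the chunk is fully initialized before it
	 * becomes visible in the hash. Pairs with the data dependency
	 * barrier in READ_ONCE() below.
	 */
	smp_wmb();
	insert_hash(chunk);

	/* Reader (audit_tree_lookup(), under RCU): */
	list_for_each_entry_rcu(p, list, hash) {
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}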

Comments

Amir Goldstein June 29, 2018, 1:02 p.m. UTC | #1
On Thu, Jun 28, 2018 at 7:40 PM, Jan Kara <jack@suse.cz> wrote:
> Currently, the audit tree code does not make sure that when a chunk is
> inserted into the hash table, it is fully initialized. So in theory (in
> practice only on DEC Alpha) a user of RCU lookup could see an uninitialized
> structure in the hash table and crash. Add appropriate barriers between
> initialization of the structure and its insertion into the hash table.
>
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
>  kernel/audit_tree.c | 28 +++++++++++++++++++++++++---
>  1 file changed, 25 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
> index 89ad8857a578..46cc6d046c75 100644
> --- a/kernel/audit_tree.c
> +++ b/kernel/audit_tree.c
> @@ -198,7 +198,11 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
>         struct audit_chunk *p;
>
>         list_for_each_entry_rcu(p, list, hash) {
> -               if (p->key == key) {
> +               /*
> +                * We use a data dependency barrier in READ_ONCE() to make sure
> +                * the chunk we see is fully initialized.
> +                */
> +               if (READ_ONCE(p->key) == key) {
>                         atomic_long_inc(&p->refs);
>                         return p;
>                 }
> @@ -303,9 +307,15 @@ static void untag_chunk(struct node *p)
>                 list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
>         }
>
> -       list_replace_rcu(&chunk->hash, &new->hash);
>         list_for_each_entry(owner, &new->trees, same_root)
>                 owner->root = new;
> +       /*
> +        * Make sure chunk is fully initialized before making it visible in the
> +        * hash. Pairs with a data dependency barrier in READ_ONCE() in
> +        * audit_tree_lookup().
> +        */
> +       smp_wmb();
> +       list_replace_rcu(&chunk->hash, &new->hash);
>         spin_unlock(&hash_lock);
>         fsnotify_detach_mark(entry);
>         mutex_unlock(&entry->group->mark_mutex);
> @@ -367,6 +377,12 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
>                 list_add(&tree->same_root, &chunk->trees);
>         }
>         chunk->key = inode_to_key(inode);
> +       /*
> +        * Make sure chunk is fully initialized before making it visible in the
> +        * hash. Pairs with a data dependency barrier in READ_ONCE() in
> +        * audit_tree_lookup().
> +        */
> +       smp_wmb();
>         insert_hash(chunk);
>         spin_unlock(&hash_lock);
>         mutex_unlock(&audit_tree_group->mark_mutex);
> @@ -458,7 +474,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
>         p->owner = tree;
>         get_tree(tree);
>         list_add(&p->list, &tree->chunks);
> -       list_replace_rcu(&old->hash, &chunk->hash);
>         list_for_each_entry(owner, &chunk->trees, same_root)
>                 owner->root = chunk;
>         old->dead = 1;
> @@ -466,6 +481,13 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
>                 tree->root = chunk;
>                 list_add(&tree->same_root, &chunk->trees);
>         }
> +       /*
> +        * Make sure chunk is fully initialized before making it visible in the
> +        * hash. Pairs with a data dependency barrier in READ_ONCE() in
> +        * audit_tree_lookup().
> +        */
> +       smp_wmb();
> +       list_replace_rcu(&old->hash, &chunk->hash);

IMO, now that list_replace_rcu() is no longer a one-liner (counting the
smp_wmb() and the comment above it), it would be cleaner to have a helper
update_hash(old, chunk) right next to insert_hash(), and for the same reason
the smp_wmb() with its comment should go into the insert_hash() helper.

Thanks,
Amir.
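
(For illustration, the kind of helper Amir has in mind might look roughly
like the following. The name and placement follow his suggestion; this is
not code from the series:)

	static void update_hash(struct audit_chunk *old, struct audit_chunk *new)
	{
		/*
		 * Make sure the new chunk is fully initialized before making
		 * it visible in the hash. Pairs with the data dependency
		 * barrier in READ_ONCE() in audit_tree_lookup().
		 */
		smp_wmb();
		list_replace_rcu(&old->hash, &new->hash);
	}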
Jan Kara July 3, 2018, 3:31 p.m. UTC | #2
On Fri 29-06-18 16:02:10, Amir Goldstein wrote:
> On Thu, Jun 28, 2018 at 7:40 PM, Jan Kara <jack@suse.cz> wrote:
> > Currently, the audit tree code does not make sure that when a chunk is
> > inserted into the hash table, it is fully initialized. So in theory (in
> > practice only on DEC Alpha) a user of RCU lookup could see an uninitialized
> > structure in the hash table and crash. Add appropriate barriers between
> > initialization of the structure and its insertion into the hash table.
> >
> > Signed-off-by: Jan Kara <jack@suse.cz>
...
> > @@ -466,6 +481,13 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
> >                 tree->root = chunk;
> >                 list_add(&tree->same_root, &chunk->trees);
> >         }
> > +       /*
> > +        * Make sure chunk is fully initialized before making it visible in the
> > +        * hash. Pairs with a data dependency barrier in READ_ONCE() in
> > +        * audit_tree_lookup().
> > +        */
> > +       smp_wmb();
> > +       list_replace_rcu(&old->hash, &chunk->hash);
> 
> IMO, now that list_replace_rcu() is no longer a one-liner (counting the
> smp_wmb() and the comment above it), it would be cleaner to have a helper
> update_hash(old, chunk) right next to insert_hash(), and for the same reason
> the smp_wmb() with its comment should go into the insert_hash() helper.

I was thinking about this as well when writing the code. What I disliked
about hiding the smp_wmb() in a helper function is that it then becomes much
less obvious that you need a good reason to add anything after the smp_wmb(),
since RCU readers are not guaranteed to see such a write. However, with some
commenting I guess it should be obvious enough. I'll do that as a separate
cleanup patch, though.

								Honza
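
(A sketch of how such a commented helper could spell out the pitfall Jan
mentions; the body is schematic and this is not the follow-up cleanup
itself:)

	/*
	 * Publish @chunk to RCU readers. Stores to the chunk done before this
	 * call are guaranteed to be visible to lookups that find it; anything
	 * the caller writes to the chunk afterwards is NOT ordered against the
	 * publication and may be missed by concurrent audit_tree_lookup()
	 * callers.
	 */
	static void insert_hash(struct audit_chunk *chunk)
	{
		struct list_head *list = chunk_hash(chunk->key);

		smp_wmb();	/* pairs with READ_ONCE() in audit_tree_lookup() */
		list_add_rcu(&chunk->hash, list);
	}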

Patch

diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 89ad8857a578..46cc6d046c75 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -198,7 +198,11 @@  struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 	struct audit_chunk *p;
 
 	list_for_each_entry_rcu(p, list, hash) {
-		if (p->key == key) {
+		/*
+		 * We use a data dependency barrier in READ_ONCE() to make sure
+		 * the chunk we see is fully initialized.
+		 */
+		if (READ_ONCE(p->key) == key) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -303,9 +307,15 @@  static void untag_chunk(struct node *p)
 		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
 	}
 
-	list_replace_rcu(&chunk->hash, &new->hash);
 	list_for_each_entry(owner, &new->trees, same_root)
 		owner->root = new;
+	/*
+	 * Make sure chunk is fully initialized before making it visible in the
+	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
+	 * audit_tree_lookup().
+	 */
+	smp_wmb();
+	list_replace_rcu(&chunk->hash, &new->hash);
 	spin_unlock(&hash_lock);
 	fsnotify_detach_mark(entry);
 	mutex_unlock(&entry->group->mark_mutex);
@@ -367,6 +377,12 @@  static int create_chunk(struct inode *inode, struct audit_tree *tree)
 		list_add(&tree->same_root, &chunk->trees);
 	}
 	chunk->key = inode_to_key(inode);
+	/*
+	 * Make sure chunk is fully initialized before making it visible in the
+	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
+	 * audit_tree_lookup().
+	 */
+	smp_wmb();
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
 	mutex_unlock(&audit_tree_group->mark_mutex);
@@ -458,7 +474,6 @@  static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	p->owner = tree;
 	get_tree(tree);
 	list_add(&p->list, &tree->chunks);
-	list_replace_rcu(&old->hash, &chunk->hash);
 	list_for_each_entry(owner, &chunk->trees, same_root)
 		owner->root = chunk;
 	old->dead = 1;
@@ -466,6 +481,13 @@  static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		tree->root = chunk;
 		list_add(&tree->same_root, &chunk->trees);
 	}
+	/*
+	 * Make sure chunk is fully initialized before making it visible in the
+	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
+	 * audit_tree_lookup().
+	 */
+	smp_wmb();
+	list_replace_rcu(&old->hash, &chunk->hash);
 	spin_unlock(&hash_lock);
 	fsnotify_detach_mark(old_entry);
 	mutex_unlock(&audit_tree_group->mark_mutex);