[2/2] fsnotify: convert fsnotify_mark.refcnt from atomic_t to refcount_t

Message ID 1508495162-32339-2-git-send-email-elena.reshetova@intel.com (mailing list archive)
State New, archived

Commit Message

Reshetova, Elena Oct. 20, 2017, 10:26 a.m. UTC
atomic_t variables are currently used to implement reference
counters with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further
   increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situations and be exploitable.

The variable fsnotify_mark.refcnt is used as a pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
---
 fs/notify/inotify/inotify_user.c |  4 ++--
 fs/notify/mark.c                 | 14 +++++++-------
 include/linux/fsnotify_backend.h |  2 +-
 kernel/audit_tree.c              |  2 +-
 4 files changed, 11 insertions(+), 11 deletions(-)
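
[Editor's note: for readers less familiar with the API, the conversion is
mechanical, since refcount_t deliberately mirrors the atomic_t calls in the
refcounting schema described above. A schematic before/after sketch follows;
struct foo and the helper names are made up for illustration, not code from
this patch.]

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;		/* was: atomic_t refcnt; */
};

static void foo_init(struct foo *f)
{
	/* One reference for the creator; was: atomic_set(..., 1). */
	refcount_set(&f->refcnt, 1);
}

static bool foo_get_safe(struct foo *f)
{
	/* Fails once the count has reached zero (object is dying);
	 * was: atomic_inc_not_zero(). */
	return refcount_inc_not_zero(&f->refcnt);
}

static void foo_put(struct foo *f)
{
	/* Was: atomic_dec_and_test(). */
	if (refcount_dec_and_test(&f->refcnt))
		kfree(f);	/* last reference dropped */
}

[With CONFIG_REFCOUNT_FULL enabled (or an architecture fast path), the
refcount_* operations saturate and WARN on increment-from-zero, overflow
and underflow rather than silently wrapping, which is what closes the
use-after-free window described in the commit message.]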

Comments

Jan Kara Oct. 31, 2017, 11:01 a.m. UTC | #1
On Fri 20-10-17 13:26:02, Elena Reshetova wrote:
> atomic_t variables are currently used to implement reference
> counters with the following properties:
>  - counter is initialized to 1 using atomic_set()
>  - a resource is freed upon counter reaching zero
>  - once counter reaches zero, its further
>    increments aren't allowed
>  - counter schema uses basic atomic operations
>    (set, inc, inc_not_zero, dec_and_test, etc.)
> 
> Such atomic variables should be converted to a newly provided
> refcount_t type and API that prevents accidental counter overflows
> and underflows. This is important since overflows and underflows
> can lead to use-after-free situations and be exploitable.
> 
> The variable fsnotify_mark.refcnt is used as a pure reference counter.
> Convert it to refcount_t and fix up the operations.
> 
> Suggested-by: Kees Cook <keescook@chromium.org>
> Reviewed-by: David Windsor <dwindsor@gmail.com>
> Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
> Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>

Thanks for the patch! I will add it to my tree in a few days once other
pending work in this area is done (since this patch is going to conflict
with it and it's easier to resolve the conflict when applying this patch
later).

								Honza
Reshetova, Elena Oct. 31, 2017, 11:40 a.m. UTC | #2
> On Fri 20-10-17 13:26:02, Elena Reshetova wrote:
> > atomic_t variables are currently used to implement reference
> > counters with the following properties:
> >  - counter is initialized to 1 using atomic_set()
> >  - a resource is freed upon counter reaching zero
> >  - once counter reaches zero, its further
> >    increments aren't allowed
> >  - counter schema uses basic atomic operations
> >    (set, inc, inc_not_zero, dec_and_test, etc.)
> >
> > Such atomic variables should be converted to a newly provided
> > refcount_t type and API that prevents accidental counter overflows
> > and underflows. This is important since overflows and underflows
> > can lead to use-after-free situations and be exploitable.
> >
> > The variable fsnotify_mark.refcnt is used as a pure reference counter.
> > Convert it to refcount_t and fix up the operations.
> >
> > Suggested-by: Kees Cook <keescook@chromium.org>
> > Reviewed-by: David Windsor <dwindsor@gmail.com>
> > Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
> > Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
> 
> Thanks for the patch! I will add it to my tree in a few days once other
> pending work in this area is done (since this patch is going to conflict
> with it and it's easier to resolve the conflict when applying this patch
> later).

Thank you very much for both!

Best Regards,
Elena.

Patch

diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 7cc7d3f..d3c20e0 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -376,7 +376,7 @@  static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
 
 		fsnotify_get_mark(fsn_mark);
 		/* One ref for being in the idr, one ref we just took */
-		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
 	}
 
 	return i_mark;
@@ -446,7 +446,7 @@  static void inotify_remove_from_idr(struct fsnotify_group *group,
 	 * One ref for being in the idr
 	 * one ref grabbed by inotify_idr_find
 	 */
-	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
 		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
 			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
 		/* we can't really recover with bad ref cnting.. */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 9991f88..45f1141 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,8 +105,8 @@  static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-	WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-	atomic_inc(&mark->refcnt);
+	WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+	refcount_inc(&mark->refcnt);
 }
 
 /*
@@ -116,7 +116,7 @@  void fsnotify_get_mark(struct fsnotify_mark *mark)
  */
 static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
 {
-	return atomic_inc_not_zero(&mark->refcnt);
+	return refcount_inc_not_zero(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -211,7 +211,7 @@  void fsnotify_put_mark(struct fsnotify_mark *mark)
 
 	/* Catch marks that were actually never attached to object */
 	if (!mark->connector) {
-		if (atomic_dec_and_test(&mark->refcnt))
+		if (refcount_dec_and_test(&mark->refcnt))
 			fsnotify_final_mark_destroy(mark);
 		return;
 	}
@@ -220,7 +220,7 @@  void fsnotify_put_mark(struct fsnotify_mark *mark)
 	 * We have to be careful so that traversals of obj_list under lock can
 	 * safely grab mark reference.
 	 */
-	if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+	if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
 		return;
 
 	conn = mark->connector;
@@ -338,7 +338,7 @@  void fsnotify_detach_mark(struct fsnotify_mark *mark)
 
 	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
 	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-		     atomic_read(&mark->refcnt) < 1 +
+		     refcount_read(&mark->refcnt) < 1 +
 			!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
 	spin_lock(&mark->lock);
@@ -738,7 +738,7 @@  void fsnotify_init_mark(struct fsnotify_mark *mark,
 {
 	memset(mark, 0, sizeof(*mark));
 	spin_lock_init(&mark->lock);
-	atomic_set(&mark->refcnt, 1);
+	refcount_set(&mark->refcnt, 1);
 	fsnotify_get_group(group);
 	mark->group = group;
 }
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 20a57ba..26485b1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -244,7 +244,7 @@  struct fsnotify_mark {
 	__u32 mask;
 	/* We hold one for presence in g_list. Also one ref for each 'thing'
 	 * in kernel that found and may be using this mark. */
-	atomic_t refcnt;
+	refcount_t refcnt;
 	/* Group this mark is for. Set on mark creation, stable until last ref
 	 * is dropped */
 	struct fsnotify_group *group;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 011d46e..45ec960 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1007,7 +1007,7 @@  static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
 	 * We are guaranteed to have at least one reference to the mark from
 	 * either the inode or the caller of fsnotify_destroy_mark().
 	 */
-	BUG_ON(atomic_read(&entry->refcnt) < 1);
+	BUG_ON(refcount_read(&entry->refcnt) < 1);
 }
 
 static const struct fsnotify_ops audit_tree_ops = {
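
[Editor's note on the fsnotify_put_mark() hunk above: refcount_dec_and_lock()
keeps the contract of atomic_dec_and_lock(), returning true with the spinlock
held only when the count drops to zero, which is why obj_list traversals under
the connector lock can still safely grab a mark reference. Roughly, it behaves
like the sketch below; dec_and_lock_sketch() is a made-up name, and this is a
schematic of the contract rather than the exact kernel implementation.]

#include <linux/refcount.h>
#include <linux/spinlock.h>

static bool dec_and_lock_sketch(refcount_t *r, spinlock_t *lock)
{
	/* Fast path: the count stays above zero, no lock needed. */
	if (refcount_dec_not_one(r))
		return false;
	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		/* Raced with a refcount_inc_not_zero() under the lock. */
		spin_unlock(lock);
		return false;
	}
	return true;	/* count hit zero; caller holds the lock */
}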