[2/2] Btrfs: get rid of pointless wtag variable in async-thread.c
diff mbox series

Message ID 958fe7c61896c82b35b5c552d3fb5bfd4df62627.1565680721.git.osandov@fb.com
State New
Headers show
Series
  • Btrfs: workqueue cleanups
Related show

Commit Message

Omar Sandoval Aug. 13, 2019, 7:26 a.m. UTC
From: Omar Sandoval <osandov@fb.com>

Commit ac0c7cf8be00 ("btrfs: fix crash when tracepoint arguments are
freed by wq callbacks") added a void pointer, wtag, which is passed into
trace_btrfs_all_work_done() instead of the freed work item. This is
silly for a few reasons:

1. The freed work item still has the same address.
2. work is still in scope after it's freed, so assigning wtag doesn't
   stop anyone from using it.
3. The tracepoint has always taken a void * argument, so assigning wtag
   doesn't actually make things any more type-safe. (Note that the
   original bug in commit bc074524e123 ("btrfs: prefix fsid to all trace
   events") was that the void * was implicitly cast when it was passed
   to btrfs_work_owner() in the trace point itself).

Instead, let's add some clearer warnings as comments.

Signed-off-by: Omar Sandoval <osandov@fb.com>
---
 fs/btrfs/async-thread.c      | 21 ++++++++-------------
 include/trace/events/btrfs.h |  6 +++---
 2 files changed, 11 insertions(+), 16 deletions(-)

Comments

Nikolay Borisov Aug. 13, 2019, 8:08 a.m. UTC | #1
On 13.08.19 г. 10:26 ч., Omar Sandoval wrote:
> From: Omar Sandoval <osandov@fb.com>
> 
> Commit ac0c7cf8be00 ("btrfs: fix crash when tracepoint arguments are
> freed by wq callbacks") added a void pointer, wtag, which is passed into
> trace_btrfs_all_work_done() instead of the freed work item. This is
> silly for a few reasons:
> 
> 1. The freed work item still has the same address.
> 2. work is still in scope after it's freed, so assigning wtag doesn't
>    stop anyone from using it.
> 3. The tracepoint has always taken a void * argument, so assigning wtag
>    doesn't actually make things any more type-safe. (Note that the
>    original bug in commit bc074524e123 ("btrfs: prefix fsid to all trace
>    events") was that the void * was implicitly cast when it was passed
>    to btrfs_work_owner() in the trace point itself).
> 
> Instead, let's add some clearer warnings as comments.
> 
> Signed-off-by: Omar Sandoval <osandov@fb.com>

Reviewed-by: Nikolay Borisov <nborisov@suse.com> albeit one small nit
below.

> ---
>  fs/btrfs/async-thread.c      | 21 ++++++++-------------
>  include/trace/events/btrfs.h |  6 +++---
>  2 files changed, 11 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
> index d105d3df6fa6..baeb4058e9dc 100644
> --- a/fs/btrfs/async-thread.c
> +++ b/fs/btrfs/async-thread.c
> @@ -226,7 +226,6 @@ static void run_ordered_work(struct btrfs_work *self)
>  	struct btrfs_work *work;
>  	spinlock_t *lock = &wq->list_lock;
>  	unsigned long flags;
> -	void *wtag;
>  	bool free_self = false;
>  
>  	while (1) {
> @@ -281,21 +280,19 @@ static void run_ordered_work(struct btrfs_work *self)
>  		} else {
>  			/*
>  			 * We don't want to call the ordered free functions with
> -			 * the lock held though. Save the work as tag for the
> -			 * trace event, because the callback could free the
> -			 * structure.
> +			 * the lock held.
>  			 */
> -			wtag = work;
>  			work->ordered_free(work);
> -			trace_btrfs_all_work_done(wq->fs_info, wtag);
> +			/* work must not be dereferenced past this point. */

perhaps prefix the comment with NB: to draw attention to that comment.

> +			trace_btrfs_all_work_done(wq->fs_info, work);
>  		}
>  	}
>  	spin_unlock_irqrestore(lock, flags);
>  
>  	if (free_self) {
> -		wtag = self;
>  		self->ordered_free(self);
> -		trace_btrfs_all_work_done(wq->fs_info, wtag);
> +		/* self must not be dereferenced past this point. */
> +		trace_btrfs_all_work_done(wq->fs_info, self);
>  	}
>  }
>  
> @@ -304,7 +301,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>  	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
>  					       normal_work);
>  	struct __btrfs_workqueue *wq;
> -	void *wtag;
>  	int need_order = 0;
>  
>  	/*
> @@ -318,8 +314,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>  	if (work->ordered_func)
>  		need_order = 1;
>  	wq = work->wq;
> -	/* Safe for tracepoints in case work gets freed by the callback */
> -	wtag = work;
>  
>  	trace_btrfs_work_sched(work);
>  	thresh_exec_hook(wq);
> @@ -327,9 +321,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>  	if (need_order) {
>  		set_bit(WORK_DONE_BIT, &work->flags);
>  		run_ordered_work(work);
> +	} else {
> +		/* work must not be dereferenced past this point. */
> +		trace_btrfs_all_work_done(wq->fs_info, work);
>  	}
> -	if (!need_order)
> -		trace_btrfs_all_work_done(wq->fs_info, wtag);
>  }
>  
>  void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
> diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
> index 5cb95646b94e..3d61e80d3c6e 100644
> --- a/include/trace/events/btrfs.h
> +++ b/include/trace/events/btrfs.h
> @@ -1388,9 +1388,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
>  );
>  
>  /*
> - * For situiations when the work is freed, we pass fs_info and a tag that that
> - * matches address of the work structure so it can be paired with the
> - * scheduling event.
> + * For situations when the work is freed, we pass fs_info and a tag that that
> + * matches address of the work structure so it can be paired with the scheduling
> + * event. DO NOT add anything here that dereferences wtag.
>   */
>  DECLARE_EVENT_CLASS(btrfs__work__done,
>  
>
Filipe Manana Aug. 13, 2019, 9:06 a.m. UTC | #2
On Tue, Aug 13, 2019 at 8:28 AM Omar Sandoval <osandov@osandov.com> wrote:
>
> From: Omar Sandoval <osandov@fb.com>
>
> Commit ac0c7cf8be00 ("btrfs: fix crash when tracepoint arguments are
> freed by wq callbacks") added a void pointer, wtag, which is passed into
> trace_btrfs_all_work_done() instead of the freed work item. This is
> silly for a few reasons:
>
> 1. The freed work item still has the same address.
> 2. work is still in scope after it's freed, so assigning wtag doesn't
>    stop anyone from using it.
> 3. The tracepoint has always taken a void * argument, so assigning wtag
>    doesn't actually make things any more type-safe. (Note that the
>    original bug in commit bc074524e123 ("btrfs: prefix fsid to all trace
>    events") was that the void * was implicitly cast when it was passed
>    to btrfs_work_owner() in the trace point itself).
>
> Instead, let's add some clearer warnings as comments.
>
> Signed-off-by: Omar Sandoval <osandov@fb.com>

Looks good, just a double "that" word in a comment below.

Reviewed-by: Filipe Manana <fdmanana@suse.com>

> ---
>  fs/btrfs/async-thread.c      | 21 ++++++++-------------
>  include/trace/events/btrfs.h |  6 +++---
>  2 files changed, 11 insertions(+), 16 deletions(-)
>
> diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
> index d105d3df6fa6..baeb4058e9dc 100644
> --- a/fs/btrfs/async-thread.c
> +++ b/fs/btrfs/async-thread.c
> @@ -226,7 +226,6 @@ static void run_ordered_work(struct btrfs_work *self)
>         struct btrfs_work *work;
>         spinlock_t *lock = &wq->list_lock;
>         unsigned long flags;
> -       void *wtag;
>         bool free_self = false;
>
>         while (1) {
> @@ -281,21 +280,19 @@ static void run_ordered_work(struct btrfs_work *self)
>                 } else {
>                         /*
>                          * We don't want to call the ordered free functions with
> -                        * the lock held though. Save the work as tag for the
> -                        * trace event, because the callback could free the
> -                        * structure.
> +                        * the lock held.
>                          */
> -                       wtag = work;
>                         work->ordered_free(work);
> -                       trace_btrfs_all_work_done(wq->fs_info, wtag);
> +                       /* work must not be dereferenced past this point. */
> +                       trace_btrfs_all_work_done(wq->fs_info, work);
>                 }
>         }
>         spin_unlock_irqrestore(lock, flags);
>
>         if (free_self) {
> -               wtag = self;
>                 self->ordered_free(self);
> -               trace_btrfs_all_work_done(wq->fs_info, wtag);
> +               /* self must not be dereferenced past this point. */
> +               trace_btrfs_all_work_done(wq->fs_info, self);
>         }
>  }
>
> @@ -304,7 +301,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>         struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
>                                                normal_work);
>         struct __btrfs_workqueue *wq;
> -       void *wtag;
>         int need_order = 0;
>
>         /*
> @@ -318,8 +314,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>         if (work->ordered_func)
>                 need_order = 1;
>         wq = work->wq;
> -       /* Safe for tracepoints in case work gets freed by the callback */
> -       wtag = work;
>
>         trace_btrfs_work_sched(work);
>         thresh_exec_hook(wq);
> @@ -327,9 +321,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
>         if (need_order) {
>                 set_bit(WORK_DONE_BIT, &work->flags);
>                 run_ordered_work(work);
> +       } else {
> +               /* work must not be dereferenced past this point. */
> +               trace_btrfs_all_work_done(wq->fs_info, work);
>         }
> -       if (!need_order)
> -               trace_btrfs_all_work_done(wq->fs_info, wtag);
>  }
>
>  void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
> diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
> index 5cb95646b94e..3d61e80d3c6e 100644
> --- a/include/trace/events/btrfs.h
> +++ b/include/trace/events/btrfs.h
> @@ -1388,9 +1388,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
>  );
>
>  /*
> - * For situiations when the work is freed, we pass fs_info and a tag that that
> - * matches address of the work structure so it can be paired with the
> - * scheduling event.
> + * For situations when the work is freed, we pass fs_info and a tag that that

"that" twice.

> + * matches address of the work structure so it can be paired with the scheduling
> + * event. DO NOT add anything here that dereferences wtag.
>   */
>  DECLARE_EVENT_CLASS(btrfs__work__done,
>
> --
> 2.22.0
>

Patch
diff mbox series

diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index d105d3df6fa6..baeb4058e9dc 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -226,7 +226,6 @@  static void run_ordered_work(struct btrfs_work *self)
 	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
-	void *wtag;
 	bool free_self = false;
 
 	while (1) {
@@ -281,21 +280,19 @@  static void run_ordered_work(struct btrfs_work *self)
 		} else {
 			/*
 			 * We don't want to call the ordered free functions with
-			 * the lock held though. Save the work as tag for the
-			 * trace event, because the callback could free the
-			 * structure.
+			 * the lock held.
 			 */
-			wtag = work;
 			work->ordered_free(work);
-			trace_btrfs_all_work_done(wq->fs_info, wtag);
+			/* work must not be dereferenced past this point. */
+			trace_btrfs_all_work_done(wq->fs_info, work);
 		}
 	}
 	spin_unlock_irqrestore(lock, flags);
 
 	if (free_self) {
-		wtag = self;
 		self->ordered_free(self);
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
+		/* self must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, self);
 	}
 }
 
@@ -304,7 +301,6 @@  static void btrfs_work_helper(struct work_struct *normal_work)
 	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
 					       normal_work);
 	struct __btrfs_workqueue *wq;
-	void *wtag;
 	int need_order = 0;
 
 	/*
@@ -318,8 +314,6 @@  static void btrfs_work_helper(struct work_struct *normal_work)
 	if (work->ordered_func)
 		need_order = 1;
 	wq = work->wq;
-	/* Safe for tracepoints in case work gets freed by the callback */
-	wtag = work;
 
 	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
@@ -327,9 +321,10 @@  static void btrfs_work_helper(struct work_struct *normal_work)
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(work);
+	} else {
+		/* work must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, work);
 	}
-	if (!need_order)
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 5cb95646b94e..3d61e80d3c6e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1388,9 +1388,9 @@  DECLARE_EVENT_CLASS(btrfs__work,
 );
 
 /*
- * For situiations when the work is freed, we pass fs_info and a tag that that
- * matches address of the work structure so it can be paired with the
- * scheduling event.
+ * For situations when the work is freed, we pass fs_info and a tag that that
+ * matches address of the work structure so it can be paired with the scheduling
+ * event. DO NOT add anything here that dereferences wtag.
  */
 DECLARE_EVENT_CLASS(btrfs__work__done,