[2/6] btrfs: add cleanup_ref_head_accounting helper

Message ID 20181121185912.24288-3-josef@toxicpanda.com (mailing list archive)
State New, archived
Series Delayed refs rsv

Commit Message

Josef Bacik Nov. 21, 2018, 6:59 p.m. UTC
From: Josef Bacik <jbacik@fb.com>

We were missing some quota cleanups in check_ref_cleanup, so break the
ref head accounting cleanup out into a helper and call it from both
check_ref_cleanup and cleanup_ref_head.  This should help ensure we
don't get the accounting wrong for anything we add in the future.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
---
 fs/btrfs/extent-tree.c | 67 +++++++++++++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 28 deletions(-)
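
[Editor's note: to make the shape of the change easier to see, here is a
minimal standalone sketch of the pattern the patch applies: both teardown
paths funnel through one shared accounting helper. The struct and fields
below are simplified stand-ins for illustration, not the actual kernel
types.]

/* refactor_sketch.c - illustrative only, not kernel code. */
#include <stdio.h>

/* Simplified stand-in for btrfs_delayed_ref_head (hypothetical fields). */
struct ref_head {
	long total_ref_mod;
	unsigned long num_bytes;
	unsigned long qgroup_reserved;
};

/*
 * The shared helper: every teardown path funnels through here, so any
 * future accounting addition only has to be made in one place.
 */
static void cleanup_ref_head_accounting(struct ref_head *head)
{
	if (head->total_ref_mod < 0)
		printf("unpin %lu bytes\n", head->num_bytes);
	printf("free %lu reserved qgroup bytes\n", head->qgroup_reserved);
}

/* Normal processing path (stands in for cleanup_ref_head()). */
static int cleanup_ref_head(struct ref_head *head)
{
	cleanup_ref_head_accounting(head);
	return 0;
}

/*
 * Early-drop path (stands in for check_ref_cleanup()), which previously
 * skipped the qgroup cleanup; now it shares the same helper.
 */
static int check_ref_cleanup(struct ref_head *head)
{
	cleanup_ref_head_accounting(head);
	return 1;
}

int main(void)
{
	struct ref_head head = { .total_ref_mod = -1,
				 .num_bytes = 16384,
				 .qgroup_reserved = 16384 };

	cleanup_ref_head(&head);
	check_ref_cleanup(&head);
	return 0;
}

With this shape, anything added later to cleanup_ref_head_accounting() is
picked up by both callers automatically, which is the invariant the commit
message is after.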

Comments

Qu Wenruo Nov. 22, 2018, 1:06 a.m. UTC | #1
On 2018/11/22 2:59 AM, Josef Bacik wrote:
> [...]
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index c36b3a42f2bb..e3ed3507018d 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -2443,6 +2443,41 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
>  	return ret ? ret : 1;
>  }
>  
> +static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
> +					struct btrfs_delayed_ref_head *head)
> +{
> +	struct btrfs_fs_info *fs_info = trans->fs_info;
> +	struct btrfs_delayed_ref_root *delayed_refs =
> +		&trans->transaction->delayed_refs;
> +
> +	if (head->total_ref_mod < 0) {
> +		struct btrfs_space_info *space_info;
> +		u64 flags;
> +
> +		if (head->is_data)
> +			flags = BTRFS_BLOCK_GROUP_DATA;
> +		else if (head->is_system)
> +			flags = BTRFS_BLOCK_GROUP_SYSTEM;
> +		else
> +			flags = BTRFS_BLOCK_GROUP_METADATA;
> +		space_info = __find_space_info(fs_info, flags);
> +		ASSERT(space_info);
> +		percpu_counter_add_batch(&space_info->total_bytes_pinned,
> +				   -head->num_bytes,
> +				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
> +
> +		if (head->is_data) {
> +			spin_lock(&delayed_refs->lock);
> +			delayed_refs->pending_csums -= head->num_bytes;
> +			spin_unlock(&delayed_refs->lock);
> +		}
> +	}
> +
> +	/* Also free its reserved qgroup space */
> +	btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
> +				      head->qgroup_reserved);

This part will be removed in patch "[PATCH] btrfs: qgroup: Move reserved
data accounting from btrfs_delayed_ref_head to btrfs_qgroup_extent_record".

So there is one less thing to worry about in the delayed ref head.

Thanks,
Qu
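
[Editor's note: for context, a rough sketch of the direction that
follow-up patch takes, going only by its title; the type and field names
below are hypothetical, simplified stand-ins, not the actual kernel
structures. The idea is that the reserved byte count travels with the
qgroup extent record rather than the delayed ref head, so the ref head
teardown paths no longer have to free it.]

/* Hypothetical, simplified sketch - not the real kernel structs. */
struct qgroup_extent_record_sketch {
	unsigned long long bytenr;
	unsigned long long num_bytes;
	/*
	 * Reserved space recorded here and released when the extent
	 * record itself is accounted, rather than from the delayed
	 * ref head teardown paths.
	 */
	unsigned long long reserved;
};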

David Sterba Nov. 23, 2018, 1:51 p.m. UTC | #2
On Thu, Nov 22, 2018 at 09:06:30AM +0800, Qu Wenruo wrote:
> [...]
> 
> This part will be removed in patch "[PATCH] btrfs: qgroup: Move reserved
> data accounting from btrfs_delayed_ref_head to btrfs_qgroup_extent_record".

We need to decide the order of merging if the patches touch the same
code. Your patch looks easier to refresh on top of this series, so
please hold on until this gets merged.

Patch

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c36b3a42f2bb..e3ed3507018d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2443,6 +2443,41 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
 	return ret ? ret : 1;
 }
 
+static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
+					struct btrfs_delayed_ref_head *head)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_delayed_ref_root *delayed_refs =
+		&trans->transaction->delayed_refs;
+
+	if (head->total_ref_mod < 0) {
+		struct btrfs_space_info *space_info;
+		u64 flags;
+
+		if (head->is_data)
+			flags = BTRFS_BLOCK_GROUP_DATA;
+		else if (head->is_system)
+			flags = BTRFS_BLOCK_GROUP_SYSTEM;
+		else
+			flags = BTRFS_BLOCK_GROUP_METADATA;
+		space_info = __find_space_info(fs_info, flags);
+		ASSERT(space_info);
+		percpu_counter_add_batch(&space_info->total_bytes_pinned,
+				   -head->num_bytes,
+				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+		if (head->is_data) {
+			spin_lock(&delayed_refs->lock);
+			delayed_refs->pending_csums -= head->num_bytes;
+			spin_unlock(&delayed_refs->lock);
+		}
+	}
+
+	/* Also free its reserved qgroup space */
+	btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
+				      head->qgroup_reserved);
+}
+
 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 			    struct btrfs_delayed_ref_head *head)
 {
@@ -2478,31 +2513,6 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 	spin_unlock(&head->lock);
 	spin_unlock(&delayed_refs->lock);
 
-	trace_run_delayed_ref_head(fs_info, head, 0);
-
-	if (head->total_ref_mod < 0) {
-		struct btrfs_space_info *space_info;
-		u64 flags;
-
-		if (head->is_data)
-			flags = BTRFS_BLOCK_GROUP_DATA;
-		else if (head->is_system)
-			flags = BTRFS_BLOCK_GROUP_SYSTEM;
-		else
-			flags = BTRFS_BLOCK_GROUP_METADATA;
-		space_info = __find_space_info(fs_info, flags);
-		ASSERT(space_info);
-		percpu_counter_add_batch(&space_info->total_bytes_pinned,
-				   -head->num_bytes,
-				   BTRFS_TOTAL_BYTES_PINNED_BATCH);
-
-		if (head->is_data) {
-			spin_lock(&delayed_refs->lock);
-			delayed_refs->pending_csums -= head->num_bytes;
-			spin_unlock(&delayed_refs->lock);
-		}
-	}
-
 	if (head->must_insert_reserved) {
 		btrfs_pin_extent(fs_info, head->bytenr,
 				 head->num_bytes, 1);
@@ -2512,9 +2522,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 		}
 	}
 
-	/* Also free its reserved qgroup space */
-	btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
-				      head->qgroup_reserved);
+	cleanup_ref_head_accounting(trans, head);
+
+	trace_run_delayed_ref_head(fs_info, head, 0);
 	btrfs_delayed_ref_unlock(head);
 	btrfs_put_delayed_ref_head(head);
 	return 0;
@@ -6991,6 +7001,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 	if (head->must_insert_reserved)
 		ret = 1;
 
+	cleanup_ref_head_accounting(trans, head);
 	mutex_unlock(&head->mutex);
 	btrfs_put_delayed_ref_head(head);
 	return ret;