
[09/19] btrfs: unify the btrfs_add_delayed_*_ref helpers into one helper

Message ID 5ed65a5c6829fb072e20a9e58897918ca4a21f3e.1713052088.git.josef@toxicpanda.com (mailing list archive)
State New, archived
Series btrfs: delayed refs cleanups

Commit Message

Josef Bacik April 13, 2024, 11:53 p.m. UTC
Now that these helpers are identical, create a single common helper that
does all of the work, and strip the individual helpers down to thin
wrappers around it. This removes a significant amount of duplicated
code.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/delayed-ref.c | 106 +++++++++++------------------------------
 1 file changed, 28 insertions(+), 78 deletions(-)

Comments

Filipe Manana April 15, 2024, 12:48 p.m. UTC | #1
On Sun, Apr 14, 2024 at 12:54 AM Josef Bacik <josef@toxicpanda.com> wrote:
>
> Now that these helpers are identical, create a single common helper that
> does all of the work, and strip the individual helpers down to thin
> wrappers around it. This removes a significant amount of duplicated
> code.
>
> Signed-off-by: Josef Bacik <josef@toxicpanda.com>
> ---
>  fs/btrfs/delayed-ref.c | 106 +++++++++++------------------------------
>  1 file changed, 28 insertions(+), 78 deletions(-)
>
> diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
> index cc1510d7eee8..342f32ae08c9 100644
> --- a/fs/btrfs/delayed-ref.c
> +++ b/fs/btrfs/delayed-ref.c
> @@ -1054,14 +1054,10 @@ void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
>                 generic_ref->skip_qgroup = false;
>  }
>
> -/*
> - * add a delayed tree ref.  This does all of the accounting required
> - * to make sure the delayed ref is eventually processed before this
> - * transaction commits.
> - */
> -int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
> -                              struct btrfs_ref *generic_ref,
> -                              struct btrfs_delayed_extent_op *extent_op)
> +static int __btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,

Please don't use a __ prefix for functions, even if private/static.
We don't do this anymore.

With that changed:

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Thanks.


> +                                  struct btrfs_ref *generic_ref,
> +                                  struct btrfs_delayed_extent_op *extent_op,
> +                                  u64 reserved)
>  {
>         struct btrfs_fs_info *fs_info = trans->fs_info;
>         struct btrfs_delayed_ref_node *node;
> @@ -1069,10 +1065,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
>         struct btrfs_delayed_ref_root *delayed_refs;
>         struct btrfs_qgroup_extent_record *record = NULL;
>         bool qrecord_inserted;
> -       bool merged;
>         int action = generic_ref->action;
> +       bool merged;
>
> -       ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
>         node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
>         if (!node)
>                 return -ENOMEM;
> @@ -1087,14 +1082,14 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
>                 record = kzalloc(sizeof(*record), GFP_NOFS);
>                 if (!record) {
>                         kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
> -                       kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
> +                       kmem_cache_free(btrfs_delayed_ref_head_cachep,
> +                                       head_ref);
>                         return -ENOMEM;
>                 }
>         }
>
>         init_delayed_ref_common(fs_info, node, generic_ref);
> -
> -       init_delayed_ref_head(head_ref, generic_ref, record, 0);
> +       init_delayed_ref_head(head_ref, generic_ref, record, reserved);
>         head_ref->extent_op = extent_op;
>
>         delayed_refs = &trans->transaction->delayed_refs;
> @@ -1116,16 +1111,31 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
>          */
>         btrfs_update_delayed_refs_rsv(trans);
>
> -       trace_add_delayed_tree_ref(fs_info, node);
> +       if (generic_ref->type == BTRFS_REF_DATA)
> +               trace_add_delayed_data_ref(trans->fs_info, node);
> +       else
> +               trace_add_delayed_tree_ref(trans->fs_info, node);
>         if (merged)
>                 kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
>
>         if (qrecord_inserted)
> -               btrfs_qgroup_trace_extent_post(trans, record);
> -
> +               return btrfs_qgroup_trace_extent_post(trans, record);
>         return 0;
>  }
>
> +/*
> + * add a delayed tree ref.  This does all of the accounting required
> + * to make sure the delayed ref is eventually processed before this
> + * transaction commits.
> + */
> +int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
> +                              struct btrfs_ref *generic_ref,
> +                              struct btrfs_delayed_extent_op *extent_op)
> +{
> +       ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
> +       return __btrfs_add_delayed_ref(trans, generic_ref, extent_op, 0);
> +}
> +
>  /*
>   * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
>   */
> @@ -1133,68 +1143,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
>                                struct btrfs_ref *generic_ref,
>                                u64 reserved)
>  {
> -       struct btrfs_fs_info *fs_info = trans->fs_info;
> -       struct btrfs_delayed_ref_node *node;
> -       struct btrfs_delayed_ref_head *head_ref;
> -       struct btrfs_delayed_ref_root *delayed_refs;
> -       struct btrfs_qgroup_extent_record *record = NULL;
> -       bool qrecord_inserted;
> -       int action = generic_ref->action;
> -       bool merged;
> -
> -       ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
> -       node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
> -       if (!node)
> -               return -ENOMEM;
> -
> -       init_delayed_ref_common(fs_info, node, generic_ref);
> -
> -       head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
> -       if (!head_ref) {
> -               kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
> -               return -ENOMEM;
> -       }
> -
> -       if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
> -               record = kzalloc(sizeof(*record), GFP_NOFS);
> -               if (!record) {
> -                       kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
> -                       kmem_cache_free(btrfs_delayed_ref_head_cachep,
> -                                       head_ref);
> -                       return -ENOMEM;
> -               }
> -       }
> -
> -       init_delayed_ref_head(head_ref, generic_ref, record, reserved);
> -       head_ref->extent_op = NULL;
> -
> -       delayed_refs = &trans->transaction->delayed_refs;
> -       spin_lock(&delayed_refs->lock);
> -
> -       /*
> -        * insert both the head node and the new ref without dropping
> -        * the spin lock
> -        */
> -       head_ref = add_delayed_ref_head(trans, head_ref, record,
> -                                       action, &qrecord_inserted);
> -
> -       merged = insert_delayed_ref(trans, head_ref, node);
> -       spin_unlock(&delayed_refs->lock);
> -
> -       /*
> -        * Need to update the delayed_refs_rsv with any changes we may have
> -        * made.
> -        */
> -       btrfs_update_delayed_refs_rsv(trans);
> -
> -       trace_add_delayed_data_ref(trans->fs_info, node);
> -       if (merged)
> -               kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
> -
> -
> -       if (qrecord_inserted)
> -               return btrfs_qgroup_trace_extent_post(trans, record);
> -       return 0;
> +       ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
> +       return __btrfs_add_delayed_ref(trans, generic_ref, NULL, reserved);
>  }
>
>  int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
> --
> 2.43.0
>
>
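
For reference, the change Filipe asks for amounts to renaming the new static helper; a minimal sketch of how the entry points would then read, assuming the name add_delayed_ref (the final name is the author's choice) and an otherwise unchanged helper body:

    static int add_delayed_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op,
                               u64 reserved);  /* body exactly as in the patch below */

    int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                                   struct btrfs_ref *generic_ref,
                                   struct btrfs_delayed_extent_op *extent_op)
    {
            /* Metadata refs carry no byte reservation, hence the 0. */
            ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
            return add_delayed_ref(trans, generic_ref, extent_op, 0);
    }

    int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                                   struct btrfs_ref *generic_ref,
                                   u64 reserved)
    {
            /* Data refs have no extent_op but do pass the reserved bytes. */
            ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
            return add_delayed_ref(trans, generic_ref, NULL, reserved);
    }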

Patch

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index cc1510d7eee8..342f32ae08c9 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -1054,14 +1054,10 @@  void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
 		generic_ref->skip_qgroup = false;
 }
 
-/*
- * add a delayed tree ref.  This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
-			       struct btrfs_ref *generic_ref,
-			       struct btrfs_delayed_extent_op *extent_op)
+static int __btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
+				   struct btrfs_ref *generic_ref,
+				   struct btrfs_delayed_extent_op *extent_op,
+				   u64 reserved)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_delayed_ref_node *node;
@@ -1069,10 +1065,9 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_qgroup_extent_record *record = NULL;
 	bool qrecord_inserted;
-	bool merged;
 	int action = generic_ref->action;
+	bool merged;
 
-	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
 	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
 	if (!node)
 		return -ENOMEM;
@@ -1087,14 +1082,14 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
 			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
-			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+			kmem_cache_free(btrfs_delayed_ref_head_cachep,
+					head_ref);
 			return -ENOMEM;
 		}
 	}
 
 	init_delayed_ref_common(fs_info, node, generic_ref);
-
-	init_delayed_ref_head(head_ref, generic_ref, record, 0);
+	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
 	head_ref->extent_op = extent_op;
 
 	delayed_refs = &trans->transaction->delayed_refs;
@@ -1116,16 +1111,31 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	 */
 	btrfs_update_delayed_refs_rsv(trans);
 
-	trace_add_delayed_tree_ref(fs_info, node);
+	if (generic_ref->type == BTRFS_REF_DATA)
+		trace_add_delayed_data_ref(trans->fs_info, node);
+	else
+		trace_add_delayed_tree_ref(trans->fs_info, node);
 	if (merged)
 		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
 	if (qrecord_inserted)
-		btrfs_qgroup_trace_extent_post(trans, record);
-
+		return btrfs_qgroup_trace_extent_post(trans, record);
 	return 0;
 }
 
+/*
+ * add a delayed tree ref.  This does all of the accounting required
+ * to make sure the delayed ref is eventually processed before this
+ * transaction commits.
+ */
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+			       struct btrfs_ref *generic_ref,
+			       struct btrfs_delayed_extent_op *extent_op)
+{
+	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
+	return __btrfs_add_delayed_ref(trans, generic_ref, extent_op, 0);
+}
+
 /*
  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
  */
@@ -1133,68 +1143,8 @@  int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 			       struct btrfs_ref *generic_ref,
 			       u64 reserved)
 {
-	struct btrfs_fs_info *fs_info = trans->fs_info;
-	struct btrfs_delayed_ref_node *node;
-	struct btrfs_delayed_ref_head *head_ref;
-	struct btrfs_delayed_ref_root *delayed_refs;
-	struct btrfs_qgroup_extent_record *record = NULL;
-	bool qrecord_inserted;
-	int action = generic_ref->action;
-	bool merged;
-
-	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
-	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
-	if (!node)
-		return -ENOMEM;
-
-	init_delayed_ref_common(fs_info, node, generic_ref);
-
-	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
-	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
-		return -ENOMEM;
-	}
-
-	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
-		record = kzalloc(sizeof(*record), GFP_NOFS);
-		if (!record) {
-			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
-			kmem_cache_free(btrfs_delayed_ref_head_cachep,
-					head_ref);
-			return -ENOMEM;
-		}
-	}
-
-	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
-	head_ref->extent_op = NULL;
-
-	delayed_refs = &trans->transaction->delayed_refs;
-	spin_lock(&delayed_refs->lock);
-
-	/*
-	 * insert both the head node and the new ref without dropping
-	 * the spin lock
-	 */
-	head_ref = add_delayed_ref_head(trans, head_ref, record,
-					action, &qrecord_inserted);
-
-	merged = insert_delayed_ref(trans, head_ref, node);
-	spin_unlock(&delayed_refs->lock);
-
-	/*
-	 * Need to update the delayed_refs_rsv with any changes we may have
-	 * made.
-	 */
-	btrfs_update_delayed_refs_rsv(trans);
-
-	trace_add_delayed_data_ref(trans->fs_info, node);
-	if (merged)
-		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
-
-
-	if (qrecord_inserted)
-		return btrfs_qgroup_trace_extent_post(trans, record);
-	return 0;
+	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
+	return __btrfs_add_delayed_ref(trans, generic_ref, NULL, reserved);
 }
 
 int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,