[02/19] btrfs: embed data_ref and tree_ref in btrfs_delayed_ref_node

Message ID cb592516a71f62dd136bd858670d0ae6f54d8cdc.1713052088.git.josef@toxicpanda.com (mailing list archive)
State New, archived
Series: btrfs: delayed refs cleanups

Commit Message

Josef Bacik April 13, 2024, 11:53 p.m. UTC
We have been embedding btrfs_delayed_ref_node in btrfs_delayed_data_ref
and btrfs_delayed_tree_ref, which leaves us with two separate cacheps and
a variety of handling that is awkward because of this separation.

Instead, embed these two structs in a union inside btrfs_delayed_ref_node
and make that the first-class object.  This allows us to go down to one
cachep for our delayed ref nodes instead of two.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/delayed-ref.c | 51 ++++++++++++++----------------------------
 fs/btrfs/delayed-ref.h | 44 +++++++++++++++++++-----------------
 2 files changed, 40 insertions(+), 55 deletions(-)
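
For illustration only, here is a standalone sketch of the pattern the patch
applies (hypothetical names and trimmed field lists, not the real btrfs
definitions): the type-specific payload moves into a union inside the common
node, so one allocation size covers both ref kinds, and the accessor helpers
flip direction (node to payload becomes a plain member access, payload to
node becomes container_of() on the union member).

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;

/* Hypothetical, trimmed payloads standing in for the btrfs structs. */
struct tree_ref_payload { u64 root; u64 parent; int level; };
struct data_ref_payload { u64 root; u64 parent; u64 objectid; u64 offset; };

/* The common node is the first-class object; per-type data is a union. */
struct ref_node {
	u64 bytenr;
	unsigned int type;	/* records which union member is live */
	union {
		struct tree_ref_payload tree_ref;
		struct data_ref_payload data_ref;
	};
};

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* node -> payload is now a plain member access ... */
static struct tree_ref_payload *node_to_tree_ref(struct ref_node *node)
{
	return &node->tree_ref;
}

/* ... and payload -> node is the container_of() direction. */
static struct ref_node *tree_ref_to_node(struct tree_ref_payload *ref)
{
	return container_of(ref, struct ref_node, tree_ref);
}

int main(void)
{
	/* One allocation size (one cache in the kernel) serves both kinds. */
	struct ref_node *node = calloc(1, sizeof(*node));

	if (!node)
		return 1;
	node->type = 1;	/* pretend this is a tree ref */
	node_to_tree_ref(node)->level = 3;
	printf("round trip ok: %d\n",
	       tree_ref_to_node(node_to_tree_ref(node)) == node);
	printf("node size (common fields + larger payload): %zu\n",
	       sizeof(*node));
	free(node);
	return 0;
}

The cost of the union is that every node now carries the larger of the two
payloads, but in exchange there is a single cachep and freeing no longer has
to switch on the ref type to pick the right cache.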

Comments

Filipe Manana April 15, 2024, 12:38 p.m. UTC | #1
On Sun, Apr 14, 2024 at 12:53 AM Josef Bacik <josef@toxicpanda.com> wrote:
>
> We have been embedding btrfs_delayed_ref_node in btrfs_delayed_data_ref
> and btrfs_delayed_tree_ref, which leaves us with two separate cacheps and
> a variety of handling that is awkward because of this separation.
>
> Instead, embed these two structs in a union inside btrfs_delayed_ref_node
> and make that the first-class object.  This allows us to go down to one
> cachep for our delayed ref nodes instead of two.
>
> Signed-off-by: Josef Bacik <josef@toxicpanda.com>

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Looks good, thanks.


Patch

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index d920663a18fd..9382f7c81c25 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -16,8 +16,7 @@ 
 #include "fs.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
-struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_ref_node_cachep;
 struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking.  For subvolume trees
@@ -1082,26 +1081,26 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
 
 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
-	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-	if (!ref)
+	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+	if (!node)
 		return -ENOMEM;
 
 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 		return -ENOMEM;
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
-			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 			return -ENOMEM;
 		}
 	}
 
-	node = btrfs_delayed_tree_ref_to_node(ref);
+	ref = btrfs_delayed_node_to_tree_ref(node);
 
 	if (parent)
 		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
@@ -1143,7 +1142,7 @@  int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
 				   BTRFS_ADD_DELAYED_REF : action);
 	if (merged)
-		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
 	if (qrecord_inserted)
 		btrfs_qgroup_trace_extent_post(trans, record);
@@ -1176,11 +1175,11 @@  int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	u8 ref_type;
 
 	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
-	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
-	if (!ref)
+	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+	if (!node)
 		return -ENOMEM;
 
-	node = btrfs_delayed_data_ref_to_node(ref);
+	ref = btrfs_delayed_node_to_data_ref(node);
 
 	if (parent)
 	        ref_type = BTRFS_SHARED_DATA_REF_KEY;
@@ -1196,14 +1195,14 @@  int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 
 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 		return -ENOMEM;
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
-			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep,
 					head_ref);
 			return -ENOMEM;
@@ -1237,7 +1236,7 @@  int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
 				   BTRFS_ADD_DELAYED_REF : action);
 	if (merged)
-		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
 
 	if (qrecord_inserted)
@@ -1280,18 +1279,7 @@  void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
 	if (refcount_dec_and_test(&ref->refs)) {
 		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
-		switch (ref->type) {
-		case BTRFS_TREE_BLOCK_REF_KEY:
-		case BTRFS_SHARED_BLOCK_REF_KEY:
-			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-			break;
-		case BTRFS_EXTENT_DATA_REF_KEY:
-		case BTRFS_SHARED_DATA_REF_KEY:
-			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
-			break;
-		default:
-			BUG();
-		}
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
 	}
 }
 
@@ -1310,8 +1298,7 @@  btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt
 void __cold btrfs_delayed_ref_exit(void)
 {
 	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
-	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
-	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
 	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
 }
 
@@ -1321,12 +1308,8 @@  int __init btrfs_delayed_ref_init(void)
 	if (!btrfs_delayed_ref_head_cachep)
 		goto fail;
 
-	btrfs_delayed_tree_ref_cachep = KMEM_CACHE(btrfs_delayed_tree_ref, 0);
-	if (!btrfs_delayed_tree_ref_cachep)
-		goto fail;
-
-	btrfs_delayed_data_ref_cachep = KMEM_CACHE(btrfs_delayed_data_ref, 0);
-	if (!btrfs_delayed_data_ref_cachep)
+	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
+	if (!btrfs_delayed_ref_node_cachep)
 		goto fail;
 
 	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index b3a78bf7b072..2de447d9aaba 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -30,6 +30,19 @@  enum btrfs_delayed_ref_action {
 	BTRFS_UPDATE_DELAYED_HEAD,
 } __packed;
 
+struct btrfs_delayed_tree_ref {
+	u64 root;
+	u64 parent;
+	int level;
+};
+
+struct btrfs_delayed_data_ref {
+	u64 root;
+	u64 parent;
+	u64 objectid;
+	u64 offset;
+};
+
 struct btrfs_delayed_ref_node {
 	struct rb_node ref_node;
 	/*
@@ -64,6 +77,11 @@  struct btrfs_delayed_ref_node {
 
 	unsigned int action:8;
 	unsigned int type:8;
+
+	union {
+		struct btrfs_delayed_tree_ref tree_ref;
+		struct btrfs_delayed_data_ref data_ref;
+	};
 };
 
 struct btrfs_delayed_extent_op {
@@ -151,21 +169,6 @@  struct btrfs_delayed_ref_head {
 	bool processing;
 };
 
-struct btrfs_delayed_tree_ref {
-	struct btrfs_delayed_ref_node node;
-	u64 root;
-	u64 parent;
-	int level;
-};
-
-struct btrfs_delayed_data_ref {
-	struct btrfs_delayed_ref_node node;
-	u64 root;
-	u64 parent;
-	u64 objectid;
-	u64 offset;
-};
-
 enum btrfs_delayed_ref_flags {
 	/* Indicate that we are flushing delayed refs for the commit */
 	BTRFS_DELAYED_REFS_FLUSHING,
@@ -279,8 +282,7 @@  struct btrfs_ref {
 };
 
 extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
-extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
 extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
 
 int __init btrfs_delayed_ref_init(void);
@@ -404,25 +406,25 @@  bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 static inline struct btrfs_delayed_tree_ref *
 btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
 {
-	return container_of(node, struct btrfs_delayed_tree_ref, node);
+	return &node->tree_ref;
 }
 
 static inline struct btrfs_delayed_data_ref *
 btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
 {
-	return container_of(node, struct btrfs_delayed_data_ref, node);
+	return &node->data_ref;
 }
 
 static inline struct btrfs_delayed_ref_node *
 btrfs_delayed_tree_ref_to_node(struct btrfs_delayed_tree_ref *ref)
 {
-	return &ref->node;
+	return container_of(ref, struct btrfs_delayed_ref_node, tree_ref);
 }
 
 static inline struct btrfs_delayed_ref_node *
 btrfs_delayed_data_ref_to_node(struct btrfs_delayed_data_ref *ref)
 {
-	return &ref->node;
+	return container_of(ref, struct btrfs_delayed_ref_node, data_ref);
 }
 
 #endif