
[v2,07/10] btrfs: relocation: Specify essential members for alloc_backref_node()

Message ID 20200302094553.58827-8-wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: relocation: Refactor build_backref_tree()

Commit Message

Qu Wenruo March 2, 2020, 9:45 a.m. UTC
Bytenr and level are essential members of backref_node, thus it
makes sense to initialize them at alloc time.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/relocation.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
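
To see the shape of the change at a glance, here is a condensed before/after
of one call site (drawn from the handle_one_tree_backref() hunks in the diff
below; error handling simplified for brevity):

	/* Before: allocate, then fill in the essential members by hand. */
	upper = alloc_backref_node(cache);
	if (!upper)
		return -ENOMEM;
	upper->bytenr = ref_key->offset;
	upper->level = cur->level + 1;

	/* After: bytenr and level are set at allocation time. */
	upper = alloc_backref_node(cache, ref_key->offset, cur->level + 1);
	if (!upper)
		return -ENOMEM;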

Comments

Nikolay Borisov March 4, 2020, 1:06 p.m. UTC | #1
On 2.03.20 at 11:45, Qu Wenruo wrote:
> Bytenr and level are essential members of backref_node, thus it
> makes sense to initialize them at alloc time.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>

Reviewed-by: Nikolay Borisov <nborisov@suse.com>

> ---
>  fs/btrfs/relocation.c | 25 ++++++++++++-------------
>  1 file changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
> index c76849409c81..67a4a61eb86a 100644
> --- a/fs/btrfs/relocation.c
> +++ b/fs/btrfs/relocation.c
> @@ -308,10 +308,12 @@ static void backref_cache_cleanup(struct backref_cache *cache)
>  	ASSERT(!cache->nr_edges);
>  }
>  
> -static struct backref_node *alloc_backref_node(struct backref_cache *cache)
> +static struct backref_node *alloc_backref_node(struct backref_cache *cache,
> +						u64 bytenr, int level)
>  {
>  	struct backref_node *node;
>  
> +	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
>  	node = kzalloc(sizeof(*node), GFP_NOFS);
>  	if (node) {
>  		INIT_LIST_HEAD(&node->list);
> @@ -319,6 +321,9 @@ static struct backref_node *alloc_backref_node(struct backref_cache *cache)
>  		INIT_LIST_HEAD(&node->lower);
>  		RB_CLEAR_NODE(&node->rb_node);
>  		cache->nr_nodes++;
> +
> +		node->level = level;
> +		node->bytenr = bytenr;
>  	}

nit: One suggestion for this function, feel free to ignore:

What if you do it like this:
if (!node)
	return NULL;

and then have the initialization happen with just one level of nesting
inside the function instead of two. That function is short and easy to
understand, but it might be a good time to refactor it like that. If you
think it's redundant then don't bother.
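
In other words, something roughly like this (a sketch assembled only from
the hunks quoted above, not the committed version; the context lines the
diff elides are marked):

	static struct backref_node *alloc_backref_node(struct backref_cache *cache,
							u64 bytenr, int level)
	{
		struct backref_node *node;

		ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
		node = kzalloc(sizeof(*node), GFP_NOFS);
		if (!node)
			return NULL;

		INIT_LIST_HEAD(&node->list);
		/* ... other initializations elided between the hunks above ... */
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;

		node->level = level;
		node->bytenr = bytenr;
		return node;
	}

The body then reads straight down with a single level of nesting.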

>  	return node;
>  }

<snip>
Qu Wenruo March 4, 2020, 1:09 p.m. UTC | #2
On 2020/3/4 9:06 PM, Nikolay Borisov wrote:
>
>
> On 2.03.20 at 11:45, Qu Wenruo wrote:
>> Bytenr and level are essential members of backref_node, thus it
>> makes sense to initialize them at alloc time.
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>
> Reviewed-by: Nikolay Borisov <nborisov@suse.com>
>
>> ---
>>  fs/btrfs/relocation.c | 25 ++++++++++++-------------
>>  1 file changed, 12 insertions(+), 13 deletions(-)
>>
>> diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
>> index c76849409c81..67a4a61eb86a 100644
>> --- a/fs/btrfs/relocation.c
>> +++ b/fs/btrfs/relocation.c
>> @@ -308,10 +308,12 @@ static void backref_cache_cleanup(struct backref_cache *cache)
>>  	ASSERT(!cache->nr_edges);
>>  }
>>
>> -static struct backref_node *alloc_backref_node(struct backref_cache *cache)
>> +static struct backref_node *alloc_backref_node(struct backref_cache *cache,
>> +						u64 bytenr, int level)
>>  {
>>  	struct backref_node *node;
>>
>> +	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
>>  	node = kzalloc(sizeof(*node), GFP_NOFS);
>>  	if (node) {
>>  		INIT_LIST_HEAD(&node->list);
>> @@ -319,6 +321,9 @@ static struct backref_node *alloc_backref_node(struct backref_cache *cache)
>>  		INIT_LIST_HEAD(&node->lower);
>>  		RB_CLEAR_NODE(&node->rb_node);
>>  		cache->nr_nodes++;
>> +
>> +		node->level = level;
>> +		node->bytenr = bytenr;
>>  	}
>
> nit: One suggestion for this function, feel free to ignore:
>
> What if you do it like this:
> if (!node)
> 	return NULL;

I didn't notice we could do that.

That looks like the style I prefer too.

I'll use that style in the next version.

Thanks,
Qu
>
> and then have the initialization happen with just one level of nesting
> inside the function instead of two. That function is short and easy to
> understand, but it might be a good time to refactor it like that. If you
> think it's redundant then don't bother.
>
>>  	return node;
>>  }
>
> <snip>
>

Patch

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c76849409c81..67a4a61eb86a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -308,10 +308,12 @@ static void backref_cache_cleanup(struct backref_cache *cache)
 	ASSERT(!cache->nr_edges);
 }
 
-static struct backref_node *alloc_backref_node(struct backref_cache *cache)
+static struct backref_node *alloc_backref_node(struct backref_cache *cache,
+						u64 bytenr, int level)
 {
 	struct backref_node *node;
 
+	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
 	node = kzalloc(sizeof(*node), GFP_NOFS);
 	if (node) {
 		INIT_LIST_HEAD(&node->list);
@@ -319,6 +321,9 @@ static struct backref_node *alloc_backref_node(struct backref_cache *cache)
 		INIT_LIST_HEAD(&node->lower);
 		RB_CLEAR_NODE(&node->rb_node);
 		cache->nr_nodes++;
+
+		node->level = level;
+		node->bytenr = bytenr;
 	}
 	return node;
 }
@@ -736,13 +741,12 @@ static int handle_one_tree_backref(struct reloc_control *rc,
 		rb_node = tree_search(&cache->rb_root, ref_key->offset);
 		if (!rb_node) {
 			/* Parent node not yet cached */
-			upper = alloc_backref_node(cache);
+			upper = alloc_backref_node(cache, ref_key->offset,
+						   cur->level + 1);
 			if (!upper) {
 				free_backref_edge(cache, edge);
 				return -ENOMEM;
 			}
-			upper->bytenr = ref_key->offset;
-			upper->level = cur->level + 1;
 
 			/*
 			 *  backrefs for the upper level block isn't
@@ -833,16 +837,15 @@ static int handle_one_tree_backref(struct reloc_control *rc,
 		eb = path->nodes[level];
 		rb_node = tree_search(&cache->rb_root, eb->start);
 		if (!rb_node) {
-			upper = alloc_backref_node(cache);
+			upper = alloc_backref_node(cache, eb->start,
+						   lower->level + 1);
 			if (!upper) {
 				btrfs_put_root(root);
 				free_backref_edge(cache, edge);
 				ret = -ENOMEM;
 				goto out;
 			}
-			upper->bytenr = eb->start;
 			upper->owner = btrfs_header_owner(eb);
-			upper->level = lower->level + 1;
 			if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 				upper->cowonly = 1;
 
@@ -934,14 +937,12 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 	}
 	path->reada = READA_FORWARD;
 
-	node = alloc_backref_node(cache);
+	node = alloc_backref_node(cache, bytenr, level);
 	if (!node) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	node->bytenr = bytenr;
-	node->level = level;
 	node->lowest = 1;
 	cur = node;
 again:
@@ -1252,12 +1253,10 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
 	if (!node)
 		return 0;
 
-	new_node = alloc_backref_node(cache);
+	new_node = alloc_backref_node(cache, dest->node->start, node->level);
 	if (!new_node)
 		return -ENOMEM;
 
-	new_node->bytenr = dest->node->start;
-	new_node->level = node->level;
 	new_node->lowest = node->lowest;
 	new_node->checked = 1;
 	new_node->root = btrfs_grab_root(dest);