diff mbox series

[v2,2/3] btrfs: backref: Implement btrfs_backref_iterator_next()

Message ID 20200214081354.56605-3-wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure | expand

Commit Message

Qu Wenruo Feb. 14, 2020, 8:13 a.m. UTC
This function will go to the next inline/keyed backref for the
btrfs_backref_iterator infrastructure.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/backref.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

Comments

Nikolay Borisov Feb. 14, 2020, 9:25 a.m. UTC | #1
On 14.02.20 г. 10:13 ч., Qu Wenruo wrote:
> This function will go next inline/keyed backref for
> btrfs_backref_iterator infrastructure.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/backref.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 48 insertions(+)
> 
> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
> index 73c4829609c9..c5f87439f31c 100644
> --- a/fs/btrfs/backref.c
> +++ b/fs/btrfs/backref.c
> @@ -2310,3 +2310,51 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>  	btrfs_release_path(path);
>  	return ret;
>  }
> +
> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
> +{

make it a bool function.

> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
> +	struct btrfs_path *path = iterator->path;
> +	struct btrfs_extent_inline_ref *iref;
> +	int ret;
> +	u32 size;
> +
> +	if (iterator->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
> +	    iterator->cur_key.type == BTRFS_METADATA_ITEM_KEY) {
> +		/* We're still inside the inline refs */
> +		if (btrfs_backref_has_tree_block_info(iterator)) {
> +			/* First tree block info */
> +			size = sizeof(struct btrfs_tree_block_info);
> +		} else {
> +			/* Use inline ref type to determine the size */
> +			int type;
> +
> +			iref = (struct btrfs_extent_inline_ref *)
> +				(iterator->cur_ptr);
> +			type = btrfs_extent_inline_ref_type(eb, iref);
> +
> +			size = btrfs_extent_inline_ref_size(type);
> +		}
> +		iterator->cur_ptr += size;
> +		if (iterator->cur_ptr < iterator->end_ptr)
> +			return 0;
> +
> +		/* All inline items iterated, fall through */
> +	}
> +	/* We're at keyed items, there is no inline item, just go next item */
> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
> +	if (ret > 0 || ret < 0)
> +		return ret;
> +
> +	btrfs_item_key_to_cpu(path->nodes[0], &iterator->cur_key,
> +			      path->slots[0]);
> +	if (iterator->cur_key.objectid != iterator->bytenr ||
> +	    (iterator->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
> +	     iterator->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
> +		return 1;
> +	iterator->item_ptr = btrfs_item_ptr_offset(path->nodes[0],
> +						   path->slots[0]);
> +	iterator->cur_ptr = iterator->item_ptr;
> +	iterator->end_ptr = btrfs_item_end_nr(path->nodes[0], path->slots[0]);
> +	return 0;
> +}
>
Nikolay Borisov Feb. 14, 2020, 9:28 a.m. UTC | #2
On 14.02.20 г. 11:25 ч., Nikolay Borisov wrote:
> 
> 
> On 14.02.20 г. 10:13 ч., Qu Wenruo wrote:
>> This function will go next inline/keyed backref for
>> btrfs_backref_iterator infrastructure.
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>> ---
>>  fs/btrfs/backref.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 48 insertions(+)
>>
>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>> index 73c4829609c9..c5f87439f31c 100644
>> --- a/fs/btrfs/backref.c
>> +++ b/fs/btrfs/backref.c
>> @@ -2310,3 +2310,51 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>  	btrfs_release_path(path);
>>  	return ret;
>>  }
>> +
>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>> +{
> 
> make it a bool function.

Disregard this.
Qu Wenruo Feb. 14, 2020, 9:35 a.m. UTC | #3
On 2020/2/14 下午5:25, Nikolay Borisov wrote:
>
>
> On 14.02.20 г. 10:13 ч., Qu Wenruo wrote:
>> This function will go next inline/keyed backref for
>> btrfs_backref_iterator infrastructure.
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>> ---
>>  fs/btrfs/backref.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 48 insertions(+)
>>
>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>> index 73c4829609c9..c5f87439f31c 100644
>> --- a/fs/btrfs/backref.c
>> +++ b/fs/btrfs/backref.c
>> @@ -2310,3 +2310,51 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>  	btrfs_release_path(path);
>>  	return ret;
>>  }
>> +
>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>> +{
>
> make it a bool function.

Even when we could hit an error in this case?

In the next patch, you will see that we handle the end of backrefs and
errors differently.
Thus I don't think it's a good idea to make it bool.

Thanks,
Qu
>
>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>> +	struct btrfs_path *path = iterator->path;
>> +	struct btrfs_extent_inline_ref *iref;
>> +	int ret;
>> +	u32 size;
>> +
>> +	if (iterator->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
>> +	    iterator->cur_key.type == BTRFS_METADATA_ITEM_KEY) {
>> +		/* We're still inside the inline refs */
>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>> +			/* First tree block info */
>> +			size = sizeof(struct btrfs_tree_block_info);
>> +		} else {
>> +			/* Use inline ref type to determine the size */
>> +			int type;
>> +
>> +			iref = (struct btrfs_extent_inline_ref *)
>> +				(iterator->cur_ptr);
>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>> +
>> +			size = btrfs_extent_inline_ref_size(type);
>> +		}
>> +		iterator->cur_ptr += size;
>> +		if (iterator->cur_ptr < iterator->end_ptr)
>> +			return 0;
>> +
>> +		/* All inline items iterated, fall through */
>> +	}
>> +	/* We're at keyed items, there is no inline item, just go next item */
>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>> +	if (ret > 0 || ret < 0)
>> +		return ret;
>> +
>> +	btrfs_item_key_to_cpu(path->nodes[0], &iterator->cur_key,
>> +			      path->slots[0]);
>> +	if (iterator->cur_key.objectid != iterator->bytenr ||
>> +	    (iterator->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
>> +	     iterator->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
>> +		return 1;
>> +	iterator->item_ptr = btrfs_item_ptr_offset(path->nodes[0],
>> +						   path->slots[0]);
>> +	iterator->cur_ptr = iterator->item_ptr;
>> +	iterator->end_ptr = btrfs_item_end_nr(path->nodes[0], path->slots[0]);
>> +	return 0;
>> +}
>>
diff mbox series

Patch

diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 73c4829609c9..c5f87439f31c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2310,3 +2310,51 @@  int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
 	btrfs_release_path(path);
 	return ret;
 }
+
+int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
+{
+	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
+	struct btrfs_path *path = iterator->path;
+	struct btrfs_extent_inline_ref *iref;
+	int ret;
+	u32 size;
+
+	if (iterator->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
+	    iterator->cur_key.type == BTRFS_METADATA_ITEM_KEY) {
+		/* We're still inside the inline refs */
+		if (btrfs_backref_has_tree_block_info(iterator)) {
+			/* First tree block info */
+			size = sizeof(struct btrfs_tree_block_info);
+		} else {
+			/* Use inline ref type to determine the size */
+			int type;
+
+			iref = (struct btrfs_extent_inline_ref *)
+				(iterator->cur_ptr);
+			type = btrfs_extent_inline_ref_type(eb, iref);
+
+			size = btrfs_extent_inline_ref_size(type);
+		}
+		iterator->cur_ptr += size;
+		if (iterator->cur_ptr < iterator->end_ptr)
+			return 0;
+
+		/* All inline items iterated, fall through */
+	}
+	/* We're at keyed items, there is no inline item, just go next item */
+	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
+	if (ret > 0 || ret < 0)
+		return ret;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &iterator->cur_key,
+			      path->slots[0]);
+	if (iterator->cur_key.objectid != iterator->bytenr ||
+	    (iterator->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
+	     iterator->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
+		return 1;
+	iterator->item_ptr = btrfs_item_ptr_offset(path->nodes[0],
+						   path->slots[0]);
+	iterator->cur_ptr = iterator->item_ptr;
+	iterator->end_ptr = btrfs_item_end_nr(path->nodes[0], path->slots[0]);
+	return 0;
+}