
[v3,2/3] btrfs: backref: Implement btrfs_backref_iterator_next()

Message ID 20200217063111.65941-3-wqu@suse.com (mailing list archive)
State New, archived
Series Btrfs: relocation: Refactor build_backref_tree() using btrfs_backref_iterator infrastructure

Commit Message

Qu Wenruo Feb. 17, 2020, 6:31 a.m. UTC
This function will go to the next inline/keyed backref for the
btrfs_backref_iterator infrastructure.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
 2 files changed, 81 insertions(+)
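
A minimal sketch of the intended calling pattern (illustration only:
btrfs_backref_iterator_alloc() comes from patch 1/3 of this series, and
its exact signature plus the handle_one_backref() helper are assumptions
here):

	struct btrfs_backref_iterator *iterator;
	int ret;

	iterator = btrfs_backref_iterator_alloc(fs_info, GFP_NOFS);
	if (!iterator)
		return -ENOMEM;

	ret = btrfs_backref_iterator_start(iterator, bytenr);
	while (ret == 0) {
		/* cur_ptr points at the current inline or keyed backref */
		handle_one_backref(iterator);
		ret = btrfs_backref_iterator_next(iterator);
	}
	/* ret > 0: no more backrefs for this bytenr; ret < 0: error */
	btrfs_backref_iterator_release(iterator);
	btrfs_backref_iterator_free(iterator);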

Comments

Nikolay Borisov Feb. 17, 2020, 10:47 a.m. UTC | #1
On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
> This function will go to the next inline/keyed backref for the
> btrfs_backref_iterator infrastructure.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>  2 files changed, 81 insertions(+)
> 
> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
> index 8bd5e067831c..fb0abe344851 100644
> --- a/fs/btrfs/backref.c
> +++ b/fs/btrfs/backref.c
> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>  	btrfs_backref_iterator_release(iterator);
>  	return ret;
>  }
> +
> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)

Document the return values: 0 in case there are more backrefs for the
given bytenr, 1 in case there aren't, and a negative value in case of
error.
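
E.g. a kernel-doc comment along these lines (the wording is only a
suggestion):

/*
 * btrfs_backref_iterator_next - go to the next backref of the bytenr
 *
 * Return: 0 if there is a next backref for the current bytenr,
 *         1 if there are no more backrefs,
 *         <0 on error.
 */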

> +{
> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
> +	struct btrfs_path *path = iterator->path;
> +	struct btrfs_extent_inline_ref *iref;
> +	int ret;
> +	u32 size;
> +
> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
> +		/* We're still inside the inline refs */
> +		if (btrfs_backref_has_tree_block_info(iterator)) {
> +			/* First tree block info */
> +			size = sizeof(struct btrfs_tree_block_info);
> +		} else {
> +			/* Use inline ref type to determine the size */
> +			int type;
> +
> +			iref = (struct btrfs_extent_inline_ref *)
> +				(iterator->cur_ptr);
> +			type = btrfs_extent_inline_ref_type(eb, iref);
> +
> +			size = btrfs_extent_inline_ref_size(type);
> +		}
> +		iterator->cur_ptr += size;
> +		if (iterator->cur_ptr < iterator->end_ptr)
> +			return 0;
> +
> +		/* All inline items iterated, fall through */
> +	}

This if could be rewritten as:

if (btrfs_backref_iterator_is_inline_ref(iterator) &&
    iterator->cur_ptr < iterator->end_ptr)

What this achieves is:

1. Clarity that this whole branch is executed only if we are within the
inline refs limits.
2. It also optimises the function since, in the current version, after
the last inline backref has been processed, iterator->cur_ptr ==
iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
will needlessly execute:

iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
type = btrfs_extent_inline_ref_type(eb, iref);
size = btrfs_extent_inline_ref_size(type);
iterator->cur_ptr += size;

only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
continue processing keyed items.

As a matter of fact, you will be reading past the metadata_item, since
cur_ptr will be at the end of it, and any dereferences will read from
the next item. This might not cause a crash, but it's still wrong.
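
Put together, the reworked branch would look something like this (the
body is unchanged from the patch, only the entry condition differs):

if (btrfs_backref_iterator_is_inline_ref(iterator) &&
    iterator->cur_ptr < iterator->end_ptr) {
	/* We're still inside the inline refs */
	if (btrfs_backref_has_tree_block_info(iterator)) {
		/* First tree block info */
		size = sizeof(struct btrfs_tree_block_info);
	} else {
		/* Use inline ref type to determine the size */
		int type;

		iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
		type = btrfs_extent_inline_ref_type(eb, iref);
		size = btrfs_extent_inline_ref_size(type);
	}
	iterator->cur_ptr += size;
	if (iterator->cur_ptr < iterator->end_ptr)
		return 0;

	/* All inline items iterated, fall through to the keyed items */
}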


> +	/* We're at keyed items, there is no inline item, just go next item */
> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
> +	if (ret > 0 || ret < 0)
> +		return ret;

nit: if (ret != 0) return ret;

<snip>
Qu Wenruo Feb. 17, 2020, 11:29 a.m. UTC | #2
On 2020/2/17 下午6:47, Nikolay Borisov wrote:
>
>
> On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
>> This function will go to the next inline/keyed backref for the
>> btrfs_backref_iterator infrastructure.
>>
>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>> ---
>>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>>  2 files changed, 81 insertions(+)
>>
>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>> index 8bd5e067831c..fb0abe344851 100644
>> --- a/fs/btrfs/backref.c
>> +++ b/fs/btrfs/backref.c
>> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>  	btrfs_backref_iterator_release(iterator);
>>  	return ret;
>>  }
>> +
>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>
> Document the return values: 0 in case there are more backrefs for the
> given bytenr, 1 in case there aren't, and a negative value in case of
> error.
>
>> +{
>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>> +	struct btrfs_path *path = iterator->path;
>> +	struct btrfs_extent_inline_ref *iref;
>> +	int ret;
>> +	u32 size;
>> +
>> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
>> +		/* We're still inside the inline refs */
>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>> +			/* First tree block info */
>> +			size = sizeof(struct btrfs_tree_block_info);
>> +		} else {
>> +			/* Use inline ref type to determine the size */
>> +			int type;
>> +
>> +			iref = (struct btrfs_extent_inline_ref *)
>> +				(iterator->cur_ptr);
>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>> +
>> +			size = btrfs_extent_inline_ref_size(type);
>> +		}
>> +		iterator->cur_ptr += size;
>> +		if (iterator->cur_ptr < iterator->end_ptr)
>> +			return 0;
>> +
>> +		/* All inline items iterated, fall through */
>> +	}
>
> This if could be rewritten as:
>
> if (btrfs_backref_iterator_is_inline_ref(iterator) &&
>     iterator->cur_ptr < iterator->end_ptr)
>
> What this achieves is:
>
> 1. Clarity that this whole branch is executed only if we are within the
> inline refs limits.
> 2. It also optimises the function since, in the current version, after
> the last inline backref has been processed, iterator->cur_ptr ==
> iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
> will needlessly execute:
>
> iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
> type = btrfs_extent_inline_ref_type(eb, iref);
> size = btrfs_extent_inline_ref_size(type);
> iterator->cur_ptr += size;
>
> only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
> continue processing keyed items.
>
> As a matter of fact, you will be reading past the metadata_item, since
> cur_ptr will be at the end of it, and any dereferences will read from
> the next item. This might not cause a crash, but it's still wrong.

This shouldn't happen, as we must ensure cur_ptr < item_end for all callers.

For the _next() call, the check after increasing cur_ptr ensures this is OK.

But it's a problem for the _start() call, as we may have a case where an
EXTENT_ITEM/METADATA_ITEM has no inline refs.

I'll fix this in the next version.

Thanks,
Qu

>
>
>> +	/* We're at keyed items, there is no inline item, just go next item */
>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>> +	if (ret > 0 || ret < 0)
>> +		return ret;
>
> nit: if (ret != 0) return ret;
>
> <snip>
>
Nikolay Borisov Feb. 17, 2020, 11:42 a.m. UTC | #3
On 17.02.20 г. 13:29 ч., Qu Wenruo wrote:
> 
> 
> On 2020/2/17 下午6:47, Nikolay Borisov wrote:
>>
>>
>> On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
>>> This function will go to the next inline/keyed backref for the
>>> btrfs_backref_iterator infrastructure.
>>>
>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>> ---
>>>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>>>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>>>  2 files changed, 81 insertions(+)
>>>
>>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>>> index 8bd5e067831c..fb0abe344851 100644
>>> --- a/fs/btrfs/backref.c
>>> +++ b/fs/btrfs/backref.c
>>> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>>  	btrfs_backref_iterator_release(iterator);
>>>  	return ret;
>>>  }
>>> +
>>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>>
>> Document the return values: 0 in case there are more backrefs for the
>> given bytenr, 1 in case there aren't, and a negative value in case of
>> error.
>>
>>> +{
>>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>>> +	struct btrfs_path *path = iterator->path;
>>> +	struct btrfs_extent_inline_ref *iref;
>>> +	int ret;
>>> +	u32 size;
>>> +
>>> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
>>> +		/* We're still inside the inline refs */
>>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>>> +			/* First tree block info */
>>> +			size = sizeof(struct btrfs_tree_block_info);
>>> +		} else {
>>> +			/* Use inline ref type to determine the size */
>>> +			int type;
>>> +
>>> +			iref = (struct btrfs_extent_inline_ref *)
>>> +				(iterator->cur_ptr);
>>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>>> +
>>> +			size = btrfs_extent_inline_ref_size(type);
>>> +		}
>>> +		iterator->cur_ptr += size;
>>> +		if (iterator->cur_ptr < iterator->end_ptr)
>>> +			return 0;
>>> +
>>> +		/* All inline items iterated, fall through */
>>> +	}
>>
>> This if could be rewritten as:
>>
>> if (btrfs_backref_iterator_is_inline_ref(iterator) &&
>>     iterator->cur_ptr < iterator->end_ptr)
>>
>> What this achieves is:
>>
>> 1. Clarity that this whole branch is executed only if we are within the
>> inline refs limits.
>> 2. It also optimises the function since, in the current version, after
>> the last inline backref has been processed, iterator->cur_ptr ==
>> iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
>> will needlessly execute:
>>
>> iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
>> type = btrfs_extent_inline_ref_type(eb, iref);
>> size = btrfs_extent_inline_ref_size(type);
>> iterator->cur_ptr += size;
>>
>> only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
>> continue processing keyed items.
>>
>> As a matter of fact, you will be reading past the metadata_item, since
>> cur_ptr will be at the end of it, and any dereferences will read from
>> the next item. This might not cause a crash, but it's still wrong.
> 
> This shouldn't happen, as we must ensure cur_ptr < item_end for all callers.


How are you ensuring this? Before processing the last inline ref,
cur_ptr would be end_ptr - btrfs_extent_inline_ref_size(type);

After it's processed, cur_ptr == end_ptr. Then you will do another call
to btrfs_backref_iterator_next, which will do the same calculation. What
am I missing?

> 
> For the _next() call, the check after increasing cur_ptr ensures this is OK.
> 
> But it's a problem for the _start() call, as we may have a case where an
> EXTENT_ITEM/METADATA_ITEM has no inline refs.
> 
> I'll fix this in the next version.
> 
> Thanks,
> Qu
> 
>>
>>
>>> +	/* We're at keyed items, there is no inline item, just go next item */
>>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>>> +	if (ret > 0 || ret < 0)
>>> +		return ret;
>>
>> nit: if (ret != 0) return ret;
>>
>> <snip>
>>
Qu Wenruo Feb. 17, 2020, 11:45 a.m. UTC | #4
On 2020/2/17 下午7:42, Nikolay Borisov wrote:
>
>
> On 17.02.20 г. 13:29 ч., Qu Wenruo wrote:
>>
>>
>> On 2020/2/17 下午6:47, Nikolay Borisov wrote:
>>>
>>>
>>> On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
>>>> This function will go to the next inline/keyed backref for the
>>>> btrfs_backref_iterator infrastructure.
>>>>
>>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>>> ---
>>>>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>>>>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>>>>  2 files changed, 81 insertions(+)
>>>>
>>>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>>>> index 8bd5e067831c..fb0abe344851 100644
>>>> --- a/fs/btrfs/backref.c
>>>> +++ b/fs/btrfs/backref.c
>>>> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>>>  	btrfs_backref_iterator_release(iterator);
>>>>  	return ret;
>>>>  }
>>>> +
>>>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>>>
>>> Document the return values: 0 in case there are more backrefs for the
>>> given bytenr, 1 in case there aren't, and a negative value in case of
>>> error.
>>>
>>>> +{
>>>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>>>> +	struct btrfs_path *path = iterator->path;
>>>> +	struct btrfs_extent_inline_ref *iref;
>>>> +	int ret;
>>>> +	u32 size;
>>>> +
>>>> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
>>>> +		/* We're still inside the inline refs */
>>>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>>>> +			/* First tree block info */
>>>> +			size = sizeof(struct btrfs_tree_block_info);
>>>> +		} else {
>>>> +			/* Use inline ref type to determine the size */
>>>> +			int type;
>>>> +
>>>> +			iref = (struct btrfs_extent_inline_ref *)
>>>> +				(iterator->cur_ptr);
>>>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>>>> +
>>>> +			size = btrfs_extent_inline_ref_size(type);
>>>> +		}
>>>> +		iterator->cur_ptr += size;
>>>> +		if (iterator->cur_ptr < iterator->end_ptr)
>>>> +			return 0;
>>>> +
>>>> +		/* All inline items iterated, fall through */
>>>> +	}
>>>
>>> This if could be rewritten as:
>>>
>>> if (btrfs_backref_iterator_is_inline_ref(iterator) &&
>>>     iterator->cur_ptr < iterator->end_ptr)
>>>
>>> What this achieves is:
>>>
>>> 1. Clarity that this whole branch is executed only if we are within the
>>> inline refs limits.
>>> 2. It also optimises the function since, in the current version, after
>>> the last inline backref has been processed, iterator->cur_ptr ==
>>> iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
>>> will needlessly execute:
>>>
>>> iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
>>> type = btrfs_extent_inline_ref_type(eb, iref);
>>> size = btrfs_extent_inline_ref_size(type);
>>> iterator->cur_ptr += size;
>>>
>>> only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
>>> continue processing keyed items.
>>>
>>> As a matter of fact, you will be reading past the metadata_item, since
>>> cur_ptr will be at the end of it, and any dereferences will read from
>>> the next item. This might not cause a crash, but it's still wrong.
>>
>> This shouldn't happen, as we must ensure cur_ptr < item_end for all callers.
>
>
> How are you ensuring this? Before processing the last inline ref,
> cur_ptr would be end_ptr - btrfs_extent_inline_ref_size(type);

Firstly, in the _start() call, we can easily check whether we have any
inline refs.

If not, search the next item.
If so, return cur_ptr, which points to the current inline extent ref.

Secondly, in the _next() call, we keep the current check: increase
cur_ptr, then check it against end_ptr.

That way, all backref_iter callers will get a cur_ptr that is always
smaller than end_ptr.
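
In _start(), right after cur_ptr/end_ptr are set up, something like this
(just a sketch; the exact code will be in the next version):

	if (iterator->cur_ptr >= iterator->end_ptr) {
		/* No inline refs at all, go to the first keyed item */
		ret = btrfs_next_item(iterator->fs_info->extent_root,
				      iterator->path);
		if (ret) {
			btrfs_backref_iterator_release(iterator);
			return ret;
		}
		/* Then re-read cur_key, item_ptr, cur_ptr and end_ptr */
	}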

Thanks,
Qu
>
> After it's processed, cur_ptr == end_ptr. Then you will do another call
> to btrfs_backref_iterator_next, which will do the same calculation. What
> am I missing?
>
>>
>> For the _next() call, the check after increasing cur_ptr ensures this is OK.
>>
>> But it's a problem for the _start() call, as we may have a case where an
>> EXTENT_ITEM/METADATA_ITEM has no inline refs.
>>
>> I'll fix this in the next version.
>>
>> Thanks,
>> Qu
>>
>>>
>>>
>>>> +	/* We're at keyed items, there is no inline item, just go next item */
>>>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>>>> +	if (ret > 0 || ret < 0)
>>>> +		return ret;
>>>
>>> nit: if (ret != 0) return ret;
>>>
>>> <snip>
>>>
Nikolay Borisov Feb. 17, 2020, 2:13 p.m. UTC | #5
On 17.02.20 г. 13:45 ч., Qu Wenruo wrote:
> 
> 
> On 2020/2/17 下午7:42, Nikolay Borisov wrote:
>>
>>
>> On 17.02.20 г. 13:29 ч., Qu Wenruo wrote:
>>>
>>>
>>> On 2020/2/17 下午6:47, Nikolay Borisov wrote:
>>>>
>>>>
>>>> On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
>>>>> This function will go to the next inline/keyed backref for the
>>>>> btrfs_backref_iterator infrastructure.
>>>>>
>>>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>>>> ---
>>>>>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>>>>>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>>>>>  2 files changed, 81 insertions(+)
>>>>>
>>>>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>>>>> index 8bd5e067831c..fb0abe344851 100644
>>>>> --- a/fs/btrfs/backref.c
>>>>> +++ b/fs/btrfs/backref.c
>>>>> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>>>>  	btrfs_backref_iterator_release(iterator);
>>>>>  	return ret;
>>>>>  }
>>>>> +
>>>>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>>>>
>>>> Document the return values: 0 in case there are more backrefs for the
>>>> given bytenr, 1 in case there aren't, and a negative value in case of
>>>> error.
>>>>
>>>>> +{
>>>>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>>>>> +	struct btrfs_path *path = iterator->path;
>>>>> +	struct btrfs_extent_inline_ref *iref;
>>>>> +	int ret;
>>>>> +	u32 size;
>>>>> +
>>>>> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
>>>>> +		/* We're still inside the inline refs */
>>>>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>>>>> +			/* First tree block info */
>>>>> +			size = sizeof(struct btrfs_tree_block_info);
>>>>> +		} else {
>>>>> +			/* Use inline ref type to determine the size */
>>>>> +			int type;
>>>>> +
>>>>> +			iref = (struct btrfs_extent_inline_ref *)
>>>>> +				(iterator->cur_ptr);
>>>>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>>>>> +
>>>>> +			size = btrfs_extent_inline_ref_size(type);
>>>>> +		}
>>>>> +		iterator->cur_ptr += size;
>>>>> +		if (iterator->cur_ptr < iterator->end_ptr)
>>>>> +			return 0;
>>>>> +
>>>>> +		/* All inline items iterated, fall through */
>>>>> +	}
>>>>
>>>> This if could be rewritten as:
>>>>
>>>> if (btrfs_backref_iterator_is_inline_ref(iterator) &&
>>>>     iterator->cur_ptr < iterator->end_ptr)
>>>>
>>>> What this achieves is:
>>>>
>>>> 1. Clarity that this whole branch is executed only if we are within the
>>>> inline refs limits.
>>>> 2. It also optimises the function since, in the current version, after
>>>> the last inline backref has been processed, iterator->cur_ptr ==
>>>> iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
>>>> will needlessly execute:
>>>>
>>>> iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
>>>> type = btrfs_extent_inline_ref_type(eb, iref);
>>>> size = btrfs_extent_inline_ref_size(type);
>>>> iterator->cur_ptr += size;
>>>>
>>>> only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
>>>> continue processing keyed items.
>>>>
>>>> As a matter of fact, you will be reading past the metadata_item, since
>>>> cur_ptr will be at the end of it, and any dereferences will read from
>>>> the next item. This might not cause a crash, but it's still wrong.
>>>
>>> This shouldn't happen, as we must ensure cur_ptr < item_end for all callers.
>>
>>
>> How are you ensuring this? Before processing the last inline ref,
>> cur_ptr would be end_ptr - btrfs_extent_inline_ref_size(type);
> 
> Firstly, in the _start() call, we can easily check whether we have any
> inline refs.
> 
> If not, search the next item.
> If so, return cur_ptr, which points to the current inline extent ref.
> 
> Secondly, in the _next() call, we keep the current check: increase
> cur_ptr, then check it against end_ptr.
> 
> That way, all backref_iter callers will get a cur_ptr that is always
> smaller than end_ptr.

Apparently not; btrfs/003 triggers the following assert:

diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index fb0abe344851..403a75f0c99c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2328,6 +2328,7 @@ int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
                        /* Use inline ref type to determine the size */
                        int type;

+                       ASSERT(iterator->cur_ptr < iterator->end_ptr);
                        iref = (struct btrfs_extent_inline_ref *)
                                (iterator->cur_ptr);
                        type = btrfs_extent_inline_ref_type(eb, iref);


Trigger:

[   58.884441] assertion failed: iterator->cur_ptr < iterator->end_ptr,
in fs/btrfs/backref.c:2331


> 
> Thanks,
> Qu
>>
>> After it's processed, cur_ptr == end_ptr. Then you will do another call
>> to btrfs_backref_iterator_next, which will do the same calculation. What
>> am I missing?
>>
>>>
>>> For the _next() call, the check after increasing cur_ptr ensures this is OK.
>>>
>>> But it's a problem for the _start() call, as we may have a case where an
>>> EXTENT_ITEM/METADATA_ITEM has no inline refs.
>>>
>>> I'll fix this in the next version.
>>>
>>> Thanks,
>>> Qu
>>>
>>>>
>>>>
>>>>> +	/* We're at keyed items, there is no inline item, just go next item */
>>>>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>>>>> +	if (ret > 0 || ret < 0)
>>>>> +		return ret;
>>>>
>>>> nit: if (ret != 0) return ret;
>>>>
>>>> <snip>
>>>>
Qu Wenruo Feb. 17, 2020, 11:59 p.m. UTC | #6
On 2020/2/17 下午10:13, Nikolay Borisov wrote:
> 
> 
> On 17.02.20 г. 13:45 ч., Qu Wenruo wrote:
>>
>>
>> On 2020/2/17 下午7:42, Nikolay Borisov wrote:
>>>
>>>
>>> On 17.02.20 г. 13:29 ч., Qu Wenruo wrote:
>>>>
>>>>
>>>> On 2020/2/17 下午6:47, Nikolay Borisov wrote:
>>>>>
>>>>>
>>>>> On 17.02.20 г. 8:31 ч., Qu Wenruo wrote:
>>>>>> This function will go to the next inline/keyed backref for the
>>>>>> btrfs_backref_iterator infrastructure.
>>>>>>
>>>>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>>>>> ---
>>>>>>  fs/btrfs/backref.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
>>>>>>  fs/btrfs/backref.h | 34 +++++++++++++++++++++++++++++++++
>>>>>>  2 files changed, 81 insertions(+)
>>>>>>
>>>>>> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
>>>>>> index 8bd5e067831c..fb0abe344851 100644
>>>>>> --- a/fs/btrfs/backref.c
>>>>>> +++ b/fs/btrfs/backref.c
>>>>>> @@ -2310,3 +2310,50 @@ int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
>>>>>>  	btrfs_backref_iterator_release(iterator);
>>>>>>  	return ret;
>>>>>>  }
>>>>>> +
>>>>>> +int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>>>>>
>>>>> Document the return values: 0 in case there are more backrefs for the
>>>>> given bytenr, 1 in case there aren't, and a negative value in case of
>>>>> error.
>>>>>
>>>>>> +{
>>>>>> +	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
>>>>>> +	struct btrfs_path *path = iterator->path;
>>>>>> +	struct btrfs_extent_inline_ref *iref;
>>>>>> +	int ret;
>>>>>> +	u32 size;
>>>>>> +
>>>>>> +	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
>>>>>> +		/* We're still inside the inline refs */
>>>>>> +		if (btrfs_backref_has_tree_block_info(iterator)) {
>>>>>> +			/* First tree block info */
>>>>>> +			size = sizeof(struct btrfs_tree_block_info);
>>>>>> +		} else {
>>>>>> +			/* Use inline ref type to determine the size */
>>>>>> +			int type;
>>>>>> +
>>>>>> +			iref = (struct btrfs_extent_inline_ref *)
>>>>>> +				(iterator->cur_ptr);
>>>>>> +			type = btrfs_extent_inline_ref_type(eb, iref);
>>>>>> +
>>>>>> +			size = btrfs_extent_inline_ref_size(type);
>>>>>> +		}
>>>>>> +		iterator->cur_ptr += size;
>>>>>> +		if (iterator->cur_ptr < iterator->end_ptr)
>>>>>> +			return 0;
>>>>>> +
>>>>>> +		/* All inline items iterated, fall through */
>>>>>> +	}
>>>>>
>>>>> This if could be rewritten as:
>>>>>
>>>>> if (btrfs_backref_iterator_is_inline_ref(iterator) &&
>>>>>     iterator->cur_ptr < iterator->end_ptr)
>>>>>
>>>>> What this achieves is:
>>>>>
>>>>> 1. Clarity that this whole branch is executed only if we are within the
>>>>> inline refs limits.
>>>>> 2. It also optimises the function since, in the current version, after
>>>>> the last inline backref has been processed, iterator->cur_ptr ==
>>>>> iterator->end_ptr. On the next call to btrfs_backref_iterator_next you
>>>>> will needlessly execute:
>>>>>
>>>>> iref = (struct btrfs_extent_inline_ref *)(iterator->cur_ptr);
>>>>> type = btrfs_extent_inline_ref_type(eb, iref);
>>>>> size = btrfs_extent_inline_ref_size(type);
>>>>> iterator->cur_ptr += size;
>>>>>
>>>>> only to fail the "if (iterator->cur_ptr < iterator->end_ptr)" check and
>>>>> continue processing keyed items.
>>>>>
>>>>> As a matter of fact, you will be reading past the metadata_item, since
>>>>> cur_ptr will be at the end of it, and any dereferences will read from
>>>>> the next item. This might not cause a crash, but it's still wrong.
>>>>
>>>> This shouldn't happen, as we must ensure cur_ptr < item_end for all callers.
>>>
>>>
>>> How are you ensuring this? Before processing the last inline ref,
>>> cur_ptr would be end_ptr - btrfs_extent_inline_ref_size(type);
>>
>> Firstly, in the _start() call, we can easily check whether we have any
>> inline refs.
>>
>> If not, search the next item.
>> If so, return cur_ptr, which points to the current inline extent ref.
>>
>> Secondly, in the _next() call, we keep the current check: increase
>> cur_ptr, then check it against end_ptr.
>>
>> That way, all backref_iter callers will get a cur_ptr that is always
>> smaller than end_ptr.
> 
> Apparently not; btrfs/003 triggers the following assert:
> 
> diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
> index fb0abe344851..403a75f0c99c 100644
> --- a/fs/btrfs/backref.c
> +++ b/fs/btrfs/backref.c
> @@ -2328,6 +2328,7 @@ int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
>                         /* Use inline ref type to determine the size */
>                         int type;
> 
> +                       ASSERT(iterator->cur_ptr < iterator->end_ptr);
>                         iref = (struct btrfs_extent_inline_ref *)
>                                 (iterator->cur_ptr);
>                         type = btrfs_extent_inline_ref_type(eb, iref);

Exactly as I said: in _start() there are not enough checks to ensure
cur_ptr is always smaller than end_ptr.

Thus it triggers the ASSERT().
Will fix in the next version.

Thanks,
Qu
> 
> 
> Trigger:
> 
> [   58.884441] assertion failed: iterator->cur_ptr < iterator->end_ptr,
> in fs/btrfs/backref.c:2331
> 
> 
>>
>> Thanks,
>> Qu
>>>
>>> After it's processed, cur_ptr == end_ptr. Then you will do another call
>>> to btrfs_backref_iterator_next, which will do the same calculation. What
>>> am I missing?
>>>
>>>>
>>>> For the _next() call, the check after increasing cur_ptr ensures this is OK.
>>>>
>>>> But it's a problem for the _start() call, as we may have a case where an
>>>> EXTENT_ITEM/METADATA_ITEM has no inline refs.
>>>>
>>>> I'll fix this in the next version.
>>>>
>>>> Thanks,
>>>> Qu
>>>>
>>>>>
>>>>>
>>>>>> +	/* We're at keyed items, there is no inline item, just go next item */
>>>>>> +	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
>>>>>> +	if (ret > 0 || ret < 0)
>>>>>> +		return ret;
>>>>>
>>>>> nit: if (ret != 0) return ret;
>>>>>
>>>>> <snip>
>>>>>

Patch

diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8bd5e067831c..fb0abe344851 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2310,3 +2310,50 @@  int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
 	btrfs_backref_iterator_release(iterator);
 	return ret;
 }
+
+int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator)
+{
+	struct extent_buffer *eb = btrfs_backref_get_eb(iterator);
+	struct btrfs_path *path = iterator->path;
+	struct btrfs_extent_inline_ref *iref;
+	int ret;
+	u32 size;
+
+	if (btrfs_backref_iterator_is_inline_ref(iterator)) {
+		/* We're still inside the inline refs */
+		if (btrfs_backref_has_tree_block_info(iterator)) {
+			/* First tree block info */
+			size = sizeof(struct btrfs_tree_block_info);
+		} else {
+			/* Use inline ref type to determine the size */
+			int type;
+
+			iref = (struct btrfs_extent_inline_ref *)
+				(iterator->cur_ptr);
+			type = btrfs_extent_inline_ref_type(eb, iref);
+
+			size = btrfs_extent_inline_ref_size(type);
+		}
+		iterator->cur_ptr += size;
+		if (iterator->cur_ptr < iterator->end_ptr)
+			return 0;
+
+		/* All inline items iterated, fall through */
+	}
+	/* We're at keyed items, there is no inline item, just go next item */
+	ret = btrfs_next_item(iterator->fs_info->extent_root, iterator->path);
+	if (ret > 0 || ret < 0)
+		return ret;
+
+	btrfs_item_key_to_cpu(path->nodes[0], &iterator->cur_key,
+			      path->slots[0]);
+	if (iterator->cur_key.objectid != iterator->bytenr ||
+	    (iterator->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
+	     iterator->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
+		return 1;
+	iterator->item_ptr = btrfs_item_ptr_offset(path->nodes[0],
+						   path->slots[0]);
+	iterator->cur_ptr = iterator->item_ptr;
+	iterator->end_ptr = btrfs_item_end_nr(path->nodes[0], path->slots[0]);
+	return 0;
+}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index fa73f02f14f6..016999339be1 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -122,8 +122,42 @@  static inline void btrfs_backref_iterator_free(
 	kfree(iterator);
 }
 
+static inline struct extent_buffer *
+btrfs_backref_get_eb(struct btrfs_backref_iterator *iterator)
+{
+	if (!iterator)
+		return NULL;
+	return iterator->path->nodes[0];
+}
+
+/*
+ * For metadata with EXTENT_ITEM key (non-skinny) case, the first inline data
+ * is btrfs_tree_block_info, without a btrfs_extent_inline_ref header.
+ *
+ * This helper is here to determine if that's the case.
+ */
+static inline bool btrfs_backref_has_tree_block_info(
+		struct btrfs_backref_iterator *iterator)
+{
+	if (iterator->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
+	    iterator->cur_ptr - iterator->item_ptr ==
+	    sizeof(struct btrfs_extent_item))
+		return true;
+	return false;
+}
+
 int btrfs_backref_iterator_start(struct btrfs_backref_iterator *iterator,
 				 u64 bytenr);
+int btrfs_backref_iterator_next(struct btrfs_backref_iterator *iterator);
+
+static inline bool
+btrfs_backref_iterator_is_inline_ref(struct btrfs_backref_iterator *iterator)
+{
+	if (iterator->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
+	    iterator->cur_key.type == BTRFS_METADATA_ITEM_KEY)
+		return true;
+	return false;
+}
 
 static inline void
 btrfs_backref_iterator_release(struct btrfs_backref_iterator *iterator)