
[v3,2/3] btrfs: block-group: Refactor btrfs_read_block_groups()

Message ID 20191010023928.24586-3-wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: Introduce new incompat feature BG_TREE to hugely reduce mount time

Commit Message

Qu Wenruo Oct. 10, 2019, 2:39 a.m. UTC
Refactor the work inside the loop of btrfs_read_block_groups() into a
separate function, read_one_block_group().

This allows read_one_block_group() to be reused for the later BG_TREE
feature.

The refactor also includes the following extra fix:
- Use btrfs_fs_incompat() to replace the open-coded feature check

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
---
 fs/btrfs/block-group.c | 215 +++++++++++++++++++++--------------------
 1 file changed, 108 insertions(+), 107 deletions(-)
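
The extra fix mentioned above replaces the open-coded incompat check with
the existing helper; both forms appear in the diff below:

	/* Before: open-coded feature check */
	u64 feature = btrfs_super_incompat_flags(info->super_copy);
	int mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

	/* After: helper-based check */
	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);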

Comments

Anand Jain Oct. 10, 2019, 2:52 a.m. UTC | #1
On 10/10/19 10:39 AM, Qu Wenruo wrote:
> Refactor the work inside the loop of btrfs_read_block_groups() into a
> separate function, read_one_block_group().
> 
> This allows read_one_block_group() to be reused for the later BG_TREE
> feature.
> 
> The refactor also includes the following extra fix:
> - Use btrfs_fs_incompat() to replace the open-coded feature check
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
> Reviewed-by: Anand Jain <anand.jain@oracle.com>
> ---
>   fs/btrfs/block-group.c | 215 +++++++++++++++++++++--------------------
>   1 file changed, 108 insertions(+), 107 deletions(-)
> 
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index c906a2b6c2cf..0c5eef0610fa 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1687,6 +1687,109 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
>   	return ret;
>   }
>   
> +static int read_one_block_group(struct btrfs_fs_info *info,
> +				struct btrfs_path *path,
> +				int need_clear)
> +{
> +	struct extent_buffer *leaf = path->nodes[0];
> +	struct btrfs_block_group_cache *cache;
> +	struct btrfs_space_info *space_info;
> +	struct btrfs_key key;
> +	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);

  nit: mixed can be a bool. (David can change this while integrating.)
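
A minimal sketch of the suggested change (same identifiers as in the patch
above):

	bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);

btrfs_fs_incompat() is a yes/no feature check, so bool states the intent
directly.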

Thanks, Anand
Qu Wenruo Oct. 30, 2019, 4:59 a.m. UTC | #2
On 2019/10/10 10:39 AM, Qu Wenruo wrote:
> Refactor the work inside the loop of btrfs_read_block_groups() into a
> separate function, read_one_block_group().
> 
> This allows read_one_block_group() to be reused for the later BG_TREE
> feature.
> 
> The refactor also includes the following extra fix:
> - Use btrfs_fs_incompat() to replace the open-coded feature check
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
> Reviewed-by: Anand Jain <anand.jain@oracle.com>

Hi David,

Would you mind adding this patch to the for-next branch?

Considering the recent changes to struct btrfs_block_group_cache, there
are some considerable conflicts.

It would be much easier to solve them sooner rather than later.
If needed, I could send a newer version based on the latest for-next branch.

Thanks,
Qu

David Sterba Nov. 4, 2019, 7:53 p.m. UTC | #3
On Wed, Oct 30, 2019 at 04:59:17AM +0000, Qu WenRuo wrote:
> 
> 
> On 2019/10/10 10:39 AM, Qu Wenruo wrote:
> > Refactor the work inside the loop of btrfs_read_block_groups() into a
> > separate function, read_one_block_group().
> > 
> > This allows read_one_block_group() to be reused for the later BG_TREE
> > feature.
> > 
> > The refactor also includes the following extra fix:
> > - Use btrfs_fs_incompat() to replace the open-coded feature check
> > 
> > Signed-off-by: Qu Wenruo <wqu@suse.com>
> > Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
> > Reviewed-by: Anand Jain <anand.jain@oracle.com>
> 
> Hi David,
> 
> Would you mind adding this patch to the for-next branch?
> 
> Considering the recent changes to struct btrfs_block_group_cache, there
> are some considerable conflicts.

I see, as the patch is independent I'll add it.

> It would be much easier to solve them sooner rather than later.
> If needed, I could send a newer version based on the latest for-next branch.

I've fixed the conflicts, but please have a look anyway. The changes were
cache->item becoming a local block group item and the rename of found_key
to key in read_one_block_group.
David Sterba Nov. 4, 2019, 7:55 p.m. UTC | #4
On Thu, Oct 10, 2019 at 10:39:27AM +0800, Qu Wenruo wrote:
> +static int read_one_block_group(struct btrfs_fs_info *info,
> +				struct btrfs_path *path,
> +				int need_clear)
> +{
> +	struct extent_buffer *leaf = path->nodes[0];
> +	struct btrfs_block_group_cache *cache;
> +	struct btrfs_space_info *space_info;
> +	struct btrfs_key key;
> +	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
> +	int slot = path->slots[0];
> +	int ret;
> +
> +	btrfs_item_key_to_cpu(leaf, &key, slot);

The first thing done here is the same as in the caller:

> +		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
> +		ret = read_one_block_group(info, path, need_clear);

The key can be passed in by pointer, so read_one_block_group() does not
need its own on-stack copy and the duplicate conversion can be removed. I
left it in the patch, please send a followup. Thanks.
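
A minimal sketch of the suggested followup (signature and caller are
assumptions based on the comment above; the final patch may differ):

	static int read_one_block_group(struct btrfs_fs_info *info,
					struct btrfs_path *path,
					const struct btrfs_key *key,
					int need_clear)
	{
		struct extent_buffer *leaf = path->nodes[0];

		/* No second btrfs_item_key_to_cpu() needed here. */
		ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
		...
	}

The caller keeps its single conversion and passes the key down:

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ret = read_one_block_group(info, path, &key, need_clear);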
David Sterba Nov. 4, 2019, 9:44 p.m. UTC | #5
On Mon, Nov 04, 2019 at 08:53:52PM +0100, David Sterba wrote:
> On Wed, Oct 30, 2019 at 04:59:17AM +0000, Qu WenRuo wrote:
> > 
> > 
> > On 2019/10/10 10:39 AM, Qu Wenruo wrote:
> > > Refactor the work inside the loop of btrfs_read_block_groups() into a
> > > separate function, read_one_block_group().
> > >
> > > This allows read_one_block_group() to be reused for the later BG_TREE
> > > feature.
> > >
> > > The refactor also includes the following extra fix:
> > > - Use btrfs_fs_incompat() to replace the open-coded feature check
> > > 
> > > Signed-off-by: Qu Wenruo <wqu@suse.com>
> > > Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
> > > Reviewed-by: Anand Jain <anand.jain@oracle.com>
> > 
> > Hi David,
> > 
> > Would you mind adding this patch to the for-next branch?
> > 
> > Considering the recent changes to struct btrfs_block_group_cache, there
> > are some considerable conflicts.
> 
> I see, as the patch is independent I'll add it.
> 
> > It would be much easier to solve them sooner rather than later.
> > If needed, I could send a newer version based on the latest for-next branch.
> 
> I've fixed the conflicts, but please have a look anyway. The changes were
> cache->item becoming a local block group item and the rename of found_key
> to key in read_one_block_group.

And it crashes during the self-tests. The patch is in the branch
misc-next-with-bg-refactoring in my github tree, please have a look.
I've removed it from misc-next for now as I need to test for-next, but
it's probably going to be some trivial typo, so the patch will be added
back once it's found. Thanks.
Qu Wenruo Nov. 5, 2019, 12:47 a.m. UTC | #6
On 2019/11/5 5:44 AM, David Sterba wrote:
> On Mon, Nov 04, 2019 at 08:53:52PM +0100, David Sterba wrote:
>> On Wed, Oct 30, 2019 at 04:59:17AM +0000, Qu WenRuo wrote:
>>>
>>>
>>> On 2019/10/10 10:39 AM, Qu Wenruo wrote:
>>>> Refactor the work inside the loop of btrfs_read_block_groups() into a
>>>> separate function, read_one_block_group().
>>>>
>>>> This allows read_one_block_group() to be reused for the later BG_TREE
>>>> feature.
>>>>
>>>> The refactor also includes the following extra fix:
>>>> - Use btrfs_fs_incompat() to replace the open-coded feature check
>>>>
>>>> Signed-off-by: Qu Wenruo <wqu@suse.com>
>>>> Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
>>>> Reviewed-by: Anand Jain <anand.jain@oracle.com>
>>>
>>> Hi David,
>>>
>>> Would you mind adding this patch to the for-next branch?
>>>
>>> Considering the recent changes to struct btrfs_block_group_cache, there
>>> are some considerable conflicts.
>>
>> I see, as the patch is independent I'll add it.
>>
>>> It would be much easier to solve them sooner rather than later.
>>> If needed, I could send a newer version based on the latest for-next branch.
>>
>> I've fixed the conflicts, but please have a look anyway. The changes were
>> cache->item becoming a local block group item and the rename of found_key
>> to key in read_one_block_group.
> 
> And it crashes during the self-tests. The patch is in the branch
> misc-next-with-bg-refactoring in my github tree, please have a look.
> I've removed it from misc-next for now as I need to test for-next, but
> it's probably going to be some trivial typo, so the patch will be added
> back once it's found. Thanks.
>

Found the cause.

The rebased version is missing the cache->flags assignment.
The original patch has this line:

cache->flags = btrfs_block_group_flags(&cache->item);

but it is not in the rebased one.

This is exactly the same bug I hit when developing the skinny-bg-tree
feature.

I'll send out the fix for it, along with the removal of the unneeded
btrfs_item_key_to_cpu() call.
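
The fix is roughly the following (a sketch; the local item name is an
assumption, the exact rebased code may differ):

	struct btrfs_block_group_item bgi;

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	/* The assignment the rebase dropped, causing the self-test crash: */
	cache->flags = btrfs_block_group_flags(&bgi);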

Thanks,
Qu

Patch

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index c906a2b6c2cf..0c5eef0610fa 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1687,6 +1687,109 @@  static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+static int read_one_block_group(struct btrfs_fs_info *info,
+				struct btrfs_path *path,
+				int need_clear)
+{
+	struct extent_buffer *leaf = path->nodes[0];
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_space_info *space_info;
+	struct btrfs_key key;
+	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
+	int slot = path->slots[0];
+	int ret;
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+	ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY);
+
+	cache = btrfs_create_block_group_cache(info, key.objectid,
+					       key.offset);
+	if (!cache)
+		return -ENOMEM;
+
+	if (need_clear) {
+		/*
+		 * When we mount with old space cache, we need to
+		 * set BTRFS_DC_CLEAR and set dirty flag.
+		 *
+		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+		 *    truncate the old free space cache inode and
+		 *    setup a new one.
+		 * b) Setting 'dirty flag' makes sure that we flush
+		 *    the new space cache info onto disk.
+		 */
+		if (btrfs_test_opt(info, SPACE_CACHE))
+			cache->disk_cache_state = BTRFS_DC_CLEAR;
+	}
+	read_extent_buffer(leaf, &cache->item,
+			   btrfs_item_ptr_offset(leaf, slot),
+			   sizeof(cache->item));
+	cache->flags = btrfs_block_group_flags(&cache->item);
+	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+			btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+				  cache->key.objectid);
+			ret = -EINVAL;
+			goto error;
+	}
+
+	/*
+	 * We need to exclude the super stripes now so that the space info has
+	 * super bytes accounted for, otherwise we'll think we have more space
+	 * than we actually do.
+	 */
+	ret = exclude_super_stripes(cache);
+	if (ret) {
+		/* We may have excluded something, so call this just in case. */
+		btrfs_free_excluded_extents(cache);
+		goto error;
+	}
+
+	/*
+	 * Check for two cases, either we are full, and therefore don't need
+	 * to bother with the caching work since we won't find any space, or we
+	 * are empty, and we can just add all the space in and be done with it.
+	 * This saves us _a_lot_ of time, particularly in the full case.
+	 */
+	if (key.offset == btrfs_block_group_used(&cache->item)) {
+		cache->last_byte_to_unpin = (u64)-1;
+		cache->cached = BTRFS_CACHE_FINISHED;
+		btrfs_free_excluded_extents(cache);
+	} else if (btrfs_block_group_used(&cache->item) == 0) {
+		cache->last_byte_to_unpin = (u64)-1;
+		cache->cached = BTRFS_CACHE_FINISHED;
+		add_new_free_space(cache, key.objectid,
+				   key.objectid + key.offset);
+		btrfs_free_excluded_extents(cache);
+	}
+	ret = btrfs_add_block_group_cache(info, cache);
+	if (ret) {
+		btrfs_remove_free_space_cache(cache);
+		goto error;
+	}
+	trace_btrfs_add_block_group(info, cache, 0);
+	btrfs_update_space_info(info, cache->flags, key.offset,
+				btrfs_block_group_used(&cache->item),
+				cache->bytes_super, &space_info);
+
+	cache->space_info = space_info;
+
+	link_block_group(cache);
+
+	set_avail_alloc_bits(info, cache->flags);
+	if (btrfs_chunk_readonly(info, cache->key.objectid)) {
+		inc_block_group_ro(cache, 1);
+	} else if (btrfs_block_group_used(&cache->item) == 0) {
+		ASSERT(list_empty(&cache->bg_list));
+		btrfs_mark_bg_unused(cache);
+	}
+	return 0;
+error:
+	btrfs_put_block_group(cache);
+	return ret;
+}
+
 int btrfs_read_block_groups(struct btrfs_fs_info *info)
 {
 	struct btrfs_path *path;
@@ -1694,15 +1797,8 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_space_info *space_info;
 	struct btrfs_key key;
-	struct btrfs_key found_key;
-	struct extent_buffer *leaf;
 	int need_clear = 0;
 	u64 cache_gen;
-	u64 feature;
-	int mixed;
-
-	feature = btrfs_super_incompat_flags(info->super_copy);
-	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
 
 	key.objectid = 0;
 	key.offset = 0;
@@ -1726,108 +1822,13 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		if (ret != 0)
 			goto error;
 
-		leaf = path->nodes[0];
-		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
-		cache = btrfs_create_block_group_cache(info, found_key.objectid,
-						       found_key.offset);
-		if (!cache) {
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		if (need_clear) {
-			/*
-			 * When we mount with old space cache, we need to
-			 * set BTRFS_DC_CLEAR and set dirty flag.
-			 *
-			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
-			 *    truncate the old free space cache inode and
-			 *    setup a new one.
-			 * b) Setting 'dirty flag' makes sure that we flush
-			 *    the new space cache info onto disk.
-			 */
-			if (btrfs_test_opt(info, SPACE_CACHE))
-				cache->disk_cache_state = BTRFS_DC_CLEAR;
-		}
-
-		read_extent_buffer(leaf, &cache->item,
-				   btrfs_item_ptr_offset(leaf, path->slots[0]),
-				   sizeof(cache->item));
-		cache->flags = btrfs_block_group_flags(&cache->item);
-		if (!mixed &&
-		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
-		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
-			btrfs_err(info,
-"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
-				  cache->key.objectid);
-			ret = -EINVAL;
-			btrfs_put_block_group(cache);
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+		ret = read_one_block_group(info, path, need_clear);
+		if (ret < 0)
 			goto error;
-		}
-
-		key.objectid = found_key.objectid + found_key.offset;
+		key.objectid += key.offset;
+		key.offset = 0;
 		btrfs_release_path(path);
-
-		/*
-		 * We need to exclude the super stripes now so that the space
-		 * info has super bytes accounted for, otherwise we'll think
-		 * we have more space than we actually do.
-		 */
-		ret = exclude_super_stripes(cache);
-		if (ret) {
-			/*
-			 * We may have excluded something, so call this just in
-			 * case.
-			 */
-			btrfs_free_excluded_extents(cache);
-			btrfs_put_block_group(cache);
-			goto error;
-		}
-
-		/*
-		 * Check for two cases, either we are full, and therefore
-		 * don't need to bother with the caching work since we won't
-		 * find any space, or we are empty, and we can just add all
-		 * the space in and be done with it.  This saves us _a_lot_ of
-		 * time, particularly in the full case.
-		 */
-		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-			cache->last_byte_to_unpin = (u64)-1;
-			cache->cached = BTRFS_CACHE_FINISHED;
-			btrfs_free_excluded_extents(cache);
-		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			cache->last_byte_to_unpin = (u64)-1;
-			cache->cached = BTRFS_CACHE_FINISHED;
-			add_new_free_space(cache, found_key.objectid,
-					   found_key.objectid +
-					   found_key.offset);
-			btrfs_free_excluded_extents(cache);
-		}
-
-		ret = btrfs_add_block_group_cache(info, cache);
-		if (ret) {
-			btrfs_remove_free_space_cache(cache);
-			btrfs_put_block_group(cache);
-			goto error;
-		}
-
-		trace_btrfs_add_block_group(info, cache, 0);
-		btrfs_update_space_info(info, cache->flags, found_key.offset,
-					btrfs_block_group_used(&cache->item),
-					cache->bytes_super, &space_info);
-
-		cache->space_info = space_info;
-
-		link_block_group(cache);
-
-		set_avail_alloc_bits(info, cache->flags);
-		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			inc_block_group_ro(cache, 1);
-		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			ASSERT(list_empty(&cache->bg_list));
-			btrfs_mark_bg_unused(cache);
-		}
 	}
 
 	list_for_each_entry_rcu(space_info, &info->space_info, list) {