[1/3] btrfs: block-group: Refactor btrfs_read_block_groups()

Message ID 20191009130747.47252-1-wqu@suse.com (mailing list archive)
State New, archived
Series [1/3] btrfs: block-group: Refactor btrfs_read_block_groups()

Commit Message

Qu Wenruo Oct. 9, 2019, 1:07 p.m. UTC
Refactor the work inside the loop of btrfs_read_block_groups() into a
separate function, read_one_block_group().

This allows read_one_block_group() to be reused by the later BG_TREE feature.

The refactor also includes the following fixes:
- Use btrfs_fs_incompat() to replace open-coded feature check
- Fix a missing "btrfs_put_block_group(cache)" in error path

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
---
Changelog:
v2 -> v2.1:
- Add commit message for the fixes done in the refactor
- Add reviewed-by tags
---
 fs/btrfs/block-group.c | 214 +++++++++++++++++++++--------------------
 1 file changed, 108 insertions(+), 106 deletions(-)
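
For reference, the first fix listed above drops the open-coded feature
check that btrfs_read_block_groups() used to carry:

	u64 feature;
	int mixed;

	feature = btrfs_super_incompat_flags(info->super_copy);
	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

in favour of the existing helper, now local to read_one_block_group():

	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);

Both snippets are lifted verbatim from the hunks in the patch below.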

Comments

Filipe Manana Oct. 9, 2019, 2:25 p.m. UTC | #1
On Wed, Oct 9, 2019 at 2:09 PM Qu Wenruo <wqu@suse.com> wrote:
>
> Refactor the work inside the loop of btrfs_read_block_groups() into a
> separate function, read_one_block_group().
>
> This allows read_one_block_group() to be reused by the later BG_TREE feature.
>
> The refactor also includes the following fixes:
> - Use btrfs_fs_incompat() to replace open-coded feature check
> - Fix a missing "btrfs_put_block_group(cache)" in error path

I think that (the bug fix for the error path) should go into a separate
patch, so that it can be backported to stable on its own, without pulling
in the refactor (something not meant for stable releases).

Thanks.
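
For context, the reference being leaked is the one held on the block group
cache created by btrfs_create_block_group_cache(): in the old loop, the
mixed-block-group validation sets ret to -EINVAL and jumps to the error
label without calling btrfs_put_block_group(cache), unlike the other error
paths. A minimal standalone version of that fix, sketched against the
pre-refactor code (the refactored read_one_block_group() instead funnels
every failure through a common error label that drops the reference),
could look roughly like:

		if (!mixed &&
		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->key.objectid);
			ret = -EINVAL;
			/* Drop the ref taken by btrfs_create_block_group_cache() */
			btrfs_put_block_group(cache);
			goto error;
		}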

Patch

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index bf7e3f23bba7..0c5eef0610fa 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1687,6 +1687,109 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+static int read_one_block_group(struct btrfs_fs_info *info,
+				struct btrfs_path *path,
+				int need_clear)
+{
+	struct extent_buffer *leaf = path->nodes[0];
+	struct btrfs_block_group_cache *cache;
+	struct btrfs_space_info *space_info;
+	struct btrfs_key key;
+	int mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
+	int slot = path->slots[0];
+	int ret;
+
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+	ASSERT(key.type == BTRFS_BLOCK_GROUP_ITEM_KEY);
+
+	cache = btrfs_create_block_group_cache(info, key.objectid,
+					       key.offset);
+	if (!cache)
+		return -ENOMEM;
+
+	if (need_clear) {
+		/*
+		 * When we mount with old space cache, we need to
+		 * set BTRFS_DC_CLEAR and set dirty flag.
+		 *
+		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+		 *    truncate the old free space cache inode and
+		 *    setup a new one.
+		 * b) Setting 'dirty flag' makes sure that we flush
+		 *    the new space cache info onto disk.
+		 */
+		if (btrfs_test_opt(info, SPACE_CACHE))
+			cache->disk_cache_state = BTRFS_DC_CLEAR;
+	}
+	read_extent_buffer(leaf, &cache->item,
+			   btrfs_item_ptr_offset(leaf, slot),
+			   sizeof(cache->item));
+	cache->flags = btrfs_block_group_flags(&cache->item);
+	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+			btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+				  cache->key.objectid);
+			ret = -EINVAL;
+			goto error;
+	}
+
+	/*
+	 * We need to exclude the super stripes now so that the space info has
+	 * super bytes accounted for, otherwise we'll think we have more space
+	 * than we actually do.
+	 */
+	ret = exclude_super_stripes(cache);
+	if (ret) {
+		/* We may have excluded something, so call this just in case. */
+		btrfs_free_excluded_extents(cache);
+		goto error;
+	}
+
+	/*
+	 * Check for two cases, either we are full, and therefore don't need
+	 * to bother with the caching work since we won't find any space, or we
+	 * are empty, and we can just add all the space in and be done with it.
+	 * This saves us _a_lot_ of time, particularly in the full case.
+	 */
+	if (key.offset == btrfs_block_group_used(&cache->item)) {
+		cache->last_byte_to_unpin = (u64)-1;
+		cache->cached = BTRFS_CACHE_FINISHED;
+		btrfs_free_excluded_extents(cache);
+	} else if (btrfs_block_group_used(&cache->item) == 0) {
+		cache->last_byte_to_unpin = (u64)-1;
+		cache->cached = BTRFS_CACHE_FINISHED;
+		add_new_free_space(cache, key.objectid,
+				   key.objectid + key.offset);
+		btrfs_free_excluded_extents(cache);
+	}
+	ret = btrfs_add_block_group_cache(info, cache);
+	if (ret) {
+		btrfs_remove_free_space_cache(cache);
+		goto error;
+	}
+	trace_btrfs_add_block_group(info, cache, 0);
+	btrfs_update_space_info(info, cache->flags, key.offset,
+				btrfs_block_group_used(&cache->item),
+				cache->bytes_super, &space_info);
+
+	cache->space_info = space_info;
+
+	link_block_group(cache);
+
+	set_avail_alloc_bits(info, cache->flags);
+	if (btrfs_chunk_readonly(info, cache->key.objectid)) {
+		inc_block_group_ro(cache, 1);
+	} else if (btrfs_block_group_used(&cache->item) == 0) {
+		ASSERT(list_empty(&cache->bg_list));
+		btrfs_mark_bg_unused(cache);
+	}
+	return 0;
+error:
+	btrfs_put_block_group(cache);
+	return ret;
+}
+
 int btrfs_read_block_groups(struct btrfs_fs_info *info)
 {
 	struct btrfs_path *path;
@@ -1694,15 +1797,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_space_info *space_info;
 	struct btrfs_key key;
-	struct btrfs_key found_key;
-	struct extent_buffer *leaf;
 	int need_clear = 0;
 	u64 cache_gen;
-	u64 feature;
-	int mixed;
-
-	feature = btrfs_super_incompat_flags(info->super_copy);
-	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
 
 	key.objectid = 0;
 	key.offset = 0;
@@ -1726,107 +1822,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		if (ret != 0)
 			goto error;
 
-		leaf = path->nodes[0];
-		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
-		cache = btrfs_create_block_group_cache(info, found_key.objectid,
-						       found_key.offset);
-		if (!cache) {
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		if (need_clear) {
-			/*
-			 * When we mount with old space cache, we need to
-			 * set BTRFS_DC_CLEAR and set dirty flag.
-			 *
-			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
-			 *    truncate the old free space cache inode and
-			 *    setup a new one.
-			 * b) Setting 'dirty flag' makes sure that we flush
-			 *    the new space cache info onto disk.
-			 */
-			if (btrfs_test_opt(info, SPACE_CACHE))
-				cache->disk_cache_state = BTRFS_DC_CLEAR;
-		}
-
-		read_extent_buffer(leaf, &cache->item,
-				   btrfs_item_ptr_offset(leaf, path->slots[0]),
-				   sizeof(cache->item));
-		cache->flags = btrfs_block_group_flags(&cache->item);
-		if (!mixed &&
-		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
-		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
-			btrfs_err(info,
-"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
-				  cache->key.objectid);
-			ret = -EINVAL;
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+		ret = read_one_block_group(info, path, need_clear);
+		if (ret < 0)
 			goto error;
-		}
-
-		key.objectid = found_key.objectid + found_key.offset;
+		key.objectid += key.offset;
+		key.offset = 0;
 		btrfs_release_path(path);
-
-		/*
-		 * We need to exclude the super stripes now so that the space
-		 * info has super bytes accounted for, otherwise we'll think
-		 * we have more space than we actually do.
-		 */
-		ret = exclude_super_stripes(cache);
-		if (ret) {
-			/*
-			 * We may have excluded something, so call this just in
-			 * case.
-			 */
-			btrfs_free_excluded_extents(cache);
-			btrfs_put_block_group(cache);
-			goto error;
-		}
-
-		/*
-		 * Check for two cases, either we are full, and therefore
-		 * don't need to bother with the caching work since we won't
-		 * find any space, or we are empty, and we can just add all
-		 * the space in and be done with it.  This saves us _a_lot_ of
-		 * time, particularly in the full case.
-		 */
-		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-			cache->last_byte_to_unpin = (u64)-1;
-			cache->cached = BTRFS_CACHE_FINISHED;
-			btrfs_free_excluded_extents(cache);
-		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			cache->last_byte_to_unpin = (u64)-1;
-			cache->cached = BTRFS_CACHE_FINISHED;
-			add_new_free_space(cache, found_key.objectid,
-					   found_key.objectid +
-					   found_key.offset);
-			btrfs_free_excluded_extents(cache);
-		}
-
-		ret = btrfs_add_block_group_cache(info, cache);
-		if (ret) {
-			btrfs_remove_free_space_cache(cache);
-			btrfs_put_block_group(cache);
-			goto error;
-		}
-
-		trace_btrfs_add_block_group(info, cache, 0);
-		btrfs_update_space_info(info, cache->flags, found_key.offset,
-					btrfs_block_group_used(&cache->item),
-					cache->bytes_super, &space_info);
-
-		cache->space_info = space_info;
-
-		link_block_group(cache);
-
-		set_avail_alloc_bits(info, cache->flags);
-		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			inc_block_group_ro(cache, 1);
-		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			ASSERT(list_empty(&cache->bg_list));
-			btrfs_mark_bg_unused(cache);
-		}
 	}
 
 	list_for_each_entry_rcu(space_info, &info->space_info, list) {