[3/3] btrfs: factor out reading of bg from find_first_block_group
diff mbox series

Message ID 20200526142124.36202-4-johannes.thumshirn@wdc.com
State New
Headers show
Series
  • 3 small cleanups for find_first_block_group
Related show

Commit Message

Johannes Thumshirn May 26, 2020, 2:21 p.m. UTC
When find_first_block_group() finds a block group item in the extent-tree,
it does a lookup of the object in the extent mapping tree and does further
checks on the item.

Factor out this step from find_first_block_group() so we can further
simplify the code.

As a bonus we even get a slight decrease in size:
$ ./scripts/bloat-o-meter btrfs_old.ko btrfs.ko
add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-2503 (-2503)
Function                                     old     new   delta
btrfs_read_block_groups.cold                 462     337    -125
btrfs_read_block_groups                     4787    2409   -2378
Total: Before=2369371, After=2366868, chg -0.11%

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 fs/btrfs/block-group.c | 95 ++++++++++++++++++++++--------------------
 1 file changed, 49 insertions(+), 46 deletions(-)

Comments

Nikolay Borisov May 26, 2020, 3:33 p.m. UTC | #1
On 26.05.20 г. 17:21 ч., Johannes Thumshirn wrote:
> When find_first_block_group() finds a block group item in the extent-tree,
> it does a lookup of the object in the extent mapping tree and does further
> checks on the item.
> 
> Factor out this step from find_first_block_group() so we can further
> simplify the code.
> 
> As a bonus we even get a slight decrease in size:
> $ ./scripts/bloat-o-meter btrfs_old.ko btrfs.ko
> add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-2503 (-2503)
> Function                                     old     new   delta
> btrfs_read_block_groups.cold                 462     337    -125
> btrfs_read_block_groups                     4787    2409   -2378
> Total: Before=2369371, After=2366868, chg -0.11%

Always be careful of such results since this is likely due to less
inlining. The bulk of the size overhead is likely in the new function
and now those have been replaced by a 'call' and a function prologue
etc. So this doesn't always mean better performance is all I'm trying to
say ;)

> 
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Reviewed-by: Nikolay Borisov <nborisov@suse.com>

Although look down for one minor discussion point.

> ---
>  fs/btrfs/block-group.c | 95 ++++++++++++++++++++++--------------------
>  1 file changed, 49 insertions(+), 46 deletions(-)
> 
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index c4462e4c8413..3d9e0ee1d1be 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1522,6 +1522,52 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
>  	spin_unlock(&fs_info->unused_bgs_lock);
>  }
>  
> +static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
> +			   struct extent_buffer *leaf, int slot)

nit: I wonder if instead of passing leaf/slot it'll be better to pass
btrfs_path, since that function always refers to nodes/slots [0]. My gut
feeling is this provides better interface abstraction, but this is
effectively a private function, so I might be overthinking it.

<snip>
Johannes Thumshirn May 27, 2020, 7:46 a.m. UTC | #2
On 26/05/2020 17:33, Nikolay Borisov wrote:
[...]
> Always be careful of such results since this is likely due to less
> inlining. The bulk of the size overhead is likely in the new function
> and now those have been replaced by a 'call' and a function prologue
> etc. So this doesn't always mean better performance is all I'm trying to
> say ;)

Yep I know, I've just included it as a small "bonus" but my point was the 
increased readability.


>> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> 
> Reviewed-by: Nikolay Borisov <nborisov@suse.com>
> 
> Although look down for one minor discussion point.

[...]

>> +static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
>> +			   struct extent_buffer *leaf, int slot)
> 
> nit: I wonder if instead of passing leaf/slot it'll be better to pass
> btrfs_path, since that function always refers to nodes/slots [0]. My gut
> feeling is this provides better interface abstraction, but this is
> effectively a private function, so I might be overthinking it.

Agreed, I'll add it in a v2.

Patch
diff mbox series

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index c4462e4c8413..3d9e0ee1d1be 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1522,6 +1522,52 @@  void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
 	spin_unlock(&fs_info->unused_bgs_lock);
 }
 
+static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
+			   struct extent_buffer *leaf, int slot)
+{
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	struct btrfs_block_group_item bg;
+	u64 flags;
+	int ret = 0;
+
+	em_tree = &fs_info->mapping_tree;
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
+	read_unlock(&em_tree->lock);
+	if (!em) {
+		btrfs_err(fs_info,
+			  "logical %llu len %llu found bg but no related chunk",
+			  key->objectid, key->offset);
+		return -ENOENT;
+	}
+
+	if (em->start != key->objectid || em->len != key->offset) {
+		btrfs_err(fs_info,
+			  "block group %llu len %llu mismatch with chunk %llu len %llu",
+			  key->objectid, key->offset, em->start, em->len);
+		ret = -EUCLEAN;
+		goto out_free_em;
+	}
+
+	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
+			   sizeof(bg));
+	flags = btrfs_stack_block_group_flags(&bg) &
+		BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+	if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+		btrfs_err(fs_info,
+			  "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+			  key->objectid, key->offset, flags,
+			  (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
+		ret = -EUCLEAN;
+	}
+
+out_free_em:
+	free_extent_map(em);
+	return ret;
+}
+
 static int find_first_block_group(struct btrfs_fs_info *fs_info,
 				  struct btrfs_path *path,
 				  struct btrfs_key *key)
@@ -1530,8 +1576,6 @@  static int find_first_block_group(struct btrfs_fs_info *fs_info,
 	int ret;
 	struct btrfs_key found_key;
 	struct extent_buffer *leaf;
-	struct btrfs_block_group_item bg;
-	u64 flags;
 	int slot;
 
 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
@@ -1552,50 +1596,9 @@  static int find_first_block_group(struct btrfs_fs_info *fs_info,
 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
 		if (found_key.objectid >= key->objectid &&
-		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
-			struct extent_map_tree *em_tree;
-			struct extent_map *em;
-
-			em_tree = &fs_info->mapping_tree;
-			read_lock(&em_tree->lock);
-			em = lookup_extent_mapping(em_tree, found_key.objectid,
-						   found_key.offset);
-			read_unlock(&em_tree->lock);
-			if (!em) {
-				btrfs_err(fs_info,
-			"logical %llu len %llu found bg but no related chunk",
-					  found_key.objectid, found_key.offset);
-				ret = -ENOENT;
-			} else if (em->start != found_key.objectid ||
-				   em->len != found_key.offset) {
-				btrfs_err(fs_info,
-		"block group %llu len %llu mismatch with chunk %llu len %llu",
-					  found_key.objectid, found_key.offset,
-					  em->start, em->len);
-				ret = -EUCLEAN;
-			} else {
-				read_extent_buffer(leaf, &bg,
-					btrfs_item_ptr_offset(leaf, slot),
-					sizeof(bg));
-				flags = btrfs_stack_block_group_flags(&bg) &
-					BTRFS_BLOCK_GROUP_TYPE_MASK;
-
-				if (flags != (em->map_lookup->type &
-					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
-					btrfs_err(fs_info,
-"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
-						found_key.objectid,
-						found_key.offset, flags,
-						(BTRFS_BLOCK_GROUP_TYPE_MASK &
-						 em->map_lookup->type));
-					ret = -EUCLEAN;
-				} else {
-					ret = 0;
-				}
-			}
-			free_extent_map(em);
-			return ret;
-		}
+		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
+			return read_bg_from_eb(fs_info, &found_key, leaf, slot);
+
 		path->slots[0]++;
 	}