diff mbox series

[v2,03/17] btrfs: zoned: calculate free space from zone capacity

Message ID 03bf2db22301fcc6706d489dab1dc3ed6ac54a8e.1629349224.git.naohiro.aota@wdc.com (mailing list archive)
State New, archived
Headers show
Series ZNS Support for Btrfs | expand

Commit Message

Naohiro Aota Aug. 19, 2021, 12:19 p.m. UTC
Now that we have introduced capacity in a block group, we need to calculate
free space using the capacity instead of the length. Thus, we account the
bytes in [alloc_offset, capacity) as free, and account the bytes in
[capacity, length) as zone unusable.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/block-group.c      | 6 ++++--
 fs/btrfs/extent-tree.c      | 3 ++-
 fs/btrfs/free-space-cache.c | 8 +++++++-
 fs/btrfs/zoned.c            | 5 +++--
 4 files changed, 16 insertions(+), 6 deletions(-)

Comments

Johannes Thumshirn Aug. 24, 2021, 7:59 a.m. UTC | #1
On 19/08/2021 14:27, Naohiro Aota wrote:
> diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> index da0eee7c9e5f..bb2536c745cd 100644
> --- a/fs/btrfs/free-space-cache.c
> +++ b/fs/btrfs/free-space-cache.c
> @@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
>  	u64 offset = bytenr - block_group->start;
>  	u64 to_free, to_unusable;
>  	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
> +	bool initial = (size == block_group->length);
> +

Nit: Unneeded parenthesis

Otherwise,
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
David Sterba Aug. 24, 2021, 3:27 p.m. UTC | #2
On Tue, Aug 24, 2021 at 07:59:38AM +0000, Johannes Thumshirn wrote:
> On 19/08/2021 14:27, Naohiro Aota wrote:
> > diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
> > index da0eee7c9e5f..bb2536c745cd 100644
> > --- a/fs/btrfs/free-space-cache.c
> > +++ b/fs/btrfs/free-space-cache.c
> > @@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
> >  	u64 offset = bytenr - block_group->start;
> >  	u64 to_free, to_unusable;
> >  	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
> > +	bool initial = (size == block_group->length);
> > +
> 
> Nit: Unneeded parenthesis

No it's actually preferred this way for clarity.
Johannes Thumshirn Aug. 24, 2021, 4:04 p.m. UTC | #3
On 24/08/2021 17:30, David Sterba wrote:
> On Tue, Aug 24, 2021 at 07:59:38AM +0000, Johannes Thumshirn wrote:
>> On 19/08/2021 14:27, Naohiro Aota wrote:
>>> diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
>>> index da0eee7c9e5f..bb2536c745cd 100644
>>> --- a/fs/btrfs/free-space-cache.c
>>> +++ b/fs/btrfs/free-space-cache.c
>>> @@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
>>>  	u64 offset = bytenr - block_group->start;
>>>  	u64 to_free, to_unusable;
>>>  	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
>>> +	bool initial = (size == block_group->length);
>>> +
>>
>> Nit: Unneeded parenthesis
> 
> No it's actually preferred this way for clarity.
> 

Oh good to know.
/me goes fixing another patch
diff mbox series

Patch

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index db368518d42c..de22e3c9599e 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2486,7 +2486,8 @@  struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 	 */
 	trace_btrfs_add_block_group(fs_info, cache, 1);
 	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
-				cache->bytes_super, 0, &cache->space_info);
+				cache->bytes_super, cache->zone_unusable,
+				&cache->space_info);
 	btrfs_update_global_block_rsv(fs_info);
 
 	link_block_group(cache);
@@ -2601,7 +2602,8 @@  void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
 	if (!--cache->ro) {
 		if (btrfs_is_zoned(cache->fs_info)) {
 			/* Migrate zone_unusable bytes back */
-			cache->zone_unusable = cache->alloc_offset - cache->used;
+			cache->zone_unusable = (cache->alloc_offset - cache->used) +
+				(cache->length - cache->zone_capacity);
 			sinfo->bytes_zone_unusable += cache->zone_unusable;
 			sinfo->bytes_readonly -= cache->zone_unusable;
 		}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index fc3da7585fb7..8dafb61c4946 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3796,7 +3796,8 @@  static int do_allocation_zoned(struct btrfs_block_group *block_group,
 		goto out;
 	}
 
-	avail = block_group->length - block_group->alloc_offset;
+	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
+	avail = block_group->zone_capacity - block_group->alloc_offset;
 	if (avail < num_bytes) {
 		if (ffe_ctl->max_extent_size < avail) {
 			/*
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index da0eee7c9e5f..bb2536c745cd 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2539,10 +2539,15 @@  static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
+	bool initial = (size == block_group->length);
+
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
 	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
+	else if (initial)
+		to_free = block_group->zone_capacity;
 	else if (offset >= block_group->alloc_offset)
 		to_free = size;
 	else if (offset + size <= block_group->alloc_offset)
@@ -2755,7 +2760,8 @@  void btrfs_dump_free_space(struct btrfs_block_group *block_group,
 	 */
 	if (btrfs_is_zoned(fs_info)) {
 		btrfs_info(fs_info, "free space %llu",
-			   block_group->length - block_group->alloc_offset);
+			   block_group->zone_capacity -
+			   block_group->alloc_offset);
 		return;
 	}
 
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 579fb03ba937..0eb8ea4d3542 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1265,8 +1265,9 @@  void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
 		return;
 
 	WARN_ON(cache->bytes_super != 0);
-	unusable = cache->alloc_offset - cache->used;
-	free = cache->length - cache->alloc_offset;
+	unusable = (cache->alloc_offset - cache->used) +
+		(cache->length - cache->zone_capacity);
+	free = cache->zone_capacity - cache->alloc_offset;
 
 	/* We only need ->free_space in ALLOC_SEQ block groups */
 	cache->last_byte_to_unpin = (u64)-1;