@@ -2697,15 +2697,17 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	int bg_reclaim_threshold = 0;
-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	bool initial = false;
 	u64 reclaimable_unusable;
 
-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+	spin_lock(&block_group->lock);
+	if (size == block_group->length && block_group->alloc_offset == 0)
+		initial = true;
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
 	if (!initial)
 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
 	else if (initial)
@@ -2718,7 +2720,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		to_free = offset + size - block_group->alloc_offset;
 	to_unusable = size - to_free;
 
+	spin_lock(&ctl->tree_lock);
 	ctl->free_space += to_free;
+	spin_unlock(&ctl->tree_lock);
 	/*
 	 * If the block group is read-only, we should account freed space into
 	 * bytes_readonly.
@@ -2727,11 +2731,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		block_group->zone_unusable += to_unusable;
 		WARN_ON(block_group->zone_unusable > block_group->length);
 	}
-	spin_unlock(&ctl->tree_lock);
 	if (!used) {
-		spin_lock(&block_group->lock);
 		block_group->alloc_offset -= size;
-		spin_unlock(&block_group->lock);
 	}
 
 	reclaimable_unusable = block_group->zone_unusable -
@@ -2745,6 +2746,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
 
+	spin_unlock(&block_group->lock);
+
 	return 0;
 }
__btrfs_add_free_space_zoned() references and modifies the block
group's alloc_offset, ro, and zone_unusable, but without taking the
lock. It is mostly safe because these values only increase
monotonically (at least for now) and because this function is mostly
called from a transaction commit, which is itself serialized. Still,
taking the lock is the safer and more correct option, and I'm going to
add a change that resets zone_unusable while a block group is still
alive. So, add locking around the operations.

Fixes: 169e0da91a21 ("btrfs: zoned: track unusable bytes for zones")
CC: stable@vger.kernel.org # 5.15+
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
v3:
  - Rewrite the initial freeing detection with "if"
v2:
  - Use plain spin_lock()s instead of guard()s
  - Drop unnecessary empty line
---
 fs/btrfs/free-space-cache.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
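For orientation, here is a condensed sketch of how the function reads
with the patch applied. It is reconstructed from the hunks above, not
verbatim kernel source; context the excerpt elides is summarized in
comments. It makes the resulting lock nesting explicit: block_group->lock
is the outer lock held across the whole update, while ctl->tree_lock
becomes an inner lock scoped to the single ctl->free_space update.

/* Sketch only: reconstructed from the hunks above, not verbatim source. */
static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
					u64 bytenr, u64 size, bool used)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	u64 offset = bytenr - block_group->start;
	u64 to_free = 0, to_unusable = 0;
	bool initial = false;

	/* Outer lock: now covers alloc_offset, ro and zone_unusable. */
	spin_lock(&block_group->lock);
	if (size == block_group->length && block_group->alloc_offset == 0)
		initial = true;
	WARN_ON(!initial && offset + size > block_group->zone_capacity);

	/* ... compute to_free/to_unusable against alloc_offset (elided) ... */

	/* Inner lock: only the free-space counter needs ctl->tree_lock. */
	spin_lock(&ctl->tree_lock);
	ctl->free_space += to_free;
	spin_unlock(&ctl->tree_lock);

	if (!block_group->ro) {
		block_group->zone_unusable += to_unusable;
		WARN_ON(block_group->zone_unusable > block_group->length);
	}
	if (!used) {
		block_group->alloc_offset -= size;
	}

	/* ... reclaim-threshold checks and bg marking (elided) ... */

	spin_unlock(&block_group->lock);

	return 0;
}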
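The v2 changelog entry about guard()s refers to the scope-based lock
guards from <linux/cleanup.h>, reachable through <linux/spinlock.h>. As
a minimal illustration of the pattern v2 presumably used (the v2 patch
itself is not shown here), a guard releases the lock automatically when
it goes out of scope:

#include <linux/spinlock.h>	/* provides guard(spinlock) */

/* Hypothetical helper, for illustration only. */
static void sketch_dec_alloc_offset(struct btrfs_block_group *block_group,
				    u64 size)
{
	guard(spinlock)(&block_group->lock);
	block_group->alloc_offset -= size;
}	/* the guard drops block_group->lock here, on scope exit */

v3 switched to plain spin_lock()/spin_unlock() pairs, which keep the
unlock point visible in the diff.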