@@ -784,6 +784,61 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
return 0;
}
+static int emulate_write_pointer(struct btrfs_block_group *cache,
+ u64 *offset_ret)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ int ret;
+ u64 length;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
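+ /* Smallest possible key just past the block group; we walk back from here */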
+ key.objectid = cache->start + cache->length;
+ key.type = 0;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ /* We should not find the exact match; pass real errors through */
+ if (!ret)
+ ret = -EUCLEAN;
+ if (ret < 0)
+ goto out;
+
+ ret = btrfs_previous_extent_item(root, path, cache->start);
+ if (ret) {
+ if (ret == 1) {
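+ /* No extent item before the block group: nothing is allocated */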
+ ret = 0;
+ *offset_ret = 0;
+ }
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+
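+ /* For a metadata item, key.offset is the level, so the length is one node */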
+ if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
+ length = found_key.offset;
+ else
+ length = fs_info->nodesize;
+
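+ /* The last extent must sit entirely inside the block group */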
+ if (!(found_key.objectid >= cache->start &&
+ found_key.objectid + length <= cache->start + cache->length)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ *offset_ret = found_key.objectid + length - cache->start;
+ ret = 0;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -798,6 +853,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
int i;
unsigned int nofs_flag;
u64 *alloc_offsets = NULL;
+ u64 emulated_offset = 0;
u32 num_sequential = 0, num_conventional = 0;
if (!btrfs_is_zoned(fs_info))
@@ -899,12 +955,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
}
if (num_conventional > 0) {
- /*
- * Since conventional zones do not have a write pointer, we
- * cannot determine alloc_offset from the pointer
- */
- ret = -EINVAL;
- goto out;
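+ /*
+  * Emulate the write pointer from the end of the last allocated
+  * extent, since conventional zones do not provide one.
+  */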
+ ret = emulate_write_pointer(cache, &emulated_offset);
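+ /* Done on error, or when every zone in the group is conventional */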
+ if (ret || map->num_stripes == num_conventional) {
+ if (!ret)
+ cache->alloc_offset = emulated_offset;
+ else
+ btrfs_err(fs_info,
+ "zoned: failed to emulate write pointer of bg %llu",
+ cache->start);
+ goto out;
+ }
}
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
@@ -926,6 +986,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
}
out:
+ /* An extent is allocated after the write pointer */
+ if (!ret && num_conventional && emulated_offset > cache->alloc_offset) {
+ btrfs_err(fs_info,
+ "zoned: got wrong write pointer in BG %llu: %llu > %llu",
+ logical, emulated_offset, cache->alloc_offset);
+ ret = -EIO;
+ }
+
kfree(alloc_offsets);
free_extent_map(em);

Conventional zones do not have a write pointer, so we cannot use it to
determine the allocation offset if a block group contains a conventional
zone. Instead, we consider the end of the last allocated extent in the
block group as the allocation offset.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/zoned.c | 80 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 74 insertions(+), 6 deletions(-)