[08/12] btrfs-progs: volume: align chunk allocation to zones

Message ID 20190607131751.5359-8-naohiro.aota@wdc.com
State New
Series
  • [01/12] btrfs-progs: build: Check zoned block device support

Commit Message

Naohiro Aota June 7, 2019, 1:17 p.m. UTC
To facilitate support for zoned block devices in the extent buffer
allocation, a chunk on a zoned block device is always aligned to a zone of
the device. With this, the zone write pointer location simply becomes a
hint for allocating new buffers.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 volumes.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 75 insertions(+), 4 deletions(-)
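
A short stand-alone sketch (not part of the patch) of the round-up that
btrfs_zone_align() below performs: because the zone size is guaranteed to
be a power of two, alignment reduces to an add-and-mask. The 256 MiB zone
size is a made-up example value; the real code reads it from struct
btrfs_zone_info.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as btrfs_zone_align(): round val up to a zone boundary. */
static uint64_t zone_align(uint64_t zone_size, uint64_t val)
{
	return (val + zone_size - 1) & ~(zone_size - 1);
}

int main(void)
{
	uint64_t zone_size = 256ULL * 1024 * 1024;	/* assumed 256 MiB zones */

	/* The 1 MiB minimum search start rounds up to the first zone boundary. */
	printf("%llu\n", (unsigned long long)zone_align(zone_size, 1024 * 1024));
	/* An already aligned offset is returned unchanged. */
	printf("%llu\n", (unsigned long long)zone_align(zone_size, 3 * zone_size));
	return 0;
}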

Patch

diff --git a/volumes.c b/volumes.c
index f6d1b1e9dc7f..64b42643390b 100644
--- a/volumes.c
+++ b/volumes.c
@@ -399,6 +399,34 @@  int btrfs_scan_one_device(int fd, const char *path,
 	return ret;
 }
 
+/* zone size is ensured to be power of 2 */
+static u64 btrfs_zone_align(struct btrfs_zone_info *zinfo, u64 val)
+{
+	if (zinfo && zinfo->zone_size)
+		return (val + zinfo->zone_size - 1) & ~(zinfo->zone_size - 1);
+	return val;
+}
+
+static bool check_dev_zone(struct btrfs_zone_info *zinfo, u64 physical,
+			   u64 num_bytes)
+{
+	u64 zone_size = zinfo->zone_size;
+	int zone_is_random;
+
+	WARN_ON(!IS_ALIGNED(num_bytes, zone_size));
+	zone_is_random = zone_is_random_write(zinfo, physical);
+
+	while (num_bytes) {
+		if (zone_is_random != zone_is_random_write(zinfo, physical))
+			return false;
+
+		physical += zone_size;
+		num_bytes -= zone_size;
+	}
+
+	return true;
+}
+
 /*
  * find_free_dev_extent_start - find free space in the specified device
  * @device:	  the device which we search the free space in
@@ -428,6 +456,7 @@  static int find_free_dev_extent_start(struct btrfs_device *device,
 	struct btrfs_root *root = device->dev_root;
 	struct btrfs_dev_extent *dev_extent;
 	struct btrfs_path *path;
+	struct btrfs_zone_info *zinfo = &device->zinfo;
 	u64 hole_size;
 	u64 max_hole_start;
 	u64 max_hole_size;
@@ -445,6 +474,7 @@  static int find_free_dev_extent_start(struct btrfs_device *device,
 	 */
 	min_search_start = max(root->fs_info->alloc_start, (u64)SZ_1M);
 	search_start = max(search_start, min_search_start);
+	search_start = btrfs_zone_align(zinfo, search_start);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -497,6 +527,18 @@  static int find_free_dev_extent_start(struct btrfs_device *device,
 			goto next;
 
 		if (key.offset > search_start) {
+			if (zinfo && zinfo->zone_size) {
+				while (key.offset > search_start) {
+					hole_size = key.offset - search_start;
+					if (hole_size < num_bytes)
+						break;
+					if (check_dev_zone(zinfo, search_start,
+							   num_bytes))
+						break;
+					search_start += zinfo->zone_size;
+				}
+			}
+
 			hole_size = key.offset - search_start;
 
 			/*
@@ -527,7 +569,8 @@  static int find_free_dev_extent_start(struct btrfs_device *device,
 		extent_end = key.offset + btrfs_dev_extent_length(l,
 								  dev_extent);
 		if (extent_end > search_start)
-			search_start = extent_end;
+			search_start = btrfs_zone_align(&device->zinfo,
+							extent_end);
 next:
 		path->slots[0]++;
 		cond_resched();
@@ -539,6 +582,18 @@  next:
 	 * search_end may be smaller than search_start.
 	 */
 	if (search_end > search_start) {
+		if (zinfo && zinfo->zone_size) {
+			while (search_end > search_start) {
+				hole_size = search_end - search_start;
+				if (hole_size < num_bytes)
+					break;
+				if (check_dev_zone(zinfo, search_start,
+						   num_bytes))
+					break;
+				search_start += zinfo->zone_size;
+			}
+		}
+
 		hole_size = search_end - search_start;
 
 		if (hole_size > max_hole_size) {
@@ -582,6 +637,9 @@  int btrfs_insert_dev_extent(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
 
+	/* Align to zone for a zoned block device */
+	start = btrfs_zone_align(&device->zinfo, start);
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1065,9 +1123,15 @@  int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 				    btrfs_super_stripesize(info->super_copy));
 	}
 
-	/* we don't want a chunk larger than 10% of the FS */
-	percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
-	max_chunk_size = min(percent_max, max_chunk_size);
+	if (info->fs_devices->hmzoned) {
+		/* Zoned mode uses zone aligned chunks */
+		calc_size = info->fs_devices->zone_size;
+		max_chunk_size = calc_size * num_stripes;
+	} else {
+		/* we don't want a chunk larger than 10% of the FS */
+		percent_max = div_factor(btrfs_super_total_bytes(info->super_copy), 1);
+		max_chunk_size = min(percent_max, max_chunk_size);
+	}
 
 again:
 	if (chunk_bytes_by_type(type, calc_size, num_stripes, sub_stripes) >
@@ -1147,7 +1211,9 @@  again:
 	*num_bytes = chunk_bytes_by_type(type, calc_size,
 					 num_stripes, sub_stripes);
 	index = 0;
+	dev_offset = 0;
 	while(index < num_stripes) {
+		u64 zone_size = device->zinfo.zone_size;
 		struct btrfs_stripe *stripe;
 		BUG_ON(list_empty(&private_devs));
 		cur = private_devs.next;
@@ -1158,11 +1224,16 @@  again:
 		    (index == num_stripes - 1))
 			list_move_tail(&device->dev_list, dev_list);
 
+		if (device->zinfo.zone_size)
+			calc_size = device->zinfo.zone_size;
+
 		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
 			     calc_size, &dev_offset);
 		if (ret < 0)
 			goto out_chunk_map;
 
+		WARN_ON(zone_size && !IS_ALIGNED(dev_offset, zone_size));
+
 		device->bytes_used += calc_size;
 		ret = btrfs_update_device(trans, device);
 		if (ret < 0)