diff mbox series

[v4,62/68] btrfs: file: make hole punch and zero range page aligned

Message ID 20201021062554.68132-63-wqu@suse.com
State New, archived
Headers show
Series btrfs: add basic rw support for subpage sector size | expand

Commit Message

Qu Wenruo Oct. 21, 2020, 6:25 a.m. UTC
Make hole punch and zero range operations page aligned, to work around
the fact that we can't yet submit subpage write bios.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/file.c | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)
diff mbox series

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 8f44bde1d04e..6e342c466fdf 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2455,6 +2455,8 @@  static int btrfs_punch_hole_lock_range(struct inode *inode,
 				       const u64 lockend,
 				       struct extent_state **cached_state)
 {
+	ASSERT(IS_ALIGNED(lockstart, PAGE_SIZE) &&
+	       IS_ALIGNED(lockend + 1, PAGE_SIZE));
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 		int ret;
@@ -3033,12 +3035,12 @@  enum {
 static int btrfs_zero_range_check_range_boundary(struct inode *inode,
 						 u64 offset)
 {
-	const u64 sectorsize = btrfs_inode_sectorsize(inode);
+	const u32 blocksize = PAGE_SIZE;
 	struct extent_map *em;
 	int ret;
 
-	offset = round_down(offset, sectorsize);
-	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize);
+	offset = round_down(offset, blocksize);
+	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, blocksize);
 	if (IS_ERR(em))
 		return PTR_ERR(em);
 
@@ -3058,14 +3060,13 @@  static int btrfs_zero_range(struct inode *inode,
 			    loff_t len,
 			    const int mode)
 {
-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 	struct extent_map *em;
 	struct extent_changeset *data_reserved = NULL;
 	int ret;
+	const u32 blocksize = PAGE_SIZE;
 	u64 alloc_hint = 0;
-	const u64 sectorsize = btrfs_inode_sectorsize(inode);
-	u64 alloc_start = round_down(offset, sectorsize);
-	u64 alloc_end = round_up(offset + len, sectorsize);
+	u64 alloc_start = round_down(offset, blocksize);
+	u64 alloc_end = round_up(offset + len, blocksize);
 	u64 bytes_to_reserve = 0;
 	bool space_reserved = false;
 
@@ -3105,18 +3106,17 @@  static int btrfs_zero_range(struct inode *inode,
 		 * Part of the range is already a prealloc extent, so operate
 		 * only on the remaining part of the range.
 		 */
-		alloc_start = em_end;
-		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
+		alloc_start = round_down(em_end, blocksize);
 		len = offset + len - alloc_start;
 		offset = alloc_start;
 		alloc_hint = em->block_start + em->len;
 	}
 	free_extent_map(em);
 
-	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
-	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
+	if (round_down(offset, blocksize) ==
+	    round_down(offset + len - 1, blocksize)) {
 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
-				      sectorsize);
+				      blocksize);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR(em);
 			goto out;
@@ -3128,7 +3128,7 @@  static int btrfs_zero_range(struct inode *inode,
 							   mode);
 			goto out;
 		}
-		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
+		if (len < blocksize && em->block_start != EXTENT_MAP_HOLE) {
 			free_extent_map(em);
 			ret = btrfs_truncate_block(inode, offset, len, 0);
 			if (!ret)
@@ -3138,13 +3138,13 @@  static int btrfs_zero_range(struct inode *inode,
 			return ret;
 		}
 		free_extent_map(em);
-		alloc_start = round_down(offset, sectorsize);
-		alloc_end = alloc_start + sectorsize;
+		alloc_start = round_down(offset, blocksize);
+		alloc_end = alloc_start + blocksize;
 		goto reserve_space;
 	}
 
-	alloc_start = round_up(offset, sectorsize);
-	alloc_end = round_down(offset + len, sectorsize);
+	alloc_start = round_up(offset, blocksize);
+	alloc_end = round_down(offset + len, blocksize);
 
 	/*
 	 * For unaligned ranges, check the pages at the boundaries, they might
@@ -3152,12 +3152,12 @@  static int btrfs_zero_range(struct inode *inode,
 	 * they might map to a hole, in which case we need our allocation range
 	 * to cover them.
 	 */
-	if (!IS_ALIGNED(offset, sectorsize)) {
+	if (!IS_ALIGNED(offset, blocksize)) {
 		ret = btrfs_zero_range_check_range_boundary(inode, offset);
 		if (ret < 0)
 			goto out;
 		if (ret == RANGE_BOUNDARY_HOLE) {
-			alloc_start = round_down(offset, sectorsize);
+			alloc_start = round_down(offset, blocksize);
 			ret = 0;
 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
 			ret = btrfs_truncate_block(inode, offset, 0, 0);
@@ -3168,13 +3168,13 @@  static int btrfs_zero_range(struct inode *inode,
 		}
 	}
 
-	if (!IS_ALIGNED(offset + len, sectorsize)) {
+	if (!IS_ALIGNED(offset + len, blocksize)) {
 		ret = btrfs_zero_range_check_range_boundary(inode,
 							    offset + len);
 		if (ret < 0)
 			goto out;
 		if (ret == RANGE_BOUNDARY_HOLE) {
-			alloc_end = round_up(offset + len, sectorsize);
+			alloc_end = round_up(offset + len, blocksize);
 			ret = 0;
 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
 			ret = btrfs_truncate_block(inode, offset + len, 0, 1);