diff mbox series

[RFC,22/31] btrfs: Set extents delalloc in iomap_end

Message ID ce5ebbefe63ebdc909fa596a174f870120657472.1623567940.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs buffered iomap support | expand

Commit Message

Goldwyn Rodrigues June 13, 2021, 1:39 p.m. UTC
From: Goldwyn Rodrigues <rgoldwyn@suse.com>

Since the new code path does not call btrfs_dirty_pages(),
set extent delalloc explicitly for the extent just written.
To simplify the flow, modify btrfs_buffered_iomap_end() to use
written_block_end and block_end as the sectorsize-aligned boundaries
of the written range and of the full requested length, respectively.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/file.c | 58 +++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 31 deletions(-)
diff mbox series

Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ab2b1790e0bb..d311b01a2b71 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1682,44 +1682,38 @@  static int btrfs_buffered_iomap_end(struct inode *inode, loff_t pos,
 		loff_t length, ssize_t written, struct btrfs_iomap *bi)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	size_t num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bi->reserved_bytes);
-	size_t dirty_sectors = 0;
-	int dirty_pages = 0;
-	int sector_offset = pos & (fs_info->sectorsize - 1);
-
-	if (written) {
-		dirty_sectors = round_up(written + sector_offset,
-				fs_info->sectorsize);
-		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
-		dirty_pages = DIV_ROUND_UP(written + offset_in_page(pos),
-				PAGE_SIZE);
-	}
+	int ret = 0;
+	size_t release_bytes = 0;
+	u64 start = round_down(pos, fs_info->sectorsize);
+	u64 written_block_end = round_up(pos + written, fs_info->sectorsize) - 1;
+	u64 block_end = round_up(pos + length, fs_info->sectorsize) - 1;
+        int extra_bits = 0;
 
-	/* Release excess reservations */
-	if (num_sectors > dirty_sectors) {
-		size_t release_bytes = bi->reserved_bytes -
-			(dirty_sectors << fs_info->sb->s_blocksize_bits);
-		if (bi->metadata_only) {
-			btrfs_delalloc_release_metadata(BTRFS_I(inode),
-					release_bytes, true);
-		} else {
-			u64 p;
+	if (written == 0)
+		release_bytes = bi->reserved_bytes;
+	else if (written < length)
+		release_bytes = block_end - written_block_end + 1;
 
-			p = round_down(pos,
-					fs_info->sectorsize) +
-				(dirty_pages << PAGE_SHIFT);
-			btrfs_delalloc_release_space(BTRFS_I(inode),
-					bi->data_reserved, p,
-					release_bytes, true);
-		}
-	}
+	if (bi->metadata_only)
+		extra_bits |= EXTENT_NORESERVE;
+
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, written_block_end,
+			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+			 0, 0, &bi->cached_state);
+
+	ret = btrfs_set_extent_delalloc(BTRFS_I(inode), start,
+			written_block_end, extra_bits, &bi->cached_state);
+
+	/* In case of error, release everything in btrfs_iomap_release() */
+	if (ret < 0)
+		release_bytes = bi->reserved_bytes;
 
 	/*
 	 * If we have not locked the extent range, because the range's
 	 * start offset is >= i_size, we might still have a non-NULL
 	 * cached extent state, acquired while marking the extent range
-	 * as delalloc through btrfs_dirty_pages(). Therefore free any
-	 * possible cached extent state to avoid a memory leak.
+	 * as delalloc. Therefore free any possible cached extent state
+	 * to avoid a memory leak.
 	 */
 	if (bi->extents_locked)
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
@@ -1732,6 +1726,8 @@  static int btrfs_buffered_iomap_end(struct inode *inode, loff_t pos,
 	if (bi->metadata_only)
 		btrfs_check_nocow_unlock(BTRFS_I(inode));
 
+	btrfs_iomap_release(inode, pos, release_bytes, bi);
+
 	return 0;
 }