diff mbox series

[10/16] btrfs: decide early if range should be async

Message ID a822dc88a07268a9472823a77f17ee2384408e81.1668530684.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Headers show
Series Lock extents before pages | expand

Commit Message

Goldwyn Rodrigues Nov. 15, 2022, 6 p.m. UTC
Set the BTRFS_INODE_HAS_ASYNC_EXTENT bit early, at writeback start, and use
it to decide whether the range should be written back asynchronously.

Since some pages in the range may be missing, check whether find_get_page()
returned NULL while collecting the compression heuristic sample, and skip
such pages.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/compression.c |  4 ++++
 fs/btrfs/inode.c       | 11 +++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

Comments

Josef Bacik Dec. 13, 2022, 7:07 p.m. UTC | #1
On Tue, Nov 15, 2022 at 12:00:28PM -0600, Goldwyn Rodrigues wrote:
> This sets the async bit early in the writeback and uses it to decide if
> it should write asynchronously.
> 
> Since there could be missing pages, check if page is NULL while
> performing heuristics.
> 
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>


Ooooh ok this makes the comment I had previously not matter, so let's re-arrange
and put this change first so that it's not confusing.  Thanks,

Josef
diff mbox series

Patch

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7f6452c4234e..09b846a516ed 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1610,6 +1610,10 @@  static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
 	curr_sample_pos = 0;
 	while (index < index_end) {
 		page = find_get_page(inode->i_mapping, index);
+		if (!page) {
+			index++;
+			continue;
+		}
 		in_data = kmap_local_page(page);
 		/* Handle case where the start is not aligned to PAGE_SIZE */
 		i = start % PAGE_SIZE;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index aa393219019b..070fb7071e39 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2264,8 +2264,7 @@  int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, nr_written);
-	} else if (!btrfs_inode_can_compress(inode) ||
-		   !inode_need_compress(inode, start, end)) {
+	} else if (!test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags)) {
 		if (zoned)
 			ret = run_delalloc_zoned(inode, locked_page, start, end,
 						 page_started, nr_written);
@@ -2273,7 +2272,6 @@  int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 			ret = cow_file_range(inode, locked_page, start, end,
 					     page_started, nr_written, 1, NULL);
 	} else {
-		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
 					   page_started, nr_written);
 	}
@@ -8257,9 +8255,14 @@  static int btrfs_writepages(struct address_space *mapping,
 	 */
 	async_wb = btrfs_inode_can_compress(inode) &&
 		   inode_need_compress(inode, start, end);
-	if (!async_wb)
+
+	if (async_wb)
+		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
+	else
 		lock_extent(&inode->io_tree, start, end, &cached);
+
 	ret = extent_writepages(mapping, wbc);
+
 	if (!async_wb)
 		unlock_extent(&inode->io_tree, start, end, &cached);