diff mbox series

[v2,3/3] btrfs: remove the BUG_ON() inside extent_range_clear_dirty_for_io()

Message ID 167cb36269f6f08bf0e14d8a564ad75a62c102a8.1716421534.git.wqu@suse.com (mailing list archive)
State New
Headers show
Series btrfs: enhance function extent_range_clear_dirty_for_io() | expand

Commit Message

Qu Wenruo May 22, 2024, 11:47 p.m. UTC
Previously we have BUG_ON() inside extent_range_clear_dirty_for_io(), as
we expect all involved folios are still locked, thus no folio should be
missing.

However, inside extent_range_clear_dirty_for_io() itself, we can skip any
missing folio, handle the remaining ones, and return an error if
anything went wrong.

So this patch removes the BUG_ON() and lets the caller handle the
error.
In the caller we do not have a quick way to clean up on error, but all
the compression routines treat a missing folio as an error and
properly error out. So we only need an ASSERT() for developers;
for non-debug builds the compression routines handle the
error correctly.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/inode.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff mbox series

Patch

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dda47a273813..18b833e58d19 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -890,24 +890,29 @@  static inline void inode_should_defrag(struct btrfs_inode *inode,
 		btrfs_add_inode_defrag(NULL, inode, small_write);
 }
 
-static void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
 	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 	const u64 len = end + 1 - start;
-	unsigned long index = start >> PAGE_SHIFT;
 	unsigned long end_index = end >> PAGE_SHIFT;
+	int ret = 0;
 
 	/* We should not have such large range. */
 	ASSERT(len < U32_MAX);
-	while (index <= end_index) {
+	for (unsigned long index = start >> PAGE_SHIFT;
+	     index <= end_index; index++) {
 		struct folio *folio;
 
 		folio = filemap_get_folio(inode->i_mapping, index);
-		BUG_ON(IS_ERR(folio)); /* Pages should have been locked. */
+		if (IS_ERR(folio)) {
+			if (!ret)
+				ret = PTR_ERR(folio);
+			continue;
+		}
 		btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 		folio_put(folio);
-		index++;
 	}
+	return ret;
 }
 
 /*
@@ -951,7 +956,16 @@  static void compress_file_range(struct btrfs_work *work)
 	 * Otherwise applications with the file mmap'd can wander in and change
 	 * the page contents while we are compressing them.
 	 */
-	extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+	ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+	/*
+	 * All the folios should have been locked thus no failure.
+	 *
+	 * Even if some folios are missing, btrfs_compress_folios()
+	 * would handle them correctly, so here just do an ASSERT() check for
+	 * early logic errors.
+	 */
+	ASSERT(ret == 0);
 
 	/*
 	 * We need to save i_size before now because it could change in between