diff mbox series

[v5,1/5] btrfs: make __extent_writepage_io() to write specified range only

Message ID 3505552899f398a1d8959baa8245efac7a2070b6.1716008374.git.wqu@suse.com (mailing list archive)
State New
Headers show
Series btrfs: subpage + zoned fixes | expand

Commit Message

Qu Wenruo May 18, 2024, 5:07 a.m. UTC
Function __extent_writepage_io() is designed to find all dirty ranges of
a page, and add those dirty ranges into the bio_ctrl for submission.
It requires all the dirty ranges to be covered by an ordered extent.

It gets called in two locations, but one call site is not subpage aware:

- __extent_writepage()
  It gets called when writepage_delalloc() returns 0, which means
  writepage_delalloc() has handled delalloc for all subpage sectors
  inside the page.

  So this call site is OK.

- extent_write_locked_range()
  This call site is utilized by zoned support, and in this case, we may
  only run delalloc range for a subset of the page, like this: (64K page
  size)

  0     16K     32K     48K     64K
  |/////|       |///////|       |

  In the above case, if extent_write_locked_range() is only triggered for
  range [0, 16K), __extent_writepage_io() would still try to submit
  the dirty range of [32K, 48K), then it would not find any ordered
  extent for it and trigger various ASSERT()s.

Fix this problem by:

- Introducing @start and @len parameters to specify the range

  For the first call site, we just pass the whole page, and the behavior
  is not touched, since run_delalloc_range() for the page should have
  created all ordered extents for the page.

  For the second call site, we would avoid touching anything beyond the
  range, thus avoid the dirty range which is not yet covered by any
  delalloc range.

- Making btrfs_folio_assert_not_dirty() subpage aware
  The only caller is inside __extent_writepage_io(), and since that
  caller now accepts a subpage range, we should also check the subpage
  range rather than the whole page.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 18 +++++++++++-------
 fs/btrfs/subpage.c   | 22 ++++++++++++++++------
 fs/btrfs/subpage.h   |  3 ++-
 3 files changed, 29 insertions(+), 14 deletions(-)

Comments

Naohiro Aota May 21, 2024, 7:23 a.m. UTC | #1
On Sat, May 18, 2024 at 02:37:39PM GMT, Qu Wenruo wrote:
> Function __extent_writepage_io() is designed to find all dirty range of
> a page, and add that dirty range into the bio_ctrl for submission.
> It requires all the dirtied range to be covered by an ordered extent.
> 
> It get called in two locations, but one call site is not subpage aware:
> 
> - __extent_writepage()
>   It get called when writepage_delalloc() returned 0, which means
>   writepage_delalloc() has handled dellalloc for all subpage sectors
>   inside the page.
> 
>   So this call site is OK.
> 
> - extent_write_locked_range()
>   This call site is utilized by zoned support, and in this case, we may
>   only run delalloc range for a subset of the page, like this: (64K page
>   size)
> 
>   0     16K     32K     48K     64K
>   |/////|       |///////|       |
> 
>   In above case, if extent_write_locked_range() is only triggered for
>   range [0, 16K), __extent_writepage_io() would still try to submit
>   the dirty range of [32K, 48K), then it would not find any ordered
>   extent for it and trigger various ASSERT()s.
> 
> Fix this problem by:
> 
> - Introducing @start and @len parameters to specify the range
> 
>   For the first call site, we just pass the whole page, and the behavior
>   is not touched, since run_delalloc_range() for the page should have
>   created all ordered extents for the page.
> 
>   For the second call site, we would avoid touching anything beyond the
>   range, thus avoid the dirty range which is not yet covered by any
>   delalloc range.
> 
> - Making btrfs_folio_assert_not_dirty() subpage aware
>   The only caller is inside __extent_writepage_io(), and since that
>   caller now accepts a subpage range, we should also check the subpage
>   range other than the whole page.
> 
> Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/extent_io.c | 18 +++++++++++-------
>  fs/btrfs/subpage.c   | 22 ++++++++++++++++------
>  fs/btrfs/subpage.h   |  3 ++-
>  3 files changed, 29 insertions(+), 14 deletions(-)
> 
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index 597387e9f040..8a4a7d00795f 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -1339,20 +1339,23 @@ static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
>   * < 0 if there were errors (page still locked)
>   */
>  static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
> -				 struct page *page,
> +				 struct page *page, u64 start, u32 len,
>  				 struct btrfs_bio_ctrl *bio_ctrl,
>  				 loff_t i_size,
>  				 int *nr_ret)
>  {
>  	struct btrfs_fs_info *fs_info = inode->root->fs_info;
> -	u64 cur = page_offset(page);
> -	u64 end = cur + PAGE_SIZE - 1;
> +	u64 cur = start;
> +	u64 end = start + len - 1;
>  	u64 extent_offset;
>  	u64 block_start;
>  	struct extent_map *em;
>  	int ret = 0;
>  	int nr = 0;
>  
> +	ASSERT(start >= page_offset(page) &&
> +	       start + len <= page_offset(page) + PAGE_SIZE);
> +
>  	ret = btrfs_writepage_cow_fixup(page);
>  	if (ret) {
>  		/* Fixup worker will requeue */
> @@ -1441,7 +1444,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
>  		nr++;
>  	}
>  
> -	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
> +	btrfs_folio_assert_not_dirty(fs_info, page_folio(page), start, len);
>  	*nr_ret = nr;
>  	return 0;
>  
> @@ -1499,7 +1502,8 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
>  	if (ret)
>  		goto done;
>  
> -	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
> +	ret = __extent_writepage_io(BTRFS_I(inode), page, page_offset(page),
> +				    PAGE_SIZE, bio_ctrl, i_size, &nr);
>  	if (ret == 1)
>  		return 0;
>  
> @@ -2251,8 +2255,8 @@ void extent_write_locked_range(struct inode *inode, struct page *locked_page,
>  			clear_page_dirty_for_io(page);
>  		}
>  
> -		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
> -					    i_size, &nr);
> +		ret = __extent_writepage_io(BTRFS_I(inode), page, cur, cur_len,
> +					    &bio_ctrl, i_size, &nr);
>  		if (ret == 1)
>  			goto next_page;
>  
> diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
> index 54736f6238e6..183b32f51f51 100644
> --- a/fs/btrfs/subpage.c
> +++ b/fs/btrfs/subpage.c
> @@ -703,19 +703,29 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
>   * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
>   * is cleared.
>   */
> -void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
> +void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
> +				  struct folio *folio, u64 start, u32 len)
>  {
> -	struct btrfs_subpage *subpage = folio_get_private(folio);
> +	struct btrfs_subpage *subpage;
> +	int start_bit;
> +	int nbits;

Can we have these as "unsigned int" to be consistent with the function
interface? But, it's not a big deal so

Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>

> +	unsigned long flags;
>  
>  	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
>  		return;
>  
> -	ASSERT(!folio_test_dirty(folio));
> -	if (!btrfs_is_subpage(fs_info, folio->mapping))
> +	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
> +		ASSERT(!folio_test_dirty(folio));
>  		return;
> +	}
>  
> -	ASSERT(folio_test_private(folio) && folio_get_private(folio));
> -	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
> +	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
> +	nbits = len >> fs_info->sectorsize_bits;
> +	subpage = folio_get_private(folio);
> +	ASSERT(subpage);
> +	spin_lock_irqsave(&subpage->lock, flags);
> +	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
> +	spin_unlock_irqrestore(&subpage->lock, flags);
>  }
>  
>  /*
> diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
> index b6dc013b0fdc..4b363d9453af 100644
> --- a/fs/btrfs/subpage.h
> +++ b/fs/btrfs/subpage.h
> @@ -156,7 +156,8 @@ DECLARE_BTRFS_SUBPAGE_OPS(checked);
>  bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
>  					struct folio *folio, u64 start, u32 len);
>  
> -void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio);
> +void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
> +				  struct folio *folio, u64 start, u32 len);
>  void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
>  			       struct folio *folio, u64 start, u32 len);
>  void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
> -- 
> 2.45.0
>
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 597387e9f040..8a4a7d00795f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1339,20 +1339,23 @@  static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
  * < 0 if there were errors (page still locked)
  */
 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
-				 struct page *page,
+				 struct page *page, u64 start, u32 len,
 				 struct btrfs_bio_ctrl *bio_ctrl,
 				 loff_t i_size,
 				 int *nr_ret)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
-	u64 cur = page_offset(page);
-	u64 end = cur + PAGE_SIZE - 1;
+	u64 cur = start;
+	u64 end = start + len - 1;
 	u64 extent_offset;
 	u64 block_start;
 	struct extent_map *em;
 	int ret = 0;
 	int nr = 0;
 
+	ASSERT(start >= page_offset(page) &&
+	       start + len <= page_offset(page) + PAGE_SIZE);
+
 	ret = btrfs_writepage_cow_fixup(page);
 	if (ret) {
 		/* Fixup worker will requeue */
@@ -1441,7 +1444,7 @@  static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 		nr++;
 	}
 
-	btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
+	btrfs_folio_assert_not_dirty(fs_info, page_folio(page), start, len);
 	*nr_ret = nr;
 	return 0;
 
@@ -1499,7 +1502,8 @@  static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
 	if (ret)
 		goto done;
 
-	ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
+	ret = __extent_writepage_io(BTRFS_I(inode), page, page_offset(page),
+				    PAGE_SIZE, bio_ctrl, i_size, &nr);
 	if (ret == 1)
 		return 0;
 
@@ -2251,8 +2255,8 @@  void extent_write_locked_range(struct inode *inode, struct page *locked_page,
 			clear_page_dirty_for_io(page);
 		}
 
-		ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
-					    i_size, &nr);
+		ret = __extent_writepage_io(BTRFS_I(inode), page, cur, cur_len,
+					    &bio_ctrl, i_size, &nr);
 		if (ret == 1)
 			goto next_page;
 
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 54736f6238e6..183b32f51f51 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -703,19 +703,29 @@  IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
  * Make sure not only the page dirty bit is cleared, but also subpage dirty bit
  * is cleared.
  */
-void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+				  struct folio *folio, u64 start, u32 len)
 {
-	struct btrfs_subpage *subpage = folio_get_private(folio);
+	struct btrfs_subpage *subpage;
+	int start_bit;
+	int nbits;
+	unsigned long flags;
 
 	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
 		return;
 
-	ASSERT(!folio_test_dirty(folio));
-	if (!btrfs_is_subpage(fs_info, folio->mapping))
+	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
+		ASSERT(!folio_test_dirty(folio));
 		return;
+	}
 
-	ASSERT(folio_test_private(folio) && folio_get_private(folio));
-	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
+	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
+	nbits = len >> fs_info->sectorsize_bits;
+	subpage = folio_get_private(folio);
+	ASSERT(subpage);
+	spin_lock_irqsave(&subpage->lock, flags);
+	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
+	spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
 /*
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index b6dc013b0fdc..4b363d9453af 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -156,7 +156,8 @@  DECLARE_BTRFS_SUBPAGE_OPS(checked);
 bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
 					struct folio *folio, u64 start, u32 len);
 
-void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio);
+void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
+				  struct folio *folio, u64 start, u32 len);
 void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
 			       struct folio *folio, u64 start, u32 len);
 void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,