[19/21] btrfs: lock extents before pages in relocation

Message ID 8864239884312377b62a36bfa65f1f8f66351855.1677793433.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Series Lock extents before pages

Commit Message

Goldwyn Rodrigues March 2, 2023, 10:25 p.m. UTC
From: Goldwyn Rodrigues <rgoldwyn@suse.com>

While relocating extents, lock the extent range first. The range is
locked before calling setup_relocation_extent_mapping() and unlocked
only after all pages in the cluster have been set dirty.

Metadata reservation is consolidated into a single call covering the
whole cluster, and balance_dirty_pages_ratelimited() is called outside
the extent lock. Readahead is likewise issued once for the whole
cluster instead of per page. The resulting call ordering is sketched
below.

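For reference, the resulting ordering in relocate_file_extent_cluster()
is roughly the following (a simplified sketch of the code this patch
adds, with error handling and the per-page work elided):

        start = cluster->start - offset;
        end = cluster->end - offset;
        len = end + 1 - start;

        /* Readahead once for the whole cluster */
        page_cache_sync_readahead(inode->i_mapping, ra, NULL,
                        start >> PAGE_SHIFT, len >> PAGE_SHIFT);

        /* One metadata reservation for the whole cluster */
        ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, false);

        lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);

        ret = setup_relocation_extent_mapping(inode, start, end, cluster->start);
        for (index = start >> PAGE_SHIFT; index <= last_index && !ret; index++)
                ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);

        unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
        btrfs_delalloc_release_extents(BTRFS_I(inode), len);

        /* Balance dirty pages only after the extent range is unlocked */
        balance_dirty_pages_ratelimited(inode->i_mapping);
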
Q: This rearranges the sequence of calls. Not sure if this is correct.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/relocation.c | 44 +++++++++++++++++++------------------------
 1 file changed, 19 insertions(+), 25 deletions(-)

Patch

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ef13a9d4e370..f15e9b1bfc45 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2911,7 +2911,6 @@  static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
 				u64 start, u64 end, u64 block_start)
 {
 	struct extent_map *em;
-	struct extent_state *cached_state = NULL;
 	int ret = 0;
 
 	em = alloc_extent_map();
@@ -2924,9 +2923,7 @@  static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
 	em->block_start = block_start;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
 	ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
 	free_extent_map(em);
 
 	return ret;
@@ -2971,8 +2968,6 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 	ASSERT(page_index <= last_index);
 	page = find_lock_page(inode->i_mapping, page_index);
 	if (!page) {
-		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
-				page_index, last_index + 1 - page_index);
 		page = find_or_create_page(inode->i_mapping, page_index, mask);
 		if (!page)
 			return -ENOMEM;
@@ -2981,11 +2976,6 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 	if (ret < 0)
 		goto release_page;
 
-	if (PageReadahead(page))
-		page_cache_async_readahead(inode->i_mapping, ra, NULL,
-				page_folio(page), page_index,
-				last_index + 1 - page_index);
-
 	if (!PageUptodate(page)) {
 		btrfs_read_folio(NULL, page_folio(page));
 		lock_page(page);
@@ -3012,16 +3002,7 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 		u64 clamped_end = min(page_end, extent_end);
 		u32 clamped_len = clamped_end + 1 - clamped_start;
 
-		/* Reserve metadata for this range */
-		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
-						      clamped_len, clamped_len,
-						      false);
-		if (ret)
-			goto release_page;
-
 		/* Mark the range delalloc and dirty for later writeback */
-		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-			    &cached_state);
 		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
 						clamped_end, 0, &cached_state);
 		if (ret) {
@@ -3055,9 +3036,6 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 					boundary_start, boundary_end,
 					EXTENT_BOUNDARY);
 		}
-		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
-			      &cached_state);
-		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
 		cur += clamped_len;
 
 		/* Crossed extent end, go to next extent */
@@ -3071,7 +3049,6 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 	unlock_page(page);
 	put_page(page);
 
-	balance_dirty_pages_ratelimited(inode->i_mapping);
 	btrfs_throttle(fs_info);
 	if (btrfs_should_cancel_balance(fs_info))
 		ret = -ECANCELED;
@@ -3092,6 +3069,10 @@  static int relocate_file_extent_cluster(struct inode *inode,
 	struct file_ra_state *ra;
 	int cluster_nr = 0;
 	int ret = 0;
+	u64 start = cluster->start - offset;
+	u64 end = cluster->end - offset;
+	loff_t len = end + 1 - start;
+	struct extent_state *cached_state = NULL;
 
 	if (!cluster->nr)
 		return 0;
@@ -3106,17 +3087,30 @@  static int relocate_file_extent_cluster(struct inode *inode,
 
 	file_ra_state_init(ra, inode->i_mapping);
 
-	ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
-				   cluster->end - offset, cluster->start);
+	page_cache_sync_readahead(inode->i_mapping, ra, NULL,
+			start >> PAGE_SHIFT, len >> PAGE_SHIFT);
+
+	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, false);
 	if (ret)
 		goto out;
 
+	lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
+
+	ret = setup_relocation_extent_mapping(inode, start, end, cluster->start);
+	if (ret)
+		goto unlock;
+
 	last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	for (index = (cluster->start - offset) >> PAGE_SHIFT;
 	     index <= last_index && !ret; index++)
 		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
 	if (ret == 0)
 		WARN_ON(cluster_nr != cluster->nr);
+unlock:
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
+	btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+
+	balance_dirty_pages_ratelimited(inode->i_mapping);
 out:
 	kfree(ra);
 	return ret;