diff mbox series

[11/16] btrfs: lock extents before pages - defrag

Message ID 9d2c5c3625dae4a58dfb42a387f33f1f7be0fe42.1668530684.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Headers show
Series Lock extents before pages | expand

Commit Message

Goldwyn Rodrigues Nov. 15, 2022, 6 p.m. UTC
Lock and flush the range before performing defrag.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/defrag.c | 48 ++++++++++-------------------------------------
 1 file changed, 10 insertions(+), 38 deletions(-)

Comments

Josef Bacik Dec. 13, 2022, 7:08 p.m. UTC | #1
On Tue, Nov 15, 2022 at 12:00:29PM -0600, Goldwyn Rodrigues wrote:
> lock and flush the range before performing defrag.
> 
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>

Reviewed-by: Josef Bacik <josef@toxicpanda.com>

Thanks,

Josef
diff mbox series

Patch

diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index 0a3c261b69c9..5345c121ac46 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -719,9 +719,6 @@  static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 {
 	struct address_space *mapping = inode->vfs_inode.i_mapping;
 	gfp_t mask = btrfs_alloc_write_mask(mapping);
-	u64 page_start = (u64)index << PAGE_SHIFT;
-	u64 page_end = page_start + PAGE_SIZE - 1;
-	struct extent_state *cached_state = NULL;
 	struct page *page;
 	int ret;
 
@@ -751,32 +748,6 @@  static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 		return ERR_PTR(ret);
 	}
 
-	/* Wait for any existing ordered extent in the range */
-	while (1) {
-		struct btrfs_ordered_extent *ordered;
-
-		lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
-		ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
-		unlock_extent(&inode->io_tree, page_start, page_end,
-			      &cached_state);
-		if (!ordered)
-			break;
-
-		unlock_page(page);
-		btrfs_start_ordered_extent(ordered, 1);
-		btrfs_put_ordered_extent(ordered);
-		lock_page(page);
-		/*
-		 * We unlocked the page above, so we need check if it was
-		 * released or not.
-		 */
-		if (page->mapping != mapping || !PagePrivate(page)) {
-			unlock_page(page);
-			put_page(page);
-			goto again;
-		}
-	}
-
 	/*
 	 * Now the page range has no ordered extent any more.  Read the page to
 	 * make it uptodate.
@@ -1074,6 +1045,11 @@  static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	if (!pages)
 		return -ENOMEM;
 
+	/* Lock the pages range */
+	btrfs_lock_and_flush_ordered_range(inode, start_index << PAGE_SHIFT,
+		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		    &cached_state);
+
 	/* Prepare all pages */
 	for (i = 0; i < nr_pages; i++) {
 		pages[i] = defrag_prepare_one_page(inode, start_index + i);
@@ -1086,10 +1062,6 @@  static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	for (i = 0; i < nr_pages; i++)
 		wait_on_page_writeback(pages[i]);
 
-	/* Lock the pages range */
-	lock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-		    (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-		    &cached_state);
 	/*
 	 * Now we have a consistent view about the extent map, re-check
 	 * which range really needs to be defragged.
@@ -1101,7 +1073,7 @@  static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 				     newer_than, do_compress, true,
 				     &target_list, last_scanned_ret);
 	if (ret < 0)
-		goto unlock_extent;
+		goto free_pages;
 
 	list_for_each_entry(entry, &target_list, list) {
 		ret = defrag_one_locked_target(inode, entry, pages, nr_pages,
@@ -1114,10 +1086,6 @@  static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		list_del_init(&entry->list);
 		kfree(entry);
 	}
-unlock_extent:
-	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
-		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
-		      &cached_state);
 free_pages:
 	for (i = 0; i < nr_pages; i++) {
 		if (pages[i]) {
@@ -1126,6 +1094,10 @@  static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 		}
 	}
 	kfree(pages);
+
+	unlock_extent(&inode->io_tree, start_index << PAGE_SHIFT,
+		      (last_index << PAGE_SHIFT) + PAGE_SIZE - 1,
+		      &cached_state);
 	return ret;
 }