
[v3,46/49] btrfs: extent_io: make lock_extent_buffer_for_io() subpage compatible

Message ID 20200930015539.48867-47-wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: add partial rw support for subpage sector size

Commit Message

Qu Wenruo Sept. 30, 2020, 1:55 a.m. UTC
To support subpage metadata locking, the following aspects are modified:
- Locking sequence
  For regular sectorsize, we lock the extent buffer first, then lock each
  of its pages.
  For subpage sectorsize, we can't do that anymore; instead, the caller
  must lock the whole page first, then we lock each extent buffer in the
  page (see the sketch after this list).

- Extent io tree locking
  For subpage metadata, we also lock the range in the btree io tree.
  This allows the endio function to get unmerged extent_state, so that
  the endio function doesn't need to allocate memory in atomic context.
  This also follows the behavior of the metadata read path.
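
To make the new locking order concrete, below is a minimal pseudocode
sketch of the two sequences. The helper names write_one_eb_regular(),
write_page_subpage() and for_each_extent_buffer_in_page() are
illustrative assumptions only, not functions from this series:

  /* Regular sectorsize: an extent buffer owns its full page(s). */
  static void write_one_eb_regular(struct extent_buffer *eb)
  {
  	int i;

  	/* Lock the extent buffer first ... */
  	btrfs_tree_lock(eb);
  	/* ... then lock each page backing it. */
  	for (i = 0; i < num_extent_pages(eb); i++)
  		lock_page(eb->pages[i]);
  }

  /* Subpage sectorsize: several extent buffers share one page. */
  static void write_page_subpage(struct page *page)
  {
  	struct extent_buffer *eb;

  	/* The caller locks the whole page first ... */
  	lock_page(page);
  	/* ... then locks each extent buffer inside the page. */
  	for_each_extent_buffer_in_page(page, eb)	/* hypothetical */
  		btrfs_tree_lock(eb);
  }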

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 47 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 736bc33a0e64..be8c863f7806 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3803,6 +3803,9 @@  static void end_extent_buffer_writeback(struct extent_buffer *eb)
  * Lock extent buffer status and pages for write back.
  *
  * May try to flush write bio if we can't get the lock.
+ * For subpage extent buffers, the caller is responsible for locking the
+ * page; we won't flush the write bio here, as that could cause extent
+ * buffers in the same page to be submitted to different bios.
  *
  * Return  0 if the extent buffer doesn't need to be submitted.
  * (E.g. the extent buffer is not dirty)
@@ -3813,26 +3816,47 @@  static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 			  struct extent_page_data *epd)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
+	struct extent_io_tree *io_tree = info_to_btree_io_tree(fs_info);
 	int i, num_pages, failed_page_nr;
+	bool extent_locked = false;
 	int flush = 0;
 	int ret = 0;
 
+	if (btrfs_is_subpage(fs_info)) {
+		/*
+		 * For subpage extent buffer writes, the caller is responsible
+		 * for locking the page first.
+		 */
+		ASSERT(PageLocked(eb->pages[0]));
+
+		/*
+		 * Also lock the range so that endio can always get unmerged
+		 * extent_state.
+		 */
+		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
+		if (ret < 0)
+			goto out;
+		extent_locked = true;
+	}
+
 	if (!btrfs_try_tree_write_lock(eb)) {
 		ret = flush_write_bio(epd);
 		if (ret < 0)
-			return ret;
+			goto out;
 		flush = 1;
 		btrfs_tree_lock(eb);
 	}
 
 	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
 		btrfs_tree_unlock(eb);
-		if (!epd->sync_io)
-			return 0;
+		if (!epd->sync_io) {
+			ret = 0;
+			goto out;
+		}
 		if (!flush) {
 			ret = flush_write_bio(epd);
 			if (ret < 0)
-				return ret;
+				goto out;
 			flush = 1;
 		}
 		while (1) {
@@ -3860,11 +3884,19 @@  static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 		ret = 1;
 	} else {
 		spin_unlock(&eb->refs_lock);
+		if (extent_locked)
+			unlock_extent(io_tree, eb->start,
+				      eb->start + eb->len - 1);
 	}
 
 	btrfs_tree_unlock(eb);
 
-	if (!ret)
+	/*
+	 * Either the extent buffer does not need to be submitted, or we're
+	 * submitting a subpage extent buffer.
+	 * Either way, we don't need to lock the page(s).
+	 */
+	if (!ret || btrfs_is_subpage(fs_info))
 		return ret;
 
 	num_pages = num_extent_pages(eb);
@@ -3906,6 +3938,11 @@  static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
 				 fs_info->dirty_metadata_batch);
 	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
 	btrfs_tree_unlock(eb);
+	/* Subpage should never reach this part of the routine */
+	ASSERT(!btrfs_is_subpage(fs_info));
+out:
+	if (extent_locked)
+		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
 	return ret;
 }
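
For reference, here is a rough sketch of how a subpage-aware caller is
expected to drive lock_extent_buffer_for_io() under the new rules. The
names submit_eb_subpage(), for_each_extent_buffer_in_page() and
write_one_subpage_eb() are hypothetical; the real callers come later in
this series:

  static int submit_eb_subpage(struct page *page,
  			       struct extent_page_data *epd)
  {
  	struct extent_buffer *eb;
  	int ret;

  	/* Take the page lock that lock_extent_buffer_for_io() asserts. */
  	lock_page(page);
  	for_each_extent_buffer_in_page(page, eb) {	/* hypothetical */
  		ret = lock_extent_buffer_for_io(eb, epd);
  		if (ret < 0) {
  			unlock_page(page);
  			return ret;
  		}
  		if (ret == 0)
  			continue;	/* eb clean or already under writeback */
  		write_one_subpage_eb(eb, epd);		/* hypothetical */
  	}
  	unlock_page(page);
  	return 0;
  }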