diff mbox series

[v3,42/49] btrfs: extent_io: make set_extent_buffer_dirty() to support subpage sized metadata

Message ID 20200930015539.48867-43-wqu@suse.com
State New, archived
Headers show
Series btrfs: add partial rw support for subpage sector size | expand

Commit Message

Qu Wenruo Sept. 30, 2020, 1:55 a.m. UTC
For set_extent_buffer_dirty() to support subpage sized metadata, we only
need to call set_extent_dirty().

As any dirty extent buffer in the page would make the whole page dirty,
we can re-use the existing routine without problem; we just need to add
the above-mentioned set_extent_dirty() call inside set_extent_buffer_dirty().

Since a page is now dirty if any extent buffer in it is dirty, the
WARN_ON() in alloc_extent_buffer() can be falsely triggered. Therefore
convert the WARN_ON(PageDirty()) check into assert_eb_range_not_dirty(),
which also supports the subpage case.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d9a05979396d..ae7ab7364115 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5354,6 +5354,22 @@  struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 }
 #endif
 
+static void assert_eb_range_not_dirty(struct extent_buffer *eb,
+				      struct page *page)
+{
+	struct btrfs_fs_info *fs_info = eb->fs_info;
+
+	if (btrfs_is_subpage(fs_info) && page->mapping) {
+		struct extent_io_tree *io_tree = info_to_btree_io_tree(fs_info);
+
+		WARN_ON(test_range_bit(io_tree, eb->start,
+				eb->start + eb->len - 1, EXTENT_DIRTY, 0,
+				NULL));
+	} else {
+		WARN_ON(PageDirty(page));
+	}
+}
+
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start)
 {
@@ -5426,12 +5442,13 @@  struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			 * drop the ref the old guy had.
 			 */
 			ClearPagePrivate(p);
+			assert_eb_range_not_dirty(eb, p);
 			WARN_ON(PageDirty(p));
 			put_page(p);
 		}
 		attach_extent_buffer_page(eb, p);
 		spin_unlock(&mapping->private_lock);
-		WARN_ON(PageDirty(p));
+		assert_eb_range_not_dirty(eb, p);
 		eb->pages[i] = p;
 		if (!PageUptodate(p))
 			uptodate = 0;
@@ -5651,6 +5668,22 @@  bool set_extent_buffer_dirty(struct extent_buffer *eb)
 		for (i = 0; i < num_pages; i++)
 			set_page_dirty(eb->pages[i]);
 
+	/*
+	 * For subpage size, also set the sector aligned EXTENT_DIRTY range for
+	 * btree io tree
+	 */
+	if (btrfs_is_subpage(eb->fs_info)) {
+		struct extent_io_tree *io_tree =
+			info_to_btree_io_tree(eb->fs_info);
+
+		/*
+		 * set_extent_buffer_dirty() can be called with
+		 * path->leave_spinning == 1, in that case we can't sleep.
+		 */
+		set_extent_dirty(io_tree, eb->start, eb->start + eb->len - 1,
+				 GFP_ATOMIC);
+	}
+
 #ifdef CONFIG_BTRFS_DEBUG
 	for (i = 0; i < num_pages; i++)
 		ASSERT(PageDirty(eb->pages[i]));