[v3,3/5] btrfs: extent_io: Kill the BUG_ON() in flush_write_bio()

Message ID 20190118021956.22949-4-wqu@suse.com (mailing list archive)
State New, archived
Series btrfs: Enhancement to tree block validation

Commit Message

Qu Wenruo Jan. 18, 2019, 2:19 a.m. UTC
This BUG_ON() is really just a crappy way to work around the __must_check
attribute of submit_one_bio().

Now kill the BUG_ON() and allow flush_write_bio() to return the error
number.

Also add the __must_check attribute to flush_write_bio() and modify all
callers to handle the possible error returned.
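
For reference, __must_check is the kernel's shorthand for gcc's
warn_unused_result attribute, so ignoring the return value of an annotated
function produces a compile-time warning. A minimal standalone sketch of
the mechanism (illustrative only, not btrfs code):

  #include <errno.h>

  /* In the kernel this macro comes from the compiler attribute headers. */
  #define __must_check __attribute__((__warn_unused_result__))

  static int __must_check flush_thing(void)
  {
          return -ENOMEM;         /* pretend the bio submission failed */
  }

  int main(void)
  {
          flush_thing();          /* warning: ignoring return value of 'flush_thing' */
          return 0;
  }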

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 77 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 58 insertions(+), 19 deletions(-)

Comments

David Sterba Jan. 22, 2019, 5:38 p.m. UTC | #1
On Fri, Jan 18, 2019 at 10:19:54AM +0800, Qu Wenruo wrote:
> This BUG_ON() is really just a crappy way to work around the __must_check
> attribute of submit_one_bio().
> 
> Now kill the BUG_ON() and allow flush_write_bio() to return the error
> number.
> 
> Also add the __must_check attribute to flush_write_bio() and modify all
> callers to handle the possible error returned.

Can you please split that into several steps:

1. handle errors in submit_one_bio or pass them to the callers,
   i.e. drop the BUG_ON and move it to all callers

2. in all callers do (see the sketch below)
   ret = flush_write_bio(...)
   BUG_ON(ret)

So now the error is one level up in the call chain and it's up to all the
callers to handle it properly. The code is equivalent to the previous
state, though there are more BUG_ONs.

3. one patch per function that handles errors of flush_write_bio, i.e.
   the actual replacement of BUG_ON with if (ret < 0) etc.

As there are several different functions, each has its own things to clean
up, and it's easier to review them one by one. Sometimes it's necessary
to check more callers, and keeping multiple contexts in mind at once does
not work very well.
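
As a concrete illustration of step 2, a caller such as
extent_write_full_page() would temporarily look roughly like this (a
sketch only, with names as they appear in the patch below):

  int extent_write_full_page(struct page *page, struct writeback_control *wbc)
  {
          int ret;
          int flush_ret;
          struct extent_page_data epd = {
                  /* ... as in the patch below ... */
          };

          ret = __extent_writepage(page, wbc, &epd);

          /* The error now surfaces at the caller, but is still fatal. */
          flush_ret = flush_write_bio(&epd);
          BUG_ON(flush_ret < 0);
          return ret;
  }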

Counting all the affected functions:

lock_extent_buffer_for_io(struct extent_buffer *eb,
btree_write_cache_pages(struct address_space *mapping,
extent_write_cache_pages(struct address_space *mapping,
extent_write_full_page(struct page *page, struct writeback_control *wbc)
extent_write_locked_range(struct inode *inode, u64 start, u64 end,
extent_writepages(struct address_space *mapping,

the 3rd point would produce 6 patches.
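
In the 3rd step, each of those patches would then replace the temporary
BUG_ON with explicit propagation. For example, the tail of
extent_write_full_page() would become roughly the following (a sketch
matching the hunks below, where the earlier writepage error takes
precedence over the flush error):

  flush_ret = flush_write_bio(&epd);
  if (ret)
          return ret;     /* prefer the earlier writepage error */
  return flush_ret;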

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8a2335713a2d..a60f3ec22053 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -169,15 +169,15 @@  static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 	return blk_status_to_errno(ret);
 }
 
-static void flush_write_bio(struct extent_page_data *epd)
+static int __must_check flush_write_bio(struct extent_page_data *epd)
 {
-	if (epd->bio) {
-		int ret;
+	int ret = 0;
 
+	if (epd->bio) {
 		ret = submit_one_bio(epd->bio, 0, 0);
-		BUG_ON(ret < 0); /* -ENOMEM */
 		epd->bio = NULL;
 	}
+	return ret;
 }
 
 int __init extent_io_init(void)
@@ -3504,13 +3504,15 @@  lock_extent_buffer_for_io(struct extent_buffer *eb,
 			  struct btrfs_fs_info *fs_info,
 			  struct extent_page_data *epd)
 {
-	int i, num_pages;
+	int i, num_pages, failed_page_nr;
 	int flush = 0;
 	int ret = 0;
 
 	if (!btrfs_try_tree_write_lock(eb)) {
+		ret = flush_write_bio(epd);
+		if (ret < 0)
+			return ret;
 		flush = 1;
-		flush_write_bio(epd);
 		btrfs_tree_lock(eb);
 	}
 
@@ -3519,7 +3521,9 @@  lock_extent_buffer_for_io(struct extent_buffer *eb,
 		if (!epd->sync_io)
 			return 0;
 		if (!flush) {
-			flush_write_bio(epd);
+			ret = flush_write_bio(epd);
+			if (ret < 0)
+				return ret;
 			flush = 1;
 		}
 		while (1) {
@@ -3560,7 +3564,11 @@  lock_extent_buffer_for_io(struct extent_buffer *eb,
 
 		if (!trylock_page(p)) {
 			if (!flush) {
-				flush_write_bio(epd);
+				ret = flush_write_bio(epd);
+				if (ret < 0) {
+					failed_page_nr = i;
+					goto err_unlock;
+				}
 				flush = 1;
 			}
 			lock_page(p);
@@ -3568,6 +3576,15 @@  lock_extent_buffer_for_io(struct extent_buffer *eb,
 	}
 
 	return ret;
+
+err_unlock:
+	/* Unlock these already locked pages */
+	for (i = 0; i < failed_page_nr; i++) {
+		struct page *p = eb->pages[i];
+
+		unlock_page(p);
+	}
+	return ret;
 }
 
 static void end_extent_buffer_writeback(struct extent_buffer *eb)
@@ -3751,6 +3768,7 @@  int btree_write_cache_pages(struct address_space *mapping,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	int ret = 0;
+	int flush_ret;
 	int done = 0;
 	int nr_to_write_done = 0;
 	struct pagevec pvec;
@@ -3818,6 +3836,11 @@  int btree_write_cache_pages(struct address_space *mapping,
 
 			prev_eb = eb;
 			ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
+			if (ret < 0) {
+				free_extent_buffer(eb);
+				done = 1;
+				break;
+			}
 			if (!ret) {
 				free_extent_buffer(eb);
 				continue;
@@ -3850,8 +3873,10 @@  int btree_write_cache_pages(struct address_space *mapping,
 		index = 0;
 		goto retry;
 	}
-	flush_write_bio(&epd);
-	return ret;
+	flush_ret = flush_write_bio(&epd);
+	if (ret)
+		return ret;
+	return flush_ret;
 }
 
 /**
@@ -3947,7 +3972,9 @@  static int extent_write_cache_pages(struct address_space *mapping,
 			 * tmpfs file mapping
 			 */
 			if (!trylock_page(page)) {
-				flush_write_bio(epd);
+				ret = flush_write_bio(epd);
+				if (ret < 0)
+					break;
 				lock_page(page);
 			}
 
@@ -3957,8 +3984,11 @@  static int extent_write_cache_pages(struct address_space *mapping,
 			}
 
 			if (wbc->sync_mode != WB_SYNC_NONE) {
-				if (PageWriteback(page))
-					flush_write_bio(epd);
+				if (PageWriteback(page)) {
+					ret = flush_write_bio(epd);
+					if (ret < 0)
+						break;
+				}
 				wait_on_page_writeback(page);
 			}
 
@@ -4019,6 +4049,7 @@  static int extent_write_cache_pages(struct address_space *mapping,
 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 {
 	int ret;
+	int flush_ret;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = &BTRFS_I(page->mapping->host)->io_tree,
@@ -4028,14 +4059,17 @@  int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 
 	ret = __extent_writepage(page, wbc, &epd);
 
-	flush_write_bio(&epd);
-	return ret;
+	flush_ret = flush_write_bio(&epd);
+	if (ret)
+		return ret;
+	return flush_ret;
 }
 
 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 			      int mode)
 {
 	int ret = 0;
+	int flush_ret;
 	struct address_space *mapping = inode->i_mapping;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct page *page;
@@ -4068,14 +4102,17 @@  int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 		start += PAGE_SIZE;
 	}
 
-	flush_write_bio(&epd);
-	return ret;
+	flush_ret = flush_write_bio(&epd);
+	if (ret)
+		return ret;
+	return flush_ret;
 }
 
 int extent_writepages(struct address_space *mapping,
 		      struct writeback_control *wbc)
 {
 	int ret = 0;
+	int flush_ret;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = &BTRFS_I(mapping->host)->io_tree,
@@ -4084,8 +4121,10 @@  int extent_writepages(struct address_space *mapping,
 	};
 
 	ret = extent_write_cache_pages(mapping, wbc, &epd);
-	flush_write_bio(&epd);
-	return ret;
+	flush_ret = flush_write_bio(&epd);
+	if (ret)
+		return ret;
+	return flush_ret;
 }
 
 int extent_readpages(struct address_space *mapping, struct list_head *pages,