diff mbox series

[v2,05/24] btrfs: extent_io: extract the btree page submission code into its own helper function

Message ID 20201113125149.140836-6-wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs: preparation patches for subpage support | expand

Commit Message

Qu Wenruo Nov. 13, 2020, 12:51 p.m. UTC
In btree_write_cache_pages() we have a btree page submission routine
buried deeply into a nested loop.

This patch will extract that part of code into a helper function,
submit_eb_page(), to do the same work.

Also, since submit_eb_page() now can return >0 for successful extent
buffer submission, remove the "ASSERT(ret <= 0);" line.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/extent_io.c | 116 +++++++++++++++++++++++++------------------
 1 file changed, 69 insertions(+), 47 deletions(-)

Comments

Josef Bacik Nov. 13, 2020, 7:22 p.m. UTC | #1
On 11/13/20 7:51 AM, Qu Wenruo wrote:
> In btree_write_cache_pages() we have a btree page submission routine
> buried deeply into a nested loop.
> 
> This patch will extract that part of code into a helper function,
> submit_eb_page(), to do the same work.
> 
> Also, since submit_eb_page() now can return >0 for successful extent
> buffer submission, remove the "ASSERT(ret <= 0);" line.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> Signed-off-by: David Sterba <dsterba@suse.com>

Reviewed-by: Josef Bacik <josef@toxicpanda.com>

Thanks,

Josef
David Sterba Nov. 18, 2020, 8:04 p.m. UTC | #2
On Fri, Nov 13, 2020 at 08:51:30PM +0800, Qu Wenruo wrote:
> In btree_write_cache_pages() we have a btree page submission routine
> buried deeply into a nested loop.
> 
> This patch will extract that part of code into a helper function,
> submit_eb_page(), to do the same work.
> 
> Also, since submit_eb_page() now can return >0 for successful extent
> buffer submission, remove the "ASSERT(ret <= 0);" line.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> Signed-off-by: David Sterba <dsterba@suse.com>
> ---
>  fs/btrfs/extent_io.c | 116 +++++++++++++++++++++++++------------------
>  1 file changed, 69 insertions(+), 47 deletions(-)
> 
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index caafe44542e8..fd4845b06989 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -3987,10 +3987,75 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
>  	return ret;
>  }
>  
> +/*
> + * Submit one page of one extent buffer.
> + *
> + * Will try to submit all pages of one extent buffer, thus will skip some pages
> + * if it's already submitted.

Note that this paragraph

> + *
> + * @page:	The page of one extent buffer
> + * @eb_context:	To determine if we need to submit this page. If current page
> + *		belongs to this eb, we don't need to submit.

belongs under the parameter description, I hope the example is
https://btrfs.wiki.kernel.org/index.php/Development_notes#Coding_style_preferences
instructive enough.
David Sterba Nov. 18, 2020, 8:09 p.m. UTC | #3
On Fri, Nov 13, 2020 at 08:51:30PM +0800, Qu Wenruo wrote:
> +/*
> + * Submit one page of one extent buffer.
> + *
> + * Will try to submit all pages of one extent buffer, thus will skip some pages
> + * if it's already submitted.

I think I have a deja vu, reading the above is contradictory (submit one
vs submit all) and that I replied that it should be clarified.
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index caafe44542e8..fd4845b06989 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3987,10 +3987,75 @@  static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	return ret;
 }
 
+/*
+ * Submit all dirty pages of the extent buffer which owns @page.
+ *
+ * Later pages of the same extent buffer are skipped, as @eb_context
+ * then already points to that buffer.
+ *
+ * @page:	The page of one extent buffer
+ * @eb_context:	The last submitted extent buffer. If @page belongs to
+ *		it, the page was already handled and is skipped.
+ *
+ * Return >0 if we have submitted the extent buffer successfully.
+ * Return 0 if we don't need to do anything for the page.
+ * Return <0 for fatal error.
+ */
+static int submit_eb_page(struct page *page, struct writeback_control *wbc,
+			  struct extent_page_data *epd,
+			  struct extent_buffer **eb_context)
+{
+	struct address_space *mapping = page->mapping;
+	struct extent_buffer *eb;
+	int ret;
+
+	if (!PagePrivate(page))
+		return 0;
+
+	spin_lock(&mapping->private_lock);
+	if (!PagePrivate(page)) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+
+	eb = (struct extent_buffer *)page->private;
+
+	/*
+	 * Shouldn't happen and normally this would be a BUG_ON but no sense
+	 * in crashing the users box for something we can survive anyway.
+	 */
+	if (WARN_ON(!eb)) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+
+	if (eb == *eb_context) {
+		spin_unlock(&mapping->private_lock);
+		return 0;
+	}
+	ret = atomic_inc_not_zero(&eb->refs);
+	spin_unlock(&mapping->private_lock);
+	if (!ret)
+		return 0;
+
+	*eb_context = eb;
+
+	ret = lock_extent_buffer_for_io(eb, epd);
+	if (ret <= 0) {
+		free_extent_buffer(eb);
+		return ret;
+	}
+	ret = write_one_eb(eb, wbc, epd);
+	free_extent_buffer(eb);
+	if (ret < 0)
+		return ret;
+	return 1;
+}
+
 int btree_write_cache_pages(struct address_space *mapping,
 				   struct writeback_control *wbc)
 {
-	struct extent_buffer *eb, *prev_eb = NULL;
+	struct extent_buffer *eb_context = NULL;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.extent_locked = 0,
@@ -4036,55 +4101,13 @@  int btree_write_cache_pages(struct address_space *mapping,
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
-			if (!PagePrivate(page))
-				continue;
-
-			spin_lock(&mapping->private_lock);
-			if (!PagePrivate(page)) {
-				spin_unlock(&mapping->private_lock);
-				continue;
-			}
-
-			eb = (struct extent_buffer *)page->private;
-
-			/*
-			 * Shouldn't happen and normally this would be a BUG_ON
-			 * but no sense in crashing the users box for something
-			 * we can survive anyway.
-			 */
-			if (WARN_ON(!eb)) {
-				spin_unlock(&mapping->private_lock);
+			ret = submit_eb_page(page, wbc, &epd, &eb_context);
+			if (ret == 0)
 				continue;
-			}
-
-			if (eb == prev_eb) {
-				spin_unlock(&mapping->private_lock);
-				continue;
-			}
-
-			ret = atomic_inc_not_zero(&eb->refs);
-			spin_unlock(&mapping->private_lock);
-			if (!ret)
-				continue;
-
-			prev_eb = eb;
-			ret = lock_extent_buffer_for_io(eb, &epd);
-			if (!ret) {
-				free_extent_buffer(eb);
-				continue;
-			} else if (ret < 0) {
-				done = 1;
-				free_extent_buffer(eb);
-				break;
-			}
-
-			ret = write_one_eb(eb, wbc, &epd);
-			if (ret) {
+			if (ret < 0) {
 				done = 1;
-				free_extent_buffer(eb);
 				break;
 			}
-			free_extent_buffer(eb);
 
 			/*
 			 * the filesystem may choose to bump up nr_to_write.
@@ -4105,7 +4128,6 @@  int btree_write_cache_pages(struct address_space *mapping,
 		index = 0;
 		goto retry;
 	}
-	ASSERT(ret <= 0);
 	if (ret < 0) {
 		end_write_bio(&epd, ret);
 		return ret;