[06/13] iomap: move all remaining per-folio logic into xfs_writepage_map

Message ID 20231126124720.1249310-7-hch@lst.de (mailing list archive)
State New
Series [01/13] iomap: clear the per-folio dirty bits on all writeback failures

Commit Message

Christoph Hellwig Nov. 26, 2023, 12:47 p.m. UTC
Move the tracepoint and the iomap check from iomap_do_writepage into
iomap_writepage_map.  This keeps all logic in one place, and leaves
iomap_do_writepage just as the wrapper for the callback conventions of
write_cache_pages, which will go away when that is convertd to an
iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 34 +++++++++++-----------------------
 1 file changed, 11 insertions(+), 23 deletions(-)
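
For context on the callback convention the commit message refers to:
write_cache_pages() hands each dirty folio, together with an opaque
"void *data" cookie, to a writepage_t callback, which is the only reason
iomap_do_writepage has to exist as a wrapper.  A rough sketch of the
wiring (simplified from include/linux/writeback.h and iomap_writepages()
as of this series; a sketch for orientation, not part of the patch):

/* the callback type write_cache_pages() expects */
typedef int (*writepage_t)(struct folio *folio,
		struct writeback_control *wbc, void *data);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writepage_ops *ops)
{
	int ret;

	wpc->ops = ops;
	/* wpc travels through write_cache_pages() as "void *data" */
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	/* submit the cached ioend left on the writepage context */
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}

Once write_cache_pages is replaced by an iterator later in the series,
this indirection, and with it iomap_do_writepage, can go away entirely;
this patch already shrinks the wrapper to a single line in preparation.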

Comments

Ritesh Harjani (IBM) Nov. 27, 2023, 7:36 a.m. UTC | #1
Christoph Hellwig <hch@lst.de> writes:

> Move the tracepoint and the iomap check from iomap_do_writepage into
> iomap_writepage_map.  This keeps all logic in one place, and leaves
> iomap_do_writepage just as the wrapper for the callback conventions of
> write_cache_pages, which will go away when that is convertd to an
                                                     ^^^ converted
> iterator.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  fs/iomap/buffered-io.c | 34 +++++++++++-----------------------
>  1 file changed, 11 insertions(+), 23 deletions(-)

Straightforward refactoring. The change looks good to me. Please feel
free to add - 

Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Josef Bacik Nov. 27, 2023, 7:20 p.m. UTC | #2
The subject should read "iomap: move all remaining per-folio logic into
 iomap_writepage_map".  Thanks,

Josef
Darrick J. Wong Nov. 29, 2023, 4:50 a.m. UTC | #3
On Sun, Nov 26, 2023 at 01:47:13PM +0100, Christoph Hellwig wrote:
> Move the tracepoint and the iomap check from iomap_do_writepage into
> iomap_writepage_map.  This keeps all logic in one place, and leaves
> iomap_do_writepage just as the wrapper for the callback conventions of
> write_cache_pages, which will go away when that is convertd to an
> iterator.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

With the two fixes from Ritesh and Josef added, I think this looks like
a simple enough movement.
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/iomap/buffered-io.c | 34 +++++++++++-----------------------
>  1 file changed, 11 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 4a5a21809b0182..5834aa46bdb8cf 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -1842,19 +1842,25 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
>   * At the end of a writeback pass, there will be a cached ioend remaining on the
>   * writepage context that the caller will need to submit.
>   */
> -static int
> -iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> -		struct writeback_control *wbc, struct inode *inode,
> -		struct folio *folio, u64 end_pos)
> +static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
> +		struct writeback_control *wbc, struct folio *folio)
>  {
>  	struct iomap_folio_state *ifs = folio->private;
> +	struct inode *inode = folio->mapping->host;
>  	struct iomap_ioend *ioend, *next;
>  	unsigned len = i_blocksize(inode);
>  	unsigned nblocks = i_blocks_per_folio(inode, folio);
>  	u64 pos = folio_pos(folio);
> +	u64 end_pos = pos + folio_size(folio);
>  	int error = 0, count = 0, i;
>  	LIST_HEAD(submit_list);
>  
> +	trace_iomap_writepage(inode, pos, folio_size(folio));
> +
> +	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
> +		folio_unlock(folio);
> +		return 0;
> +	}
>  	WARN_ON_ONCE(end_pos <= pos);
>  
>  	if (!ifs && nblocks > 1) {
> @@ -1952,28 +1958,10 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
>  	return error;
>  }
>  
> -/*
> - * Write out a dirty page.
> - *
> - * For delalloc space on the page, we need to allocate space and flush it.
> - * For unwritten space on the page, we need to start the conversion to
> - * regular allocated space.
> - */
>  static int iomap_do_writepage(struct folio *folio,
>  		struct writeback_control *wbc, void *data)
>  {
> -	struct iomap_writepage_ctx *wpc = data;
> -	struct inode *inode = folio->mapping->host;
> -	u64 end_pos = folio_pos(folio) + folio_size(folio);
> -
> -	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
> -
> -	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
> -		folio_unlock(folio);
> -		return 0;
> -	}
> -
> -	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
> +	return iomap_writepage_map(data, wbc, folio);
>  }
>  
>  int
> -- 
> 2.39.2
> 
>

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4a5a21809b0182..5834aa46bdb8cf 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1842,19 +1842,25 @@  static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
  * At the end of a writeback pass, there will be a cached ioend remaining on the
  * writepage context that the caller will need to submit.
  */
-static int
-iomap_writepage_map(struct iomap_writepage_ctx *wpc,
-		struct writeback_control *wbc, struct inode *inode,
-		struct folio *folio, u64 end_pos)
+static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+		struct writeback_control *wbc, struct folio *folio)
 {
 	struct iomap_folio_state *ifs = folio->private;
+	struct inode *inode = folio->mapping->host;
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
 	unsigned nblocks = i_blocks_per_folio(inode, folio);
 	u64 pos = folio_pos(folio);
+	u64 end_pos = pos + folio_size(folio);
 	int error = 0, count = 0, i;
 	LIST_HEAD(submit_list);
 
+	trace_iomap_writepage(inode, pos, folio_size(folio));
+
+	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
+		folio_unlock(folio);
+		return 0;
+	}
 	WARN_ON_ONCE(end_pos <= pos);
 
 	if (!ifs && nblocks > 1) {
@@ -1952,28 +1958,10 @@  iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	return error;
 }
 
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page, we need to allocate space and flush it.
- * For unwritten space on the page, we need to start the conversion to
- * regular allocated space.
- */
 static int iomap_do_writepage(struct folio *folio,
 		struct writeback_control *wbc, void *data)
 {
-	struct iomap_writepage_ctx *wpc = data;
-	struct inode *inode = folio->mapping->host;
-	u64 end_pos = folio_pos(folio) + folio_size(folio);
-
-	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
-
-	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
-		folio_unlock(folio);
-		return 0;
-	}
-
-	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
+	return iomap_writepage_map(data, wbc, folio);
 }
 
 int