
[4/4] xfs: remove xfs_page_mkwrite_iomap_ops

Message ID 20241029151214.255015-5-hch@lst.de (mailing list archive)
State New
Series [1/4] xfs: split the page fault trace event

Commit Message

Christoph Hellwig Oct. 29, 2024, 3:12 p.m. UTC
Share the regular buffered write iomap_ops with the page fault path
and just check for the IOMAP_FAULT flag to skip delalloc punching.

This keeps the delalloc punching checks in one place, and will make it
easier to convert iomap to an iter model where the begin and end
handlers are merged into a single callback.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/xfs/xfs_file.c  |  2 +-
 fs/xfs/xfs_iomap.c | 17 ++++++++---------
 fs/xfs/xfs_iomap.h |  1 -
 3 files changed, 9 insertions(+), 11 deletions(-)
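
For context, a minimal sketch of how the IOMAP_FAULT flag reaches the shared
->iomap_end handler, assuming the iomap_iter()-based iomap_page_mkwrite(); this
is a simplified illustration, not the exact upstream code:

	/*
	 * iomap_page_mkwrite() drives the fault through the normal iomap
	 * iteration with IOMAP_FAULT set in the iterator flags:
	 */
	struct iomap_iter iter = {
		.inode	= file_inode(vmf->vma->vm_file),
		.flags	= IOMAP_WRITE | IOMAP_FAULT,
	};

	/*
	 * That flag is what the shared xfs_buffered_write_iomap_end() keys
	 * off to skip the delalloc punching for page faults:
	 */
	if (flags & IOMAP_FAULT)
		return 0;

Because the fault path can be told apart at ->iomap_end time, the separate
xfs_page_mkwrite_iomap_ops (which only supplied an .iomap_begin method) is no
longer needed.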

Comments

Darrick J. Wong Oct. 29, 2024, 3:56 p.m. UTC | #1
On Tue, Oct 29, 2024 at 04:12:00PM +0100, Christoph Hellwig wrote:
> Shared the regular buffered write iomap_ops with the page fault path
> and just check for the IOMAP_FAULT flag to skip delalloc punching.
> 
> This keeps the delalloc punching checks in one place, and will make it
> easier to convert iomap to an iter model where the begin and end
> handlers are merged into a single callback.

"merged into a single callback"?  What plans are these? :)

> Signed-off-by: Christoph Hellwig <hch@lst.de>

Code changes here look ok to me, so
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_file.c  |  2 +-
>  fs/xfs/xfs_iomap.c | 17 ++++++++---------
>  fs/xfs/xfs_iomap.h |  1 -
>  3 files changed, 9 insertions(+), 11 deletions(-)
> 
> diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
> index 7464d874e766..c6de6b865ef1 100644
> --- a/fs/xfs/xfs_file.c
> +++ b/fs/xfs/xfs_file.c
> @@ -1474,7 +1474,7 @@ xfs_write_fault(
>  	if (IS_DAX(inode))
>  		ret = xfs_dax_fault_locked(vmf, order, true);
>  	else
> -		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
> +		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
>  	xfs_iunlock(ip, lock_mode);
>  
>  	sb_end_pagefault(inode->i_sb);
> diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
> index 916531d9f83c..bfc5b0a4d633 100644
> --- a/fs/xfs/xfs_iomap.c
> +++ b/fs/xfs/xfs_iomap.c
> @@ -1234,6 +1234,14 @@ xfs_buffered_write_iomap_end(
>  	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
>  		return 0;
>  
> +	/*
> +	 * iomap_page_mkwrite() will never fail in a way that requires delalloc
> +	 * extents that it allocated to be revoked.  Hence never try to release
> +	 * them here.
> +	 */
> +	if (flags & IOMAP_FAULT)
> +		return 0;
> +
>  	/* Nothing to do if we've written the entire delalloc extent */
>  	start_byte = iomap_last_written_block(inode, offset, written);
>  	end_byte = round_up(offset + length, i_blocksize(inode));
> @@ -1260,15 +1268,6 @@ const struct iomap_ops xfs_buffered_write_iomap_ops = {
>  	.iomap_end		= xfs_buffered_write_iomap_end,
>  };
>  
> -/*
> - * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
> - * that it allocated to be revoked. Hence we do not need an .iomap_end method
> - * for this operation.
> - */
> -const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
> -	.iomap_begin		= xfs_buffered_write_iomap_begin,
> -};
> -
>  static int
>  xfs_read_iomap_begin(
>  	struct inode		*inode,
> diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
> index 4da13440bae9..8347268af727 100644
> --- a/fs/xfs/xfs_iomap.h
> +++ b/fs/xfs/xfs_iomap.h
> @@ -48,7 +48,6 @@ xfs_aligned_fsb_count(
>  }
>  
>  extern const struct iomap_ops xfs_buffered_write_iomap_ops;
> -extern const struct iomap_ops xfs_page_mkwrite_iomap_ops;
>  extern const struct iomap_ops xfs_direct_write_iomap_ops;
>  extern const struct iomap_ops xfs_read_iomap_ops;
>  extern const struct iomap_ops xfs_seek_iomap_ops;
> -- 
> 2.45.2
> 
>
Christoph Hellwig Oct. 30, 2024, 4:45 a.m. UTC | #2
On Tue, Oct 29, 2024 at 08:56:49AM -0700, Darrick J. Wong wrote:
> On Tue, Oct 29, 2024 at 04:12:00PM +0100, Christoph Hellwig wrote:
> > Shared the regular buffered write iomap_ops with the page fault path
> > and just check for the IOMAP_FAULT flag to skip delalloc punching.
> > 
> > This keeps the delalloc punching checks in one place, and will make it
> > easier to convert iomap to an iter model where the begin and end
> > handlers are merged into a single callback.
> 
> "merged into a single callback"?  What plans are these? :)

Well, the good old iterator conversion originally from willy.  His
intent, I think, was to avoid the indirect call entirely for
performance-critical calls.  I'm more interested in allowing the caller
to keep more state, and in allowing nested iterations to e.g. make
buffered out-of-place overwrites suck less, and to maybe not do
synchronous reads one block at a time in unshare.

This was my last spin on the idea:

https://www.spinics.net/lists/linux-btrfs/msg122508.html

I can't find willy's two older takes right now.
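
For illustration, a rough sketch of what a merged interface could look like;
the iomap_iter_ops name and the single ->iomap_iter method below are made up
for the sake of discussion and are not the interface from the linked series:

	/*
	 * Hypothetical only: instead of separate ->iomap_begin/->iomap_end
	 * methods, iomap_iter() would call one method that both finishes off
	 * the previous mapping (seeing how much of it was processed) and
	 * produces the next one, ending the loop when it returns 0.
	 */
	struct iomap_iter_ops {
		int (*iomap_iter)(struct iomap_iter *iter);
	};

With a single callback like that, the decision whether to punch out unused
delalloc blocks would naturally sit in one place per filesystem, which is the
direction this patch already takes by routing page faults through
xfs_buffered_write_iomap_ops.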

Patch

diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 7464d874e766..c6de6b865ef1 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1474,7 +1474,7 @@  xfs_write_fault(
 	if (IS_DAX(inode))
 		ret = xfs_dax_fault_locked(vmf, order, true);
 	else
-		ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
+		ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
 	xfs_iunlock(ip, lock_mode);
 
 	sb_end_pagefault(inode->i_sb);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 916531d9f83c..bfc5b0a4d633 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1234,6 +1234,14 @@  xfs_buffered_write_iomap_end(
 	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
 		return 0;
 
+	/*
+	 * iomap_page_mkwrite() will never fail in a way that requires delalloc
+	 * extents that it allocated to be revoked.  Hence never try to release
+	 * them here.
+	 */
+	if (flags & IOMAP_FAULT)
+		return 0;
+
 	/* Nothing to do if we've written the entire delalloc extent */
 	start_byte = iomap_last_written_block(inode, offset, written);
 	end_byte = round_up(offset + length, i_blocksize(inode));
@@ -1260,15 +1268,6 @@  const struct iomap_ops xfs_buffered_write_iomap_ops = {
 	.iomap_end		= xfs_buffered_write_iomap_end,
 };
 
-/*
- * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
- * that it allocated to be revoked. Hence we do not need an .iomap_end method
- * for this operation.
- */
-const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
-	.iomap_begin		= xfs_buffered_write_iomap_begin,
-};
-
 static int
 xfs_read_iomap_begin(
 	struct inode		*inode,
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 4da13440bae9..8347268af727 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -48,7 +48,6 @@  xfs_aligned_fsb_count(
 }
 
 extern const struct iomap_ops xfs_buffered_write_iomap_ops;
-extern const struct iomap_ops xfs_page_mkwrite_iomap_ops;
 extern const struct iomap_ops xfs_direct_write_iomap_ops;
 extern const struct iomap_ops xfs_read_iomap_ops;
 extern const struct iomap_ops xfs_seek_iomap_ops;