[4/9] xfs: use byte ranges for write cleanup ranges

Message ID 20221115013043.360610-5-david@fromorbit.com (mailing list archive)
State Superseded, archived
Series xfs, iomap: fix data corruption due to stale cached iomaps

Commit Message

Dave Chinner Nov. 15, 2022, 1:30 a.m. UTC
From: Dave Chinner <dchinner@redhat.com>

xfs_buffered_write_iomap_end() currently converts the byte ranges
passed to it into filesystem blocks so it can pass them to the bmap
code to punch out delalloc blocks, but then has to convert those
filesystem blocks back to byte ranges for the page cache truncate.

We're about to make the page cache truncate go away and replace it
with a page cache walk, so having to convert everything to and from
filesystem blocks is messy and error-prone. It is much easier to
pass around byte ranges and convert to page indexes and/or
filesystem blocks only where those units are needed.
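
To make the old round trip concrete, here is a minimal userspace
sketch of the two conversions involved. This is an illustration only:
the helper names are invented, the 4096-byte block size is an
assumption, and the helpers merely mimic what XFS_B_TO_FSBT
(truncate down) and XFS_B_TO_FSB (round up) do.

#include <stdint.h>
#include <stdio.h>

/* Assumed example block size; XFS reads this from the superblock. */
#define BLKSZ	4096ULL

/* Mimics XFS_B_TO_FSBT: bytes to fs block, truncating down. */
static uint64_t b_to_fsbt(uint64_t bytes) { return bytes / BLKSZ; }

/* Mimics XFS_B_TO_FSB: bytes to fs block, rounding up. */
static uint64_t b_to_fsb(uint64_t bytes)
{
	return (bytes + BLKSZ - 1) / BLKSZ;
}

int main(void)
{
	uint64_t offset = 1000, written = 5000, length = 20000;

	/* Old scheme: bytes -> blocks for the delalloc punch ... */
	uint64_t start_fsb = b_to_fsb(offset + written);	/* 2 */
	uint64_t end_fsb = b_to_fsb(offset + length);		/* 6 */

	/* ... then blocks -> bytes again for the page cache truncate. */
	printf("punch blocks [%llu, %llu), truncate bytes [%llu, %llu)\n",
	       (unsigned long long)start_fsb,
	       (unsigned long long)end_fsb,
	       (unsigned long long)(start_fsb * BLKSZ),
	       (unsigned long long)(end_fsb * BLKSZ));

	/* The unwritten (!written) case truncates down instead. */
	printf("unwritten start block %llu\n",
	       (unsigned long long)b_to_fsbt(offset));
	return 0;
}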

In preparation for adding the page cache walk, add a helper that
converts byte ranges to filesystem blocks and calls
xfs_bmap_punch_delalloc_range(), and convert
xfs_buffered_write_iomap_end() to calculate its limits in byte ranges.
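
As a worked example of the new byte range limits (values assumed
purely for illustration): with a 4096-byte block size, offset == 1000,
written == 5000 and length == 20000, the new code computes
start_byte = round_up(6000, 4096) = 8192 and
end_byte = round_up(21000, 4096) = 24576. Those are exactly the byte
addresses of filesystem blocks 2 and 6 that the old XFS_B_TO_FSB
arithmetic produced, but without round-tripping through block units.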

Signed-off-by: Dave Chinner <dchinner@redhat.com>
---
 fs/xfs/xfs_iomap.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

Comments

Christoph Hellwig Nov. 15, 2022, 8:42 a.m. UTC | #1
On Tue, Nov 15, 2022 at 12:30:38PM +1100, Dave Chinner wrote:
> +static int
> +xfs_buffered_write_delalloc_punch(
> +	struct inode		*inode,
> +	loff_t			start_byte,
> +	loff_t			end_byte)
> +{
> +	struct xfs_mount	*mp = XFS_M(inode->i_sb);
> +	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
> +	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
> +
> +	return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
> +				end_fsb - start_fsb);
> +}

What is the reason not to switch xfs_bmap_punch_delalloc_range to
just take the byte range, as discussed last round?
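
For reference, the shape of the suggested change would be roughly the
following. This is a sketch only, not actual code from any revision:
it assumes the byte-to-block conversion simply moves inside the punch
function so callers can stay in byte units, and it elides the
existing block-based punch loop.

int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	struct xfs_mount	*mp = ip->i_mount;
	/* Convert once here, so the rounding policy lives in one place. */
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);

	/* ... existing punch logic iterates [start_fsb, end_fsb) ... */
}
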
Darrick J. Wong Nov. 15, 2022, 11:57 p.m. UTC | #2
On Tue, Nov 15, 2022 at 12:30:38PM +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> xfs_buffered_write_iomap_end() currently converts the byte ranges
> passed to it into filesystem blocks so it can pass them to the bmap
> code to punch out delalloc blocks, but then has to convert those
> filesystem blocks back to byte ranges for the page cache truncate.
> 
> We're about to make the page cache truncate go away and replace it
> with a page cache walk, so having to convert everything to and from
> filesystem blocks is messy and error-prone. It is much easier to
> pass around byte ranges and convert to page indexes and/or
> filesystem blocks only where those units are needed.
> 
> In preparation for adding the page cache walk, add a helper that
> converts byte ranges to filesystem blocks and calls
> xfs_bmap_punch_delalloc_range(), and convert
> xfs_buffered_write_iomap_end() to calculate its limits in byte ranges.
> 
> Signed-off-by: Dave Chinner <dchinner@redhat.com>

Looks ok,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_iomap.c | 40 +++++++++++++++++++++++++---------------
>  1 file changed, 25 insertions(+), 15 deletions(-)
> 
> diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
> index a2e45ea1b0cb..7bb55dbc19d3 100644
> --- a/fs/xfs/xfs_iomap.c
> +++ b/fs/xfs/xfs_iomap.c
> @@ -1120,6 +1120,20 @@ xfs_buffered_write_iomap_begin(
>  	return error;
>  }
>  
> +static int
> +xfs_buffered_write_delalloc_punch(
> +	struct inode		*inode,
> +	loff_t			start_byte,
> +	loff_t			end_byte)
> +{
> +	struct xfs_mount	*mp = XFS_M(inode->i_sb);
> +	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
> +	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
> +
> +	return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
> +				end_fsb - start_fsb);
> +}
> +
>  static int
>  xfs_buffered_write_iomap_end(
>  	struct inode		*inode,
> @@ -1129,10 +1143,9 @@ xfs_buffered_write_iomap_end(
>  	unsigned		flags,
>  	struct iomap		*iomap)
>  {
> -	struct xfs_inode	*ip = XFS_I(inode);
> -	struct xfs_mount	*mp = ip->i_mount;
> -	xfs_fileoff_t		start_fsb;
> -	xfs_fileoff_t		end_fsb;
> +	struct xfs_mount	*mp = XFS_M(inode->i_sb);
> +	loff_t			start_byte;
> +	loff_t			end_byte;
>  	int			error = 0;
>  
>  	if (iomap->type != IOMAP_DELALLOC)
> @@ -1157,13 +1170,13 @@ xfs_buffered_write_iomap_end(
>  	 * the range.
>  	 */
>  	if (unlikely(!written))
> -		start_fsb = XFS_B_TO_FSBT(mp, offset);
> +		start_byte = round_down(offset, mp->m_sb.sb_blocksize);
>  	else
> -		start_fsb = XFS_B_TO_FSB(mp, offset + written);
> -	end_fsb = XFS_B_TO_FSB(mp, offset + length);
> +		start_byte = round_up(offset + written, mp->m_sb.sb_blocksize);
> +	end_byte = round_up(offset + length, mp->m_sb.sb_blocksize);
>  
>  	/* Nothing to do if we've written the entire delalloc extent */
> -	if (start_fsb >= end_fsb)
> +	if (start_byte >= end_byte)
>  		return 0;
>  
>  	/*
> @@ -1173,15 +1186,12 @@ xfs_buffered_write_iomap_end(
>  	 * leave dirty pages with no space reservation in the cache.
>  	 */
>  	filemap_invalidate_lock(inode->i_mapping);
> -	truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
> -				 XFS_FSB_TO_B(mp, end_fsb) - 1);
> -
> -	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
> -				       end_fsb - start_fsb);
> +	truncate_pagecache_range(inode, start_byte, end_byte - 1);
> +	error = xfs_buffered_write_delalloc_punch(inode, start_byte, end_byte);
>  	filemap_invalidate_unlock(inode->i_mapping);
>  	if (error && !xfs_is_shutdown(mp)) {
> -		xfs_alert(mp, "%s: unable to clean up ino %lld",
> -			__func__, ip->i_ino);
> +		xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
> +			__func__, XFS_I(inode)->i_ino);
>  		return error;
>  	}
>  	return 0;
> -- 
> 2.37.2
>

Patch

diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index a2e45ea1b0cb..7bb55dbc19d3 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1120,6 +1120,20 @@ xfs_buffered_write_iomap_begin(
 	return error;
 }
 
+static int
+xfs_buffered_write_delalloc_punch(
+	struct inode		*inode,
+	loff_t			start_byte,
+	loff_t			end_byte)
+{
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
+
+	return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
+				end_fsb - start_fsb);
+}
+
 static int
 xfs_buffered_write_iomap_end(
 	struct inode		*inode,
@@ -1129,10 +1143,9 @@ xfs_buffered_write_iomap_end(
 	unsigned		flags,
 	struct iomap		*iomap)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		start_fsb;
-	xfs_fileoff_t		end_fsb;
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	loff_t			start_byte;
+	loff_t			end_byte;
 	int			error = 0;
 
 	if (iomap->type != IOMAP_DELALLOC)
@@ -1157,13 +1170,13 @@ xfs_buffered_write_iomap_end(
 	 * the range.
 	 */
 	if (unlikely(!written))
-		start_fsb = XFS_B_TO_FSBT(mp, offset);
+		start_byte = round_down(offset, mp->m_sb.sb_blocksize);
 	else
-		start_fsb = XFS_B_TO_FSB(mp, offset + written);
-	end_fsb = XFS_B_TO_FSB(mp, offset + length);
+		start_byte = round_up(offset + written, mp->m_sb.sb_blocksize);
+	end_byte = round_up(offset + length, mp->m_sb.sb_blocksize);
 
 	/* Nothing to do if we've written the entire delalloc extent */
-	if (start_fsb >= end_fsb)
+	if (start_byte >= end_byte)
 		return 0;
 
 	/*
@@ -1173,15 +1186,12 @@ xfs_buffered_write_iomap_end(
 	 * leave dirty pages with no space reservation in the cache.
 	 */
 	filemap_invalidate_lock(inode->i_mapping);
-	truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
-				 XFS_FSB_TO_B(mp, end_fsb) - 1);
-
-	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-				       end_fsb - start_fsb);
+	truncate_pagecache_range(inode, start_byte, end_byte - 1);
+	error = xfs_buffered_write_delalloc_punch(inode, start_byte, end_byte);
 	filemap_invalidate_unlock(inode->i_mapping);
 	if (error && !xfs_is_shutdown(mp)) {
-		xfs_alert(mp, "%s: unable to clean up ino %lld",
-			__func__, ip->i_ino);
+		xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
+			__func__, XFS_I(inode)->i_ino);
 		return error;
 	}
 	return 0;