[03/10] iomap: move locking out of iomap_write_delalloc_release

Message ID 20240923152904.1747117-4-hch@lst.de (mailing list archive)
State Not Applicable, archived
Series [01/10] iomap: factor out a iomap_last_written_block helper

Commit Message

Christoph Hellwig Sept. 23, 2024, 3:28 p.m. UTC
XFS (currently the only user of iomap_write_delalloc_release) already
holds the invalidate_lock for most zeroing operations.  To avoid a
deadlock it needs to stop taking the lock in this path, but making that
conditional inside iomap would leak XFS locking details into iomap.

To avoid this, require the caller to hold the invalidate_lock when
calling iomap_write_delalloc_release instead of taking it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 17 ++++++++---------
 fs/xfs/xfs_iomap.c     |  2 ++
 2 files changed, 10 insertions(+), 9 deletions(-)
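
For illustration, a minimal sketch of the calling convention after this
change, modeled on the XFS hunk below.  The wrapper and the
example_punch callback are hypothetical stand-ins (XFS's real callback
is xfs_buffered_write_delalloc_punch), assuming the punch callback
signature used in this series:

	/* Hypothetical punch callback: frees delalloc blocks over the range. */
	static void example_punch(struct inode *inode, loff_t pos,
			loff_t length, struct iomap *iomap)
	{
	}

	/* Hypothetical ->iomap_end-style caller, not part of the patch. */
	static int example_buffered_write_iomap_end(struct inode *inode,
			loff_t start_byte, loff_t end_byte, unsigned flags,
			struct iomap *iomap)
	{
		/*
		 * The helper now asserts that invalidate_lock is write-held,
		 * so the caller owns the lock for the whole cache walk.
		 */
		filemap_invalidate_lock(inode->i_mapping);
		iomap_write_delalloc_release(inode, start_byte, end_byte,
				flags, iomap, example_punch);
		filemap_invalidate_unlock(inode->i_mapping);
		return 0;
	}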

Comments

Darrick J. Wong Sept. 23, 2024, 4:19 p.m. UTC | #1
On Mon, Sep 23, 2024 at 05:28:17PM +0200, Christoph Hellwig wrote:
> XFS (currently the only user of iomap_write_delalloc_release) already
> holds the invalidate_lock for most zeroing operations.  To avoid a
> deadlock it needs to stop taking the lock in this path, but making that
> conditional inside iomap would leak XFS locking details into iomap.
> 
> To avoid this, require the caller to hold the invalidate_lock when
> calling iomap_write_delalloc_release instead of taking it there.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Yes, I like this much better.  I'm glad that Dave pointed out the
inconsistency in the locking model (iomap doesn't take locks,
filesystems do) in this one odd case.

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 237aeb883166df..232aaa1e86451a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1211,12 +1211,13 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
 
 	/*
-	 * Lock the mapping to avoid races with page faults re-instantiating
-	 * folios and dirtying them via ->page_mkwrite whilst we walk the
-	 * cache and perform delalloc extent removal. Failing to do this can
-	 * leave dirty pages with no space reservation in the cache.
+	 * The caller must hold invalidate_lock to avoid races with page faults
+	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
+	 * we walk the cache and perform delalloc extent removal.  Failing to do
+	 * this can leave dirty pages with no space reservation in the cache.
 	 */
-	filemap_invalidate_lock(inode->i_mapping);
+	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
+
 	while (start_byte < scan_end_byte) {
 		loff_t		data_end;
 
@@ -1233,7 +1234,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		if (start_byte == -ENXIO || start_byte == scan_end_byte)
 			break;
 		if (WARN_ON_ONCE(start_byte < 0))
-			goto out_unlock;
+			return;
 		WARN_ON_ONCE(start_byte < punch_start_byte);
 		WARN_ON_ONCE(start_byte > scan_end_byte);
 
@@ -1244,7 +1245,7 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
 				scan_end_byte, SEEK_HOLE);
 		if (WARN_ON_ONCE(data_end < 0))
-			goto out_unlock;
+			return;
 
 		/*
 		 * If we race with post-direct I/O invalidation of the page cache,
@@ -1266,8 +1267,6 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 	if (punch_start_byte < end_byte)
 		punch(inode, punch_start_byte, end_byte - punch_start_byte,
 				iomap);
-out_unlock:
-	filemap_invalidate_unlock(inode->i_mapping);
 }
 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
 
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 30f2530b6d5461..01324da63fcfc7 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1239,8 +1239,10 @@ xfs_buffered_write_iomap_end(
 	if (start_byte >= end_byte)
 		return 0;
 
+	filemap_invalidate_lock(inode->i_mapping);
 	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
 			xfs_buffered_write_delalloc_punch);
+	filemap_invalidate_unlock(inode->i_mapping);
 	return 0;
 }
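
A side effect of moving the lock out: the helper's error paths no
longer need an out_unlock label, because the lock's scope now lives
entirely in the caller.  A short sketch of the resulting idiom, with
illustrative (non-kernel-API) names like cache_walk_helper and
walk_failed:

	/* Hypothetical failure check, for illustration only. */
	static bool walk_failed(struct address_space *mapping)
	{
		return false;
	}

	static void cache_walk_helper(struct address_space *mapping)
	{
		/* Splats under lockdep if the caller forgot the write lock. */
		lockdep_assert_held_write(&mapping->invalidate_lock);

		if (WARN_ON_ONCE(walk_failed(mapping)))
			return;	/* caller still holds and drops the lock */
		/* ... walk the cache and punch delalloc extents ... */
	}

	static void cache_walk_caller(struct address_space *mapping)
	{
		filemap_invalidate_lock(mapping);	/* down_write */
		cache_walk_helper(mapping);
		filemap_invalidate_unlock(mapping);	/* up_write */
	}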