[v3,8/9] iomap: make iomap_write_end() return a boolean

Message ID 20240319011102.2929635-9-yi.zhang@huaweicloud.com (mailing list archive)
State New
Series xfs/iomap: fix non-atomic clone operation and don't update size when zeroing range post eof

Commit Message

Zhang Yi March 19, 2024, 1:11 a.m. UTC
From: Zhang Yi <yi.zhang@huawei.com>

By now, iomap_write_end() always returns either 0 or the number of
copied bytes, so instead of returning the written byte count, convert
it to return a boolean indicating whether the copied bytes have been
written to the pagecache.

Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 50 +++++++++++++++++++++++++++---------------
 1 file changed, 32 insertions(+), 18 deletions(-)

Comments

Darrick J. Wong March 19, 2024, 9:08 p.m. UTC | #1
On Tue, Mar 19, 2024 at 09:11:01AM +0800, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
> 
> By now, iomap_write_end() always returns either 0 or the number of
> copied bytes, so instead of returning the written byte count, convert
> it to return a boolean indicating whether the copied bytes have been
> written to the pagecache.
> 
> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
>  fs/iomap/buffered-io.c | 50 +++++++++++++++++++++++++++---------------
>  1 file changed, 32 insertions(+), 18 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 291648c61a32..004673ea8bc1 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -790,7 +790,7 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>  	return status;
>  }
>  
> -static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
> +static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  		size_t copied, struct folio *folio)
>  {
>  	flush_dcache_folio(folio);
> @@ -807,14 +807,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  	 * redo the whole thing.
>  	 */
>  	if (unlikely(copied < len && !folio_test_uptodate(folio)))
> -		return 0;
> +		return false;
>  	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
>  	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
>  	filemap_dirty_folio(inode->i_mapping, folio);
> -	return copied;
> +	return true;
>  }
>  
> -static size_t iomap_write_end_inline(const struct iomap_iter *iter,
> +static void iomap_write_end_inline(const struct iomap_iter *iter,
>  		struct folio *folio, loff_t pos, size_t copied)
>  {
>  	const struct iomap *iomap = &iter->iomap;
> @@ -829,21 +829,32 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
>  	kunmap_local(addr);
>  
>  	mark_inode_dirty(iter->inode);
> -	return copied;
>  }
>  
> -/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
> -static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
> +/*
> + * Returns true if all copied bytes have been written to the pagecache,
> + * otherwise returns false.
> + */
> +static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
>  		size_t copied, struct folio *folio)
>  {
>  	const struct iomap *srcmap = iomap_iter_srcmap(iter);
> +	bool ret = true;
>  
> -	if (srcmap->type == IOMAP_INLINE)
> -		return iomap_write_end_inline(iter, folio, pos, copied);
> -	if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
> -		return block_write_end(NULL, iter->inode->i_mapping, pos, len,
> -				       copied, &folio->page, NULL);
> -	return __iomap_write_end(iter->inode, pos, len, copied, folio);
> +	if (srcmap->type == IOMAP_INLINE) {
> +		iomap_write_end_inline(iter, folio, pos, copied);
> +	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
> +		size_t bh_written;
> +
> +		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
> +					len, copied, &folio->page, NULL);
> +		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
> +		ret = bh_written == copied;
> +	} else {
> +		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
> +	}
> +
> +	return ret;

This could be cleaned up even further:

	if (srcmap->type == IOMAP_INLINE) {
		iomap_write_end_inline(iter, folio, pos, copied);
		return true;
	}

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
					len, copied, &folio->page, NULL);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);

>  }
>  
>  static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
> @@ -907,7 +918,8 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  			flush_dcache_folio(folio);
>  
>  		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
> -		written = iomap_write_end(iter, pos, bytes, copied, folio);
> +		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
> +			  copied : 0;
>  
>  		/*
>  		 * Update the in-memory inode size after copying the data into
> @@ -1285,6 +1297,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
>  		int status;
>  		size_t offset;
>  		size_t bytes = min_t(u64, SIZE_MAX, length);
> +		bool ret;
>  
>  		status = iomap_write_begin(iter, pos, bytes, &folio);
>  		if (unlikely(status))
> @@ -1296,9 +1309,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
>  		if (bytes > folio_size(folio) - offset)
>  			bytes = folio_size(folio) - offset;
>  
> -		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
> +		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
>  		__iomap_put_folio(iter, pos, bytes, folio);
> -		if (WARN_ON_ONCE(bytes == 0))
> +		if (WARN_ON_ONCE(!ret))

If you named this variable "write_end_ok" then the diagnostic output
from the WARN_ONs would say that.  That said, it also encodes the line
number so it's not a big deal to leave this as it is.
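
For illustration, that rename at the iomap_unshare_iter() call site
would look roughly like:

		bool write_end_ok;

		write_end_ok = iomap_write_end(iter, pos, bytes, bytes, folio);
		__iomap_put_folio(iter, pos, bytes, folio);
		if (WARN_ON_ONCE(!write_end_ok))
			return -EIO;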

With at least the first cleanup applied,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

>  			return -EIO;
>  
>  		cond_resched();
> @@ -1347,6 +1360,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
>  		int status;
>  		size_t offset;
>  		size_t bytes = min_t(u64, SIZE_MAX, length);
> +		bool ret;
>  
>  		status = iomap_write_begin(iter, pos, bytes, &folio);
>  		if (status)
> @@ -1361,9 +1375,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
>  		folio_zero_range(folio, offset, bytes);
>  		folio_mark_accessed(folio);
>  
> -		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
> +		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
>  		__iomap_put_folio(iter, pos, bytes, folio);
> -		if (WARN_ON_ONCE(bytes == 0))
> +		if (WARN_ON_ONCE(!ret))
>  			return -EIO;
>  
>  		pos += bytes;
> -- 
> 2.39.2
> 
>

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 291648c61a32..004673ea8bc1 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -790,7 +790,7 @@  static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	return status;
 }
 
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
 	flush_dcache_folio(folio);
@@ -807,14 +807,14 @@  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * redo the whole thing.
 	 */
 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
-		return 0;
+		return false;
 	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
 	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
 	filemap_dirty_folio(inode->i_mapping, folio);
-	return copied;
+	return true;
 }
 
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t copied)
 {
 	const struct iomap *iomap = &iter->iomap;
@@ -829,21 +829,32 @@  static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 	kunmap_local(addr);
 
 	mark_inode_dirty(iter->inode);
-	return copied;
 }
 
-/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise returns false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	bool ret = true;
 
-	if (srcmap->type == IOMAP_INLINE)
-		return iomap_write_end_inline(iter, folio, pos, copied);
-	if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
-		return block_write_end(NULL, iter->inode->i_mapping, pos, len,
-				       copied, &folio->page, NULL);
-	return __iomap_write_end(iter->inode, pos, len, copied, folio);
+	if (srcmap->type == IOMAP_INLINE) {
+		iomap_write_end_inline(iter, folio, pos, copied);
+	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+		size_t bh_written;
+
+		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+					len, copied, &folio->page, NULL);
+		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+		ret = bh_written == copied;
+	} else {
+		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
+	}
+
+	return ret;
 }
 
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
@@ -907,7 +918,8 @@  static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 			flush_dcache_folio(folio);
 
 		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
-		written = iomap_write_end(iter, pos, bytes, copied, folio);
+		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+			  copied : 0;
 
 		/*
 		 * Update the in-memory inode size after copying the data into
@@ -1285,6 +1297,7 @@  static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);
+		bool ret;
 
 		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
@@ -1296,9 +1309,9 @@  static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		if (bytes > folio_size(folio) - offset)
 			bytes = folio_size(folio) - offset;
 
-		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
 		__iomap_put_folio(iter, pos, bytes, folio);
-		if (WARN_ON_ONCE(bytes == 0))
+		if (WARN_ON_ONCE(!ret))
 			return -EIO;
 
 		cond_resched();
@@ -1347,6 +1360,7 @@  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);
+		bool ret;
 
 		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (status)
@@ -1361,9 +1375,9 @@  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		folio_zero_range(folio, offset, bytes);
 		folio_mark_accessed(folio);
 
-		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
 		__iomap_put_folio(iter, pos, bytes, folio);
-		if (WARN_ON_ONCE(bytes == 0))
+		if (WARN_ON_ONCE(!ret))
 			return -EIO;
 
 		pos += bytes;