diff mbox series

[v3,1/3] direct-io: clean up error paths of do_blockdev_direct_IO

Message ID 20200905052023.3719585-2-krisman@collabora.com (mailing list archive)
State New, archived
Headers show
Series [v3,1/3] direct-io: clean up error paths of do_blockdev_direct_IO | expand

Commit Message

Gabriel Krisman Bertazi Sept. 5, 2020, 5:20 a.m. UTC
In preparation for reordering the DIO checks, reduce code duplication in
the error-handling paths of do_blockdev_direct_IO.

Changes since V1:
  - Remove fail_dio_unlocked (Me)
  - Ensure fail_dio won't call inode_unlock() for writes (Jan Kara)

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
---
 fs/direct-io.c | 35 ++++++++++++++---------------------
 1 file changed, 14 insertions(+), 21 deletions(-)

Comments

Jan Kara Sept. 7, 2020, 9:32 a.m. UTC | #1
On Sat 05-09-20 01:20:21, Gabriel Krisman Bertazi wrote:
> In preparation to resort DIO checks, reduce code duplication of error
> handling in do_blockdev_direct_IO.
> 
> Changes since V1:
>   - Remove fail_dio_unlocked (Me)
>   - Ensure fail_dio won't call inode_unlock() for writes (Jan Kara)

Please add the patch changelogs below the diffstat. That way they won't be
in the final changelog (which is the right thing to do because they are
mostly irrelevant for the final patch).

Otherwise the patch looks good to me so feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza 

> 
> Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
> ---
>  fs/direct-io.c | 35 ++++++++++++++---------------------
>  1 file changed, 14 insertions(+), 21 deletions(-)
> 
> diff --git a/fs/direct-io.c b/fs/direct-io.c
> index 183299892465..6c11db1cec27 100644
> --- a/fs/direct-io.c
> +++ b/fs/direct-io.c
> @@ -1170,7 +1170,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  			blkbits = blksize_bits(bdev_logical_block_size(bdev));
>  		blocksize_mask = (1 << blkbits) - 1;
>  		if (align & blocksize_mask)
> -			goto out;
> +			return -EINVAL;
>  	}
>  
>  	/* watch out for a 0 len io from a tricksy fs */
> @@ -1178,9 +1178,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  		return 0;
>  
>  	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
> -	retval = -ENOMEM;
>  	if (!dio)
> -		goto out;
> +		return -ENOMEM;
>  	/*
>  	 * Believe it or not, zeroing out the page array caused a .5%
>  	 * performance regression in a database benchmark.  So, we take
> @@ -1199,22 +1198,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  
>  			retval = filemap_write_and_wait_range(mapping, offset,
>  							      end - 1);
> -			if (retval) {
> -				inode_unlock(inode);
> -				kmem_cache_free(dio_cache, dio);
> -				goto out;
> -			}
> +			if (retval)
> +				goto fail_dio;
>  		}
>  	}
>  
>  	/* Once we sampled i_size check for reads beyond EOF */
>  	dio->i_size = i_size_read(inode);
>  	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
> -		if (dio->flags & DIO_LOCKING)
> -			inode_unlock(inode);
> -		kmem_cache_free(dio_cache, dio);
>  		retval = 0;
> -		goto out;
> +		goto fail_dio;
>  	}
>  
>  	/*
> @@ -1258,14 +1251,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  			 */
>  			retval = sb_init_dio_done_wq(dio->inode->i_sb);
>  		}
> -		if (retval) {
> -			/*
> -			 * We grab i_mutex only for reads so we don't have
> -			 * to release it here
> -			 */
> -			kmem_cache_free(dio_cache, dio);
> -			goto out;
> -		}
> +		if (retval)
> +			goto fail_dio;
>  	}
>  
>  	/*
> @@ -1368,7 +1355,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  	} else
>  		BUG_ON(retval != -EIOCBQUEUED);
>  
> -out:
> +	return retval;
> +
> +fail_dio:
> +	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
> +		inode_unlock(inode);
> +
> +	kmem_cache_free(dio_cache, dio);
>  	return retval;
>  }
>  
> -- 
> 2.28.0
>
diff mbox series

Patch

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 183299892465..6c11db1cec27 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1170,7 +1170,7 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
 		blocksize_mask = (1 << blkbits) - 1;
 		if (align & blocksize_mask)
-			goto out;
+			return -EINVAL;
 	}
 
 	/* watch out for a 0 len io from a tricksy fs */
@@ -1178,9 +1178,8 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-	retval = -ENOMEM;
 	if (!dio)
-		goto out;
+		return -ENOMEM;
 	/*
 	 * Believe it or not, zeroing out the page array caused a .5%
 	 * performance regression in a database benchmark.  So, we take
@@ -1199,22 +1198,16 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 
 			retval = filemap_write_and_wait_range(mapping, offset,
 							      end - 1);
-			if (retval) {
-				inode_unlock(inode);
-				kmem_cache_free(dio_cache, dio);
-				goto out;
-			}
+			if (retval)
+				goto fail_dio;
 		}
 	}
 
 	/* Once we sampled i_size check for reads beyond EOF */
 	dio->i_size = i_size_read(inode);
 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-		if (dio->flags & DIO_LOCKING)
-			inode_unlock(inode);
-		kmem_cache_free(dio_cache, dio);
 		retval = 0;
-		goto out;
+		goto fail_dio;
 	}
 
 	/*
@@ -1258,14 +1251,8 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			 */
 			retval = sb_init_dio_done_wq(dio->inode->i_sb);
 		}
-		if (retval) {
-			/*
-			 * We grab i_mutex only for reads so we don't have
-			 * to release it here
-			 */
-			kmem_cache_free(dio_cache, dio);
-			goto out;
-		}
+		if (retval)
+			goto fail_dio;
 	}
 
 	/*
@@ -1368,7 +1355,13 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
-out:
+	return retval;
+
+fail_dio:
+	if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+		inode_unlock(inode);
+
+	kmem_cache_free(dio_cache, dio);
 	return retval;
 }