diff mbox series

[v2,1/3] direct-io: clean up error paths of do_blockdev_direct_IO

Message ID 20200903200414.673105-2-krisman@collabora.com (mailing list archive)
State New, archived
Headers show
Series Unaligned DIO read error path fix and clean ups | expand

Commit Message

Gabriel Krisman Bertazi Sept. 3, 2020, 8:04 p.m. UTC
In preparation for re-sorting the DIO checks, reduce code duplication of
error handling in do_blockdev_direct_IO.

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
---
 fs/direct-io.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

Comments

Jan Kara Sept. 4, 2020, 7:47 a.m. UTC | #1
On Thu 03-09-20 16:04:12, Gabriel Krisman Bertazi wrote:
> In preparation for re-sorting the DIO checks, reduce code duplication of
> error handling in do_blockdev_direct_IO.
> 
> Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>

Two comments below:

> @@ -1368,7 +1360,15 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
>  	} else
>  		BUG_ON(retval != -EIOCBQUEUED);
>  
> -out:
> +	return retval;
> +
> +fail_dio:
> +	if (dio->flags & DIO_LOCKING) {
> +		inode_unlock(inode);
> +	}

No need for braces here. Also please add '&& iov_iter_rw(iter) == READ' to
the condition for unlocking to make fail_dio safe also for writes.
Currently you jump to fail_dio only for reads but in 3/3 you can jump to it
also for writes and that is a bug.

								Honza

> +fail_dio_unlocked:
> +	kmem_cache_free(dio_cache, dio);
> +
>  	return retval;
>  }
>  
> -- 
> 2.28.0
>
diff mbox series

Patch

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 183299892465..04aae41323d7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1170,7 +1170,7 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
 		blocksize_mask = (1 << blkbits) - 1;
 		if (align & blocksize_mask)
-			goto out;
+			return -EINVAL;
 	}
 
 	/* watch out for a 0 len io from a tricksy fs */
@@ -1178,9 +1178,8 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-	retval = -ENOMEM;
 	if (!dio)
-		goto out;
+		return -ENOMEM;
 	/*
 	 * Believe it or not, zeroing out the page array caused a .5%
 	 * performance regression in a database benchmark.  So, we take
@@ -1199,22 +1198,16 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 
 			retval = filemap_write_and_wait_range(mapping, offset,
 							      end - 1);
-			if (retval) {
-				inode_unlock(inode);
-				kmem_cache_free(dio_cache, dio);
-				goto out;
-			}
+			if (retval)
+				goto fail_dio;
 		}
 	}
 
 	/* Once we sampled i_size check for reads beyond EOF */
 	dio->i_size = i_size_read(inode);
 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-		if (dio->flags & DIO_LOCKING)
-			inode_unlock(inode);
-		kmem_cache_free(dio_cache, dio);
 		retval = 0;
-		goto out;
+		goto fail_dio;
 	}
 
 	/*
@@ -1263,8 +1256,7 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			 * We grab i_mutex only for reads so we don't have
 			 * to release it here
 			 */
-			kmem_cache_free(dio_cache, dio);
-			goto out;
+			goto fail_dio_unlocked;
 		}
 	}
 
@@ -1368,7 +1360,15 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
-out:
+	return retval;
+
+fail_dio:
+	if (dio->flags & DIO_LOCKING) {
+		inode_unlock(inode);
+	}
+fail_dio_unlocked:
+	kmem_cache_free(dio_cache, dio);
+
 	return retval;
 }