fs/direct-io: Avoid duplicating the check whether or not locking is required

Message ID 20190111001451.77567-1-bvanassche@acm.org (mailing list archive)
State New, archived
Series fs/direct-io: Avoid duplicating the check whether or not locking is required

Commit Message

Bart Van Assche Jan. 11, 2019, 12:14 a.m. UTC
Instead of duplicating the check of whether inode locking is required,
perform that check once and store the result in a local variable. Remove
the obsolete comment that refers to direct_io_worker(), since that
function has been removed. This patch does not change the behavior of the
direct I/O code.

Cc: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 fs/direct-io.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)
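
Note for reviewers: the change boils down to hoisting a condition that was
evaluated in three places into a single const local. The program below is a
minimal standalone sketch of that pattern, not kernel code: toy_direct_io(),
the value of DIO_LOCKING and the stub inode_lock()/inode_unlock() helpers are
made-up stand-ins for illustration only.

/*
 * Standalone illustration: evaluate the "is inode locking required?"
 * condition once and keep it in a const local, so the lock, the
 * error-path unlock and the final unlock all test the same variable
 * instead of re-deriving the condition.
 */
#include <stdbool.h>
#include <stdio.h>

#define DIO_LOCKING 0x01	/* toy flag: caller wants the inode lock for reads */

enum toy_rw { TOY_READ, TOY_WRITE };

static void inode_lock(void)   { puts("inode locked"); }
static void inode_unlock(void) { puts("inode unlocked"); }

/* Returns 0 on success, a negative value on error (kernel convention). */
static int toy_direct_io(unsigned int flags, enum toy_rw rw, bool fail_flush)
{
	/* Evaluate the locking condition exactly once. */
	const bool lock_inode = (flags & DIO_LOCKING) && rw == TOY_READ;

	if (lock_inode) {
		inode_lock();
		if (fail_flush) {	/* stands in for a flush/writeback failure */
			inode_unlock();
			return -5;	/* stand-in error code */
		}
	}

	/* ... submit the I/O here ... */

	if (lock_inode)			/* same variable, no duplicated check */
		inode_unlock();

	return 0;
}

int main(void)
{
	printf("locked read  -> %d\n", toy_direct_io(DIO_LOCKING, TOY_READ, false));
	printf("write        -> %d\n", toy_direct_io(DIO_LOCKING, TOY_WRITE, false));
	printf("flush error  -> %d\n", toy_direct_io(DIO_LOCKING, TOY_READ, true));
	return 0;
}

Making the local const documents that the locking decision cannot change
between the lock site and the two unlock sites, which is exactly what the
error-path unlock and the final unlock in the patch rely on.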

Patch

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 76d1295ed0a4..b8e7729c9cb1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1185,6 +1185,8 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	struct buffer_head map_bh = { 0, };
 	struct blk_plug plug;
 	unsigned long align = offset | iov_iter_alignment(iter);
+	const bool lock_inode = flags & DIO_LOCKING &&
+		iov_iter_rw(iter) == READ;
 
 	/*
 	 * Avoid references to bdev if not absolutely needed to give
@@ -1215,28 +1217,22 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	memset(dio, 0, offsetof(struct dio, pages));
 
 	dio->flags = flags;
-	if (dio->flags & DIO_LOCKING) {
-		if (iov_iter_rw(iter) == READ) {
-			struct address_space *mapping =
-					iocb->ki_filp->f_mapping;
-
-			/* will be released by direct_io_worker */
-			inode_lock(inode);
-
-			retval = filemap_write_and_wait_range(mapping, offset,
-							      end - 1);
-			if (retval) {
-				inode_unlock(inode);
-				kmem_cache_free(dio_cache, dio);
-				goto out;
-			}
+	if (lock_inode) {
+		struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+		inode_lock(inode);
+		retval = filemap_write_and_wait_range(mapping, offset, end - 1);
+		if (retval) {
+			inode_unlock(inode);
+			kmem_cache_free(dio_cache, dio);
+			goto out;
 		}
 	}
 
 	/* Once we sampled i_size check for reads beyond EOF */
 	dio->i_size = i_size_read(inode);
 	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-		if (dio->flags & DIO_LOCKING)
+		if (lock_inode)
 			inode_unlock(inode);
 		kmem_cache_free(dio_cache, dio);
 		retval = 0;
@@ -1372,7 +1368,7 @@  do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	 * we can let i_mutex go now that its achieved its purpose
 	 * of protecting us from looking up uninitialized blocks.
 	 */
-	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
+	if (lock_inode)
 		inode_unlock(dio->inode);
 
 	/*