diff mbox series

[RFC] ceph: do not direct write executes in parallel if O_APPEND is set

Message ID 20200131133619.14209-1-xiubli@redhat.com (mailing list archive)
State New, archived
Headers show
Series [RFC] ceph: do not direct write executes in parallel if O_APPEND is set | expand

Commit Message

Xiubo Li Jan. 31, 2020, 1:36 p.m. UTC
From: Xiubo Li <xiubli@redhat.com>

In O_APPEND & O_DIRECT mode, the data from different writers may
possibly overlap each other. Just use the exclusive lock
instead in O_APPEND & O_DIRECT mode.

Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
 fs/ceph/file.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

Comments

Christoph Hellwig Feb. 3, 2020, 8:36 a.m. UTC | #1
On Fri, Jan 31, 2020 at 08:36:19AM -0500, xiubli@redhat.com wrote:
> From: Xiubo Li <xiubli@redhat.com>
> 
> In O_APPEND & O_DIRECT mode, the data from different writers will
> be possiblly overlapping each other. Just use the exclusive clock
> instead in O_APPEND & O_DIRECT mode.

s/clock/lock/
Xiubo Li Feb. 3, 2020, 9:14 a.m. UTC | #2
On 2020/2/3 16:36, Christoph Hellwig wrote:
> On Fri, Jan 31, 2020 at 08:36:19AM -0500, xiubli@redhat.com wrote:
>> From: Xiubo Li <xiubli@redhat.com>
>>
>> In O_APPEND & O_DIRECT mode, the data from different writers will
>> be possiblly overlapping each other. Just use the exclusive clock
>> instead in O_APPEND & O_DIRECT mode.
> s/clock/lock/
>
Will fix it :-)

Thanks.

BRs
diff mbox series

Patch

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 77f8e58cbb99..1cedba452a66 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1443,6 +1443,7 @@  static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct ceph_cap_flush *prealloc_cf;
 	ssize_t count, written = 0;
 	int err, want, got;
+	bool direct_lock = false;
 	loff_t pos;
 	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
 
@@ -1453,8 +1454,11 @@  static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (!prealloc_cf)
 		return -ENOMEM;
 
+	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
+		direct_lock = true;
+
 retry_snap:
-	if (iocb->ki_flags & IOCB_DIRECT)
+	if (direct_lock)
 		ceph_start_io_direct(inode);
 	else
 		ceph_start_io_write(inode);
@@ -1544,14 +1548,15 @@  static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 		/* we might need to revert back to that point */
 		data = *from;
-		if (iocb->ki_flags & IOCB_DIRECT) {
+		if (iocb->ki_flags & IOCB_DIRECT)
 			written = ceph_direct_read_write(iocb, &data, snapc,
 							 &prealloc_cf);
-			ceph_end_io_direct(inode);
-		} else {
+		else
 			written = ceph_sync_write(iocb, &data, pos, snapc);
+		if (direct_lock)
+			ceph_end_io_direct(inode);
+		else
 			ceph_end_io_write(inode);
-		}
 		if (written > 0)
 			iov_iter_advance(from, written);
 		ceph_put_snap_context(snapc);
@@ -1602,7 +1607,7 @@  static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
 	goto out_unlocked;
 out:
-	if (iocb->ki_flags & IOCB_DIRECT)
+	if (direct_lock)
 		ceph_end_io_direct(inode);
 	else
 		ceph_end_io_write(inode);