
[v8,11/11] zonefs: use REQ_OP_ZONE_APPEND for sync DIO

Message ID 20200427113153.31246-12-johannes.thumshirn@wdc.com (mailing list archive)
State New, archived
Series Introduce Zone Append for writing to zoned block devices

Commit Message

Johannes Thumshirn April 27, 2020, 11:31 a.m. UTC
Synchronous direct I/O to a sequential write only zone can be issued using
the new REQ_OP_ZONE_APPEND request operation. As dispatching multiple
BIOs can result in reordering, we cannot support asynchronous I/O via
this interface.

We can also only dispatch up to queue_max_zone_append_sectors() via the
new zone-append method and have to return a short write back to user-space
when an I/O larger than queue_max_zone_append_sectors() has been issued.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 fs/zonefs/super.c | 80 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 72 insertions(+), 8 deletions(-)

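For context, what the cap means for applications: a synchronous O_DIRECT write larger than the zone-append limit now completes short, so callers need the usual short-write retry loop. A minimal user-space sketch (not part of the patch), assuming a hypothetical zonefs sequential zone file at /mnt/zonefs/seq/0, a 4 KiB logical block size, and O_APPEND so each write lands at the zone write pointer:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2 * 1024 * 1024;	/* may exceed the zone-append limit */
	size_t done = 0;
	void *buf;
	ssize_t ret;
	int fd;

	/* Sequential zone files only accept writes at the write pointer. */
	fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT | O_APPEND);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* O_DIRECT requires logical-block-size aligned buffers. */
	if (posix_memalign(&buf, 4096, len)) {
		close(fd);
		return EXIT_FAILURE;
	}
	memset(buf, 0, len);

	/* Each write() becomes one zone append BIO and may come back short. */
	while (done < len) {
		ret = write(fd, (char *)buf + done, len - done);
		if (ret <= 0) {	/* treat 0 as an error to avoid looping forever */
			perror("write");
			break;
		}
		done += ret;
	}

	free(buf);
	close(fd);
	return done == len ? EXIT_SUCCESS : EXIT_FAILURE;
}

Because the kernel aligns the cap down to the filesystem block size, the short count is always block-aligned, so the retry offset keeps satisfying O_DIRECT's alignment rules.
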
Comments

Damien Le Moal April 27, 2020, 12:27 p.m. UTC | #1
On 2020/04/27 20:32, Johannes Thumshirn wrote:
> [...]

Acked-by: Damien Le Moal <damien.lemoal@wdc.com>

Patch

diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 3ce9829a6936..0bf7009f50a2 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/sched/mm.h>
 #include <linux/crc32.h>
+#include <linux/task_io_accounting_ops.h>
 
 #include "zonefs.h"
 
@@ -596,6 +597,61 @@ static const struct iomap_dio_ops zonefs_write_dio_ops = {
 	.end_io			= zonefs_file_write_dio_end_io,
 };
 
+static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+	struct block_device *bdev = inode->i_sb->s_bdev;
+	unsigned int max;
+	struct bio *bio;
+	ssize_t size;
+	int nr_pages;
+	ssize_t ret;
+
+	nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+	if (!nr_pages)
+		return 0;
+
+	max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
+	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+	iov_iter_truncate(from, max);
+
+	bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
+	if (!bio)
+		return -ENOMEM;
+
+	bio_set_dev(bio, bdev);
+	bio->bi_iter.bi_sector = zi->i_zsector;
+	bio->bi_write_hint = iocb->ki_hint;
+	bio->bi_ioprio = iocb->ki_ioprio;
+	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+	if (iocb->ki_flags & IOCB_DSYNC)
+		bio->bi_opf |= REQ_FUA;
+
+	ret = bio_iov_iter_get_pages(bio, from);
+	if (unlikely(ret)) {
+		bio_put(bio);	/* never submitted, just drop the reference */
+		return ret;
+	}
+	size = bio->bi_iter.bi_size;
+	task_io_account_write(size);
+
+	if (iocb->ki_flags & IOCB_HIPRI)
+		bio_set_polled(bio, iocb);
+
+	ret = submit_bio_wait(bio);
+
+	bio_put(bio);
+
+	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+	if (ret >= 0) {
+		iocb->ki_pos += size;
+		return size;
+	}
+
+	return ret;
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
@@ -611,6 +667,8 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(iocb->ki_filp);
 	struct zonefs_inode_info *zi = ZONEFS_I(inode);
 	struct super_block *sb = inode->i_sb;
+	bool sync = is_sync_kiocb(iocb);
+	bool append = false;
 	size_t count;
 	ssize_t ret;
 
@@ -619,7 +677,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
 	 * on the inode lock but the second goes through but is now unaligned).
 	 */
-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
 	    (iocb->ki_flags & IOCB_NOWAIT))
 		return -EOPNOTSUPP;
 
@@ -643,16 +701,22 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 	}
 
 	/* Enforce sequential writes (append only) in sequential zones */
-	mutex_lock(&zi->i_truncate_mutex);
-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
+	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+		mutex_lock(&zi->i_truncate_mutex);
+		if (iocb->ki_pos != zi->i_wpoffset) {
+			mutex_unlock(&zi->i_truncate_mutex);
+			ret = -EINVAL;
+			goto inode_unlock;
+		}
 		mutex_unlock(&zi->i_truncate_mutex);
-		ret = -EINVAL;
-		goto inode_unlock;
+		append = sync;
 	}
-	mutex_unlock(&zi->i_truncate_mutex);
 
-	ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
-			   &zonefs_write_dio_ops, is_sync_kiocb(iocb));
+	if (append)
+		ret = zonefs_file_dio_append(iocb, from);
+	else
+		ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+				   &zonefs_write_dio_ops, sync);
 	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
 	    (ret > 0 || ret == -EIOCBQUEUED)) {
 		if (ret > 0)
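
One more note on the capping logic in zonefs_file_dio_append() above: queue_max_zone_append_sectors() returns a limit in 512-byte sectors, so it is shifted into bytes and rounded down to the filesystem block size before the iov_iter is truncated, which is what keeps any short write block-aligned. A standalone sketch of that arithmetic, with made-up example values:

#include <stdio.h>

#define SECTOR_SHIFT	9			/* 512-byte block layer sectors */
/* Power-of-two round-down, matching the kernel's ALIGN_DOWN() here. */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long max_sectors = 1009;	/* queue_max_zone_append_sectors(), example value */
	unsigned long blocksize = 4096;		/* inode->i_sb->s_blocksize, example value */
	unsigned long max_bytes = ALIGN_DOWN(max_sectors << SECTOR_SHIFT, blocksize);

	/* 1009 sectors = 516608 bytes, rounded down to 516096 (126 blocks). */
	printf("iov_iter truncated to %lu bytes\n", max_bytes);
	return 0;
}

iov_iter_truncate() then clamps the iterator to this byte count, and anything beyond it is reported back to the caller as a short write.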