
[v9,02/41] iomap: support REQ_OP_ZONE_APPEND

Message ID cb409918a22b8f15ec20d7efad2281cb4c99d18c.1604065694.git.naohiro.aota@wdc.com (mailing list archive)
State: New, archived
Series: btrfs: zoned block device support

Commit Message

Naohiro Aota Oct. 30, 2020, 1:51 p.m. UTC
A ZONE_APPEND bio must follow the hardware's restrictions (e.g. not
exceeding max_zone_append_sectors) so that it is never split.
bio_iov_iter_get_pages() builds such a restricted bio using
__bio_iov_append_get_pages() when bio_op(bio) == REQ_OP_ZONE_APPEND.

To make use of this, we need to set the bio_op before calling
bio_iov_iter_get_pages(). This commit introduces IOMAP_F_ZONE_APPEND,
so that iomap users can set the flag to indicate that they want a
REQ_OP_ZONE_APPEND bio built within those restrictions.

Cc: Christoph Hellwig <hch@infradead.org>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: linux-xfs@vger.kernel.org
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/iomap/direct-io.c  | 22 ++++++++++++++++------
 include/linux/iomap.h |  1 +
 2 files changed, 17 insertions(+), 6 deletions(-)
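For context, the data dependency the commit message describes comes from
the dispatch inside bio_iov_iter_get_pages(): it inspects bio_op(bio)
before building the page vector. A simplified, paraphrased sketch of that
dispatch (block/bio.c around the v5.10 base of this series; not verbatim,
error handling and the bvec path elided):

int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret;

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			/* capped at max_zone_append_sectors; must not split */
			ret = __bio_iov_append_get_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	return bio->bi_vcnt ? 0 : ret;
}

Because the branch keys off bio_op(bio), setting bi_opf only after this
call would build a zone-append bio without the size cap; that is the
dependency the new flag makes explicit.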

Comments

Naohiro Aota Nov. 2, 2020, 5:34 a.m. UTC | #1
Note: this patch belongs to the following series:

https://lore.kernel.org/linux-btrfs/cover.1604065156.git.naohiro.aota@wdc.com/T/

The patch below uses the IOMAP_F_ZONE_APPEND flag to make the iomap bio
a REQ_OP_ZONE_APPEND bio:

https://lore.kernel.org/linux-btrfs/cover.1604065156.git.naohiro.aota@wdc.com/T/#m7b3f2d01ab554ea4cba244608655268211928799
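For illustration, the consumer side would look roughly like the sketch
below: a zoned filesystem's ->iomap_begin() tags the mapping with
IOMAP_F_ZONE_APPEND. This is a hypothetical example, not code from the
btrfs patch; the function name and the extent lookup are placeholders.

static int example_zoned_iomap_begin(struct inode *inode, loff_t pos,
				     loff_t length, unsigned int flags,
				     struct iomap *iomap, struct iomap *srcmap)
{
	/* ... resolve the extent backing [pos, pos + length) ... */
	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = pos;
	iomap->length = length;
	iomap->type = IOMAP_MAPPED;
	/* iomap->addr would be set from the zone location; elided here. */

	/* Ask the iomap dio code to issue this write as zone append. */
	if (flags & IOMAP_WRITE)
		iomap->flags |= IOMAP_F_ZONE_APPEND;

	return 0;
}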

On Fri, Oct 30, 2020 at 10:51:09PM +0900, Naohiro Aota wrote:
>A ZONE_APPEND bio must follow the hardware's restrictions (e.g. not
>exceeding max_zone_append_sectors) so that it is never split.
>bio_iov_iter_get_pages() builds such a restricted bio using
>__bio_iov_append_get_pages() when bio_op(bio) == REQ_OP_ZONE_APPEND.
>
>To make use of this, we need to set the bio_op before calling
>bio_iov_iter_get_pages(). This commit introduces IOMAP_F_ZONE_APPEND,
>so that iomap users can set the flag to indicate that they want a
>REQ_OP_ZONE_APPEND bio built within those restrictions.
>
>Cc: Christoph Hellwig <hch@infradead.org>
>Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
>Cc: linux-xfs@vger.kernel.org
>Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
>---
> fs/iomap/direct-io.c  | 22 ++++++++++++++++------
> include/linux/iomap.h |  1 +
> 2 files changed, 17 insertions(+), 6 deletions(-)
>
>diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
>index c1aafb2ab990..e534703c5594 100644
>--- a/fs/iomap/direct-io.c
>+++ b/fs/iomap/direct-io.c
>@@ -210,6 +210,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
> 	struct bio *bio;
> 	bool need_zeroout = false;
> 	bool use_fua = false;
>+	bool zone_append = false;
> 	int nr_pages, ret = 0;
> 	size_t copied = 0;
> 	size_t orig_count;
>@@ -241,6 +242,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
> 			use_fua = true;
> 	}
>
>+	zone_append = iomap->flags & IOMAP_F_ZONE_APPEND;
>+
> 	/*
> 	 * Save the original count and trim the iter to just the extent we
> 	 * are operating on right now.  The iter will be re-expanded once
>@@ -278,6 +281,19 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
> 		bio->bi_private = dio;
> 		bio->bi_end_io = iomap_dio_bio_end_io;
>
>+		if (dio->flags & IOMAP_DIO_WRITE) {
>+			bio->bi_opf = (zone_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE) |
>+				      REQ_SYNC | REQ_IDLE;
>+			if (use_fua)
>+				bio->bi_opf |= REQ_FUA;
>+			else
>+				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
>+		} else {
>+			WARN_ON_ONCE(zone_append);
>+
>+			bio->bi_opf = REQ_OP_READ;
>+		}
>+
> 		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
> 		if (unlikely(ret)) {
> 			/*
>@@ -292,14 +308,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
>
> 		n = bio->bi_iter.bi_size;
> 		if (dio->flags & IOMAP_DIO_WRITE) {
>-			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
>-			if (use_fua)
>-				bio->bi_opf |= REQ_FUA;
>-			else
>-				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
> 			task_io_account_write(n);
> 		} else {
>-			bio->bi_opf = REQ_OP_READ;
> 			if (dio->flags & IOMAP_DIO_DIRTY)
> 				bio_set_pages_dirty(bio);
> 		}
>diff --git a/include/linux/iomap.h b/include/linux/iomap.h
>index 4d1d3c3469e9..1bccd1880d0d 100644
>--- a/include/linux/iomap.h
>+++ b/include/linux/iomap.h
>@@ -54,6 +54,7 @@ struct vm_fault;
> #define IOMAP_F_SHARED		0x04
> #define IOMAP_F_MERGED		0x08
> #define IOMAP_F_BUFFER_HEAD	0x10
>+#define IOMAP_F_ZONE_APPEND	0x20
>
> /*
>  * Flags set by the core iomap code during operations:
>-- 
>2.27.0
>
Darrick J. Wong Nov. 2, 2020, 4:55 p.m. UTC | #2
On Fri, Oct 30, 2020 at 10:51:09PM +0900, Naohiro Aota wrote:
> A ZONE_APPEND bio must follow the hardware's restrictions (e.g. not
> exceeding max_zone_append_sectors) so that it is never split.
> bio_iov_iter_get_pages() builds such a restricted bio using
> __bio_iov_append_get_pages() when bio_op(bio) == REQ_OP_ZONE_APPEND.
> 
> To make use of this, we need to set the bio_op before calling
> bio_iov_iter_get_pages(). This commit introduces IOMAP_F_ZONE_APPEND,
> so that iomap users can set the flag to indicate that they want a
> REQ_OP_ZONE_APPEND bio built within those restrictions.

Would you mind putting this paragraph in a comment before the
bio_iov_iter_get_pages call?  I could easily see someone (i.e. 2021 me)
forgetting there's a data dependency and breaking this.

> Cc: Christoph Hellwig <hch@infradead.org>
> Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
> Cc: linux-xfs@vger.kernel.org
> Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
> ---
>  fs/iomap/direct-io.c  | 22 ++++++++++++++++------
>  include/linux/iomap.h |  1 +
>  2 files changed, 17 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index c1aafb2ab990..e534703c5594 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -210,6 +210,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
>  	struct bio *bio;
>  	bool need_zeroout = false;
>  	bool use_fua = false;
> +	bool zone_append = false;
>  	int nr_pages, ret = 0;
>  	size_t copied = 0;
>  	size_t orig_count;
> @@ -241,6 +242,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
>  			use_fua = true;
>  	}
>  
> +	zone_append = iomap->flags & IOMAP_F_ZONE_APPEND;
> +
>  	/*
>  	 * Save the original count and trim the iter to just the extent we
>  	 * are operating on right now.  The iter will be re-expanded once
> @@ -278,6 +281,19 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
>  		bio->bi_private = dio;
>  		bio->bi_end_io = iomap_dio_bio_end_io;
>  
> +		if (dio->flags & IOMAP_DIO_WRITE) {
> +			bio->bi_opf = (zone_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE) |
> +				      REQ_SYNC | REQ_IDLE;
> +			if (use_fua)
> +				bio->bi_opf |= REQ_FUA;
> +			else
> +				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
> +		} else {
> +			WARN_ON_ONCE(zone_append);
> +
> +			bio->bi_opf = REQ_OP_READ;
> +		}

Also, I think this should be hoisted into a separate helper to return
bi_opf rather than making iomap_dio_bio_actor even longer...

/*
 * Figure out the bio's operation flags from the dio request, the
 * mapping, and whether or not we want FUA.  Note that we can end up
 * clearing the WRITE_FUA flag in the dio request.
 */
static inline unsigned int
iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
{
	unsigned int opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages can
	 * set up the page vector appropriately for a ZONE_APPEND operation.
	 */
	bio->bi_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
	if (unlikely(ret)) {

--D

> +
>  		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
>  		if (unlikely(ret)) {
>  			/*
> @@ -292,14 +308,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
>  
>  		n = bio->bi_iter.bi_size;
>  		if (dio->flags & IOMAP_DIO_WRITE) {
> -			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
> -			if (use_fua)
> -				bio->bi_opf |= REQ_FUA;
> -			else
> -				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
>  			task_io_account_write(n);
>  		} else {
> -			bio->bi_opf = REQ_OP_READ;
>  			if (dio->flags & IOMAP_DIO_DIRTY)
>  				bio_set_pages_dirty(bio);
>  		}
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index 4d1d3c3469e9..1bccd1880d0d 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -54,6 +54,7 @@ struct vm_fault;
>  #define IOMAP_F_SHARED		0x04
>  #define IOMAP_F_MERGED		0x08
>  #define IOMAP_F_BUFFER_HEAD	0x10
> +#define IOMAP_F_ZONE_APPEND	0x20
>  
>  /*
>   * Flags set by the core iomap code during operations:
> -- 
> 2.27.0
>
Johannes Thumshirn Nov. 2, 2020, 5:39 p.m. UTC | #3
On 02/11/2020 17:55, Darrick J. Wong wrote:
> Also, I think this should be hoisted into a separate helper to return
> bi_opf rather than making iomap_dio_bio_actor even longer...


Fixed up, thanks.

(Speaking for Naohiro here)

Patch

diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index c1aafb2ab990..e534703c5594 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -210,6 +210,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 	struct bio *bio;
 	bool need_zeroout = false;
 	bool use_fua = false;
+	bool zone_append = false;
 	int nr_pages, ret = 0;
 	size_t copied = 0;
 	size_t orig_count;
@@ -241,6 +242,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 			use_fua = true;
 	}
 
+	zone_append = iomap->flags & IOMAP_F_ZONE_APPEND;
+
 	/*
 	 * Save the original count and trim the iter to just the extent we
 	 * are operating on right now.  The iter will be re-expanded once
@@ -278,6 +281,19 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 		bio->bi_private = dio;
 		bio->bi_end_io = iomap_dio_bio_end_io;
 
+		if (dio->flags & IOMAP_DIO_WRITE) {
+			bio->bi_opf = (zone_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE) |
+				      REQ_SYNC | REQ_IDLE;
+			if (use_fua)
+				bio->bi_opf |= REQ_FUA;
+			else
+				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
+		} else {
+			WARN_ON_ONCE(zone_append);
+
+			bio->bi_opf = REQ_OP_READ;
+		}
+
 		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
 		if (unlikely(ret)) {
 			/*
@@ -292,14 +308,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 
 		n = bio->bi_iter.bi_size;
 		if (dio->flags & IOMAP_DIO_WRITE) {
-			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
-			if (use_fua)
-				bio->bi_opf |= REQ_FUA;
-			else
-				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
 			task_io_account_write(n);
 		} else {
-			bio->bi_opf = REQ_OP_READ;
 			if (dio->flags & IOMAP_DIO_DIRTY)
 				bio_set_pages_dirty(bio);
 		}
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 4d1d3c3469e9..1bccd1880d0d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -54,6 +54,7 @@ struct vm_fault;
 #define IOMAP_F_SHARED		0x04
 #define IOMAP_F_MERGED		0x08
 #define IOMAP_F_BUFFER_HEAD	0x10
+#define IOMAP_F_ZONE_APPEND	0x20
 
 /*
  * Flags set by the core iomap code during operations:
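
As a final piece of context (block-layer behavior, not something this
patch changes): the reason callers want REQ_OP_ZONE_APPEND at all is
that the device chooses the write location within the zone and, on
completion, reports the sector the data actually landed at in
bio->bi_iter.bi_sector. A minimal, hypothetical completion handler;
only the bi_sector semantics come from the block layer's zone-append
contract, the rest is illustrative:

static void example_zone_append_end_io(struct bio *bio)
{
	/*
	 * For REQ_OP_ZONE_APPEND, bi_sector now holds the sector the
	 * data was written at, not the sector that was submitted.
	 */
	sector_t written = bio->bi_iter.bi_sector;

	/*
	 * record_on_disk_location() is a placeholder for the caller's
	 * bookkeeping, e.g. recording an extent's final position.
	 */
	record_on_disk_location(bio->bi_private, written);
	bio_put(bio);
}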