diff mbox series

[V9,5/9] nvmet: add bio get helper for different backends

Message ID 20210112042623.6316-6-chaitanya.kulkarni@wdc.com (mailing list archive)
State New, archived
Headers show
Series nvmet: add ZBD backend support | expand

Commit Message

Chaitanya Kulkarni Jan. 12, 2021, 4:26 a.m. UTC
With the addition of the zns backend now we have three different
backends with inline bio optimization. That leads to having duplicate
code for allocating or initializing the bio in all three backends:
generic bdev, passthru, and generic zns.

Add a helper function to reduce the duplicate code such that the helper
function accepts the bi_end_io callback which gets initialized for the
non-inline bio_alloc() case. This is due to the special case needed for
the passthru backend non-inline bio allocation bio_alloc() where we set
the bio->bi_end_io = bio_put; having this parameter avoids the extra
branch in the passthru fast path. For the rest of the backends, we set the
same bi_end_io callback for inline and non-inline cases, that is for
generic bdev we set it to nvmet_bio_done() and for generic zns we set it to
NULL.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 drivers/nvme/target/io-cmd-bdev.c |  7 +------
 drivers/nvme/target/nvmet.h       | 16 ++++++++++++++++
 drivers/nvme/target/passthru.c    |  8 +-------
 drivers/nvme/target/zns.c         |  8 +-------
 4 files changed, 19 insertions(+), 20 deletions(-)

Comments

Damien Le Moal Jan. 12, 2021, 5:37 a.m. UTC | #1
On 2021/01/12 13:27, Chaitanya Kulkarni wrote:
> With the addition of the zns backend now we have three different
> backends with inline bio optimization. That leads to having duplicate
> code for allocating or initializing the bio in all three backends:
> generic bdev, passsthru, and generic zns.
> 
> Add a helper function to reduce the duplicate code such that helper
> function accepts the bi_end_io callback which gets initialize for the
> non-inline bio_alloc() case. This is due to the special case needed for
> the passthru backend non-inline bio allocation bio_alloc() where we set
> the bio->bi_end_io = bio_put, having this parameter avoids the extra
> branch in the passthru fast path. For rest of the backends, we set the
> same bi_end_io callback for inline and non-inline cases, that is for
> generic bdev we set to nvmet_bio_done() and for generic zns we set to
> NULL.                            
> 
> Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
> ---
>  drivers/nvme/target/io-cmd-bdev.c |  7 +------
>  drivers/nvme/target/nvmet.h       | 16 ++++++++++++++++
>  drivers/nvme/target/passthru.c    |  8 +-------
>  drivers/nvme/target/zns.c         |  8 +-------
>  4 files changed, 19 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
> index 6178ef643962..72746e29cb0d 100644
> --- a/drivers/nvme/target/io-cmd-bdev.c
> +++ b/drivers/nvme/target/io-cmd-bdev.c
> @@ -266,12 +266,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
>  
>  	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
>  
> -	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
> -		bio = &req->b.inline_bio;
> -		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
> -	} else {
> -		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
> -	}
> +	bio = nvmet_req_bio_get(req, NULL);
>  	bio_set_dev(bio, req->ns->bdev);
>  	bio->bi_iter.bi_sector = sector;
>  	bio->bi_private = req;
> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
> index 7361665585a2..3fc84f79cce1 100644
> --- a/drivers/nvme/target/nvmet.h
> +++ b/drivers/nvme/target/nvmet.h
> @@ -652,4 +652,20 @@ nvmet_bdev_execute_zone_append(struct nvmet_req *req)
>  }
>  #endif /* CONFIG_BLK_DEV_ZONED */
>  
> +static inline struct bio *nvmet_req_bio_get(struct nvmet_req *req,
> +					    bio_end_io_t *bi_end_io)
> +{
> +	struct bio *bio;
> +
> +	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
> +		bio = &req->b.inline_bio;
> +		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
> +		return bio;
> +	}
> +
> +	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);

I have a doubt about the use of GFP_KERNEL here... Shouldn't these be GFP_NOIO ?
The code was like this so it may be OK, but without GFP_NOIO, is forward
progress guaranteed ? No recursion possible ?

> +	bio->bi_end_io = bi_end_io;
> +	return bio;
> +}
> +
>  #endif /* _NVMET_H */
> diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
> index b9776fc8f08f..54f765b566ee 100644
> --- a/drivers/nvme/target/passthru.c
> +++ b/drivers/nvme/target/passthru.c
> @@ -194,13 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
>  	if (req->sg_cnt > BIO_MAX_PAGES)
>  		return -EINVAL;
>  
> -	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
> -		bio = &req->p.inline_bio;
> -		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
> -	} else {
> -		bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
> -		bio->bi_end_io = bio_put;
> -	}
> +	bio = nvmet_req_bio_get(req, bio_put);
>  	bio->bi_opf = req_op(rq);
>  
>  	for_each_sg(req->sg, sg, req->sg_cnt, i) {
> diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
> index 2a71f56e568d..c32e93a3c7e1 100644
> --- a/drivers/nvme/target/zns.c
> +++ b/drivers/nvme/target/zns.c
> @@ -296,13 +296,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
>  		return;
>  	}
>  
> -	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
> -		bio = &req->b.inline_bio;
> -		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
> -	} else {
> -		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
> -	}
> -
> +	bio = nvmet_req_bio_get(req, NULL);
>  	bio_set_dev(bio, req->ns->bdev);
>  	bio->bi_iter.bi_sector = sect;
>  	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
>
Chaitanya Kulkarni Jan. 12, 2021, 5:55 a.m. UTC | #2
On 1/11/21 21:37, Damien Le Moal wrote:
>> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
>> index 7361665585a2..3fc84f79cce1 100644
>> --- a/drivers/nvme/target/nvmet.h
>> +++ b/drivers/nvme/target/nvmet.h
>> @@ -652,4 +652,20 @@ nvmet_bdev_execute_zone_append(struct nvmet_req *req)
>>  }
>>  #endif /* CONFIG_BLK_DEV_ZONED */
>>  
>> +static inline struct bio *nvmet_req_bio_get(struct nvmet_req *req,
>> +					    bio_end_io_t *bi_end_io)
>> +{
>> +	struct bio *bio;
>> +
>> +	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
>> +		bio = &req->b.inline_bio;
>> +		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
>> +		return bio;
>> +	}
>> +
>> +	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
> I have a doubt about the use of GFP_KERNEL here... Shouldn't these be GFP_NOIO ?
> The code was like this so it is may be OK, but without GFP_NOIO, is forward
> progress guaranteed ? No recursion possible ?
>
I've kept the original behavior; let me check whether this needs to be
GFP_NOIO instead of GFP_KERNEL,
and if so I'll send a separate patch with a proper message.

Thanks for pointing this out.
Christoph Hellwig Jan. 12, 2021, 7:33 a.m. UTC | #3
I'm not a huge fan of this helper, especially as it sets an end_io
callback only for the allocated case, which is a weird calling
convention.
Chaitanya Kulkarni Jan. 13, 2021, 5:03 a.m. UTC | #4
On 1/11/21 23:33, Christoph Hellwig wrote:
> I'm not a huge fan of this helper, especially as it sets an end_io
> callback only for the allocated case, which is a weird calling
> convention.
>

The patch has the right documentation, and that end_io is needed
for the passthru case. To get rid of the weirdness I can remove the
passthru case and make the end_io assignment apply to both inline and
non-inline bios to make it non-weird.

Since this eliminates exactly identical lines of code in at least two
backends, we should try to use the helper in a non-weird way.
Christoph Hellwig Jan. 18, 2021, 6:28 p.m. UTC | #5
On Wed, Jan 13, 2021 at 05:03:05AM +0000, Chaitanya Kulkarni wrote:
> On 1/11/21 23:33, Christoph Hellwig wrote:
> > I'm not a huge fan of this helper, especially as it sets an end_io
> > callback only for the allocated case, which is a weird calling
> > convention.
> >
> 
> The patch has a right documentation and that end_io is needed
> for passthru case. To get rid of the weirdness I can remove
> passthru case and make itend_io assign to inline and non-inline bio
> to make it non-weired.
> 
> Since this eliminates exactly identical lines of the code in atleast two
> backend which we should try to use the helper in non-wried way.

I really do not like helpers that just eliminate "duplicate lines" vs
encapsulating logic that makes sense on its own.
Chaitanya Kulkarni Jan. 19, 2021, 4:57 a.m. UTC | #6
On 1/18/21 10:28 AM, Christoph Hellwig wrote:
> On Wed, Jan 13, 2021 at 05:03:05AM +0000, Chaitanya Kulkarni wrote:
>> On 1/11/21 23:33, Christoph Hellwig wrote:
>>> I'm not a huge fan of this helper, especially as it sets an end_io
>>> callback only for the allocated case, which is a weird calling
>>> convention.
>>>
>> The patch has a right documentation and that end_io is needed
>> for passthru case. To get rid of the weirdness I can remove
>> passthru case and make itend_io assign to inline and non-inline bio
>> to make it non-weired.
>>
>> Since this eliminates exactly identical lines of the code in atleast two
>> backend which we should try to use the helper in non-wried way.
> I really do not like helper that just eliminate "duplicate lines" vs
> encapsulating logic that makes sense one its own.
>
Okay, I'll drop it, next version.
diff mbox series

Patch

diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 6178ef643962..72746e29cb0d 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -266,12 +266,7 @@  static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 
 	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
 
-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
-		bio = &req->b.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-	} else {
-		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
-	}
+	bio = nvmet_req_bio_get(req, NULL);
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_private = req;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7361665585a2..3fc84f79cce1 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -652,4 +652,20 @@  nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
 
+static inline struct bio *nvmet_req_bio_get(struct nvmet_req *req,
+					    bio_end_io_t *bi_end_io)
+{
+	struct bio *bio;
+
+	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+		bio = &req->b.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+		return bio;
+	}
+
+	bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+	bio->bi_end_io = bi_end_io;
+	return bio;
+}
+
 #endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b9776fc8f08f..54f765b566ee 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -194,13 +194,7 @@  static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 	if (req->sg_cnt > BIO_MAX_PAGES)
 		return -EINVAL;
 
-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
-		bio = &req->p.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-	} else {
-		bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
-		bio->bi_end_io = bio_put;
-	}
+	bio = nvmet_req_bio_get(req, bio_put);
 	bio->bi_opf = req_op(rq);
 
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 2a71f56e568d..c32e93a3c7e1 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -296,13 +296,7 @@  void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 		return;
 	}
 
-	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
-		bio = &req->b.inline_bio;
-		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
-	} else {
-		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
-	}
-
+	bio = nvmet_req_bio_get(req, NULL);
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sect;
 	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;