
[4/6] blk-mq: open code __blk_mq_alloc_request in blk_mq_alloc_request_hctx

Message ID: 20200520170635.2094101-5-hch@lst.de (mailing list archive)
State: New, archived
Series: [1/6] blk-mq: remove the bio argument to ->prepare_request

Commit Message

Christoph Hellwig May 20, 2020, 5:06 p.m. UTC
blk_mq_alloc_request_hctx is only used for NVMeoF connect commands, so
tailor it to the specific requirements, and don't bother the general
fast path code with its special twinkles.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)
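
For context, a minimal caller-side sketch (not part of this series): an NVMe
over Fabrics driver allocates its connect command on a specific hardware queue
roughly as below.  The helper name and the exact op/flag combination are
illustrative assumptions, not taken from the patch.

#include <linux/blk-mq.h>

/*
 * Illustrative sketch only: allocate a fabrics connect request tied to
 * hardware queue @hctx_idx.  The connect command is assumed to come from
 * the reserved tag pool and must not sleep waiting for a tag, hence the
 * BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED flags.
 */
static struct request *connect_rq_alloc(struct request_queue *q,
					unsigned int hctx_idx)
{
	return blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
					 BLK_MQ_REQ_NOWAIT |
					 BLK_MQ_REQ_RESERVED,
					 hctx_idx);
}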

Comments

Hannes Reinecke May 22, 2020, 9:17 a.m. UTC | #1
On 5/20/20 7:06 PM, Christoph Hellwig wrote:
> blk_mq_alloc_request_hctx is only used for NVMeoF connect commands, so
> tailor it to the specific requirements, and don't both the general

bother?

> fast path code with its special twinkles.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   block/blk-mq.c | 44 +++++++++++++++++++++++---------------------
>   1 file changed, 23 insertions(+), 21 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 1ffbc5d9e7cfe..42aee2978464b 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -351,21 +351,13 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
>   {
>   	struct request_queue *q = data->q;
>   	struct elevator_queue *e = q->elevator;
> -	unsigned int tag;
> -	bool clear_ctx_on_error = false;
>   	u64 alloc_time_ns = 0;
> +	unsigned int tag;
>   
>   	/* alloc_time includes depth and tag waits */
>   	if (blk_queue_rq_alloc_time(q))
>   		alloc_time_ns = ktime_get_ns();
>   
> -	if (likely(!data->ctx)) {
> -		data->ctx = blk_mq_get_ctx(q);
> -		clear_ctx_on_error = true;
> -	}
> -	if (likely(!data->hctx))
> -		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
> -						data->ctx);
>   	if (data->cmd_flags & REQ_NOWAIT)
>   		data->flags |= BLK_MQ_REQ_NOWAIT;
>   
> @@ -381,17 +373,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
>   		    e->type->ops.limit_depth &&
>   		    !(data->flags & BLK_MQ_REQ_RESERVED))
>   			e->type->ops.limit_depth(data->cmd_flags, data);
> -	} else {
> -		blk_mq_tag_busy(data->hctx);
>   	}
>   
> +	data->ctx = blk_mq_get_ctx(q);
> +	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
> +	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
> +		blk_mq_tag_busy(data->hctx);
> +
>   	tag = blk_mq_get_tag(data);
> -	if (tag == BLK_MQ_TAG_FAIL) {
> -		if (clear_ctx_on_error)
> -			data->ctx = NULL;
> +	if (tag == BLK_MQ_TAG_FAIL)
>   		return NULL;
> -	}
> -
>   	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
>   }
>   
> @@ -431,17 +422,22 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>   		.flags		= flags,
>   		.cmd_flags	= op,
>   	};
> -	struct request *rq;
> +	u64 alloc_time_ns = 0;
>   	unsigned int cpu;
> +	unsigned int tag;
>   	int ret;
>   
> +	/* alloc_time includes depth and tag waits */
> +	if (blk_queue_rq_alloc_time(q))
> +		alloc_time_ns = ktime_get_ns();
> +
>   	/*
>   	 * If the tag allocator sleeps we could get an allocation for a
>   	 * different hardware context.  No need to complicate the low level
>   	 * allocator for this for the rare use case of a command tied to
>   	 * a specific queue.
>   	 */
> -	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
> +	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
>   		return ERR_PTR(-EINVAL);
>   
>   	if (hctx_idx >= q->nr_hw_queues)
> @@ -462,11 +458,17 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>   	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
>   	data.ctx = __blk_mq_get_ctx(q, cpu);
>   
> +	if (q->elevator)
> +		data.flags |= BLK_MQ_REQ_INTERNAL;
> +	else
> +		blk_mq_tag_busy(data.hctx);
> +
>   	ret = -EWOULDBLOCK;
> -	rq = __blk_mq_alloc_request(&data);
> -	if (!rq)
> +	tag = blk_mq_get_tag(&data);
> +	if (tag == BLK_MQ_TAG_FAIL)
>   		goto out_queue_exit;
> -	return rq;
> +	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
> +
>   out_queue_exit:
>   	blk_queue_exit(q);
>   	return ERR_PTR(ret);
> 
Other than that:

Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1ffbc5d9e7cfe..42aee2978464b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -351,21 +351,13 @@  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
-	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
+	unsigned int tag;
 
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -381,17 +373,16 @@  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
-	} else {
-		blk_mq_tag_busy(data->hctx);
 	}
 
+	data->ctx = blk_mq_get_ctx(q);
+	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+		blk_mq_tag_busy(data->hctx);
+
 	tag = blk_mq_get_tag(data);
-	if (tag == BLK_MQ_TAG_FAIL) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+	if (tag == BLK_MQ_TAG_FAIL)
 		return NULL;
-	}
-
 	return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
 }
 
@@ -431,17 +422,22 @@  struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.flags		= flags,
 		.cmd_flags	= op,
 	};
-	struct request *rq;
+	u64 alloc_time_ns = 0;
 	unsigned int cpu;
+	unsigned int tag;
 	int ret;
 
+	/* alloc_time includes depth and tag waits */
+	if (blk_queue_rq_alloc_time(q))
+		alloc_time_ns = ktime_get_ns();
+
 	/*
 	 * If the tag allocator sleeps we could get an allocation for a
 	 * different hardware context.  No need to complicate the low level
 	 * allocator for this for the rare use case of a command tied to
 	 * a specific queue.
 	 */
-	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
+	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
 		return ERR_PTR(-EINVAL);
 
 	if (hctx_idx >= q->nr_hw_queues)
@@ -462,11 +458,17 @@  struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	if (q->elevator)
+		data.flags |= BLK_MQ_REQ_INTERNAL;
+	else
+		blk_mq_tag_busy(data.hctx);
+
 	ret = -EWOULDBLOCK;
-	rq = __blk_mq_alloc_request(&data);
-	if (!rq)
+	tag = blk_mq_get_tag(&data);
+	if (tag == BLK_MQ_TAG_FAIL)
 		goto out_queue_exit;
-	return rq;
+	return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
+
 out_queue_exit:
 	blk_queue_exit(q);
 	return ERR_PTR(ret);
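
A usage note to go with the relaxed WARN_ON_ONCE check above: a non-sleeping,
reserved allocation only works if the driver set aside a reserved tag for the
connect command when creating its tag set.  A rough sketch, with an
illustrative helper name and numbers that are assumptions rather than values
from the patch:

#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/string.h>

/*
 * Sketch of a tag set that holds back one tag per hardware queue for the
 * connect command, so blk_mq_alloc_request_hctx() can allocate it with
 * BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED even when the regular tags are
 * exhausted.
 */
static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	memset(set, 0, sizeof(*set));
	set->ops		= ops;
	set->nr_hw_queues	= 4;		/* illustrative */
	set->queue_depth	= 128;		/* illustrative */
	set->reserved_tags	= 1;		/* fabric connect */
	set->numa_node		= NUMA_NO_NODE;
	set->flags		= BLK_MQ_F_SHOULD_MERGE;
	return blk_mq_alloc_tag_set(set);
}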