
blk-mq-sched: don't run the queue async from blk_mq_try_issue_directly()

Message ID 3ac4f49e-3bc0-7fc6-e93a-db9ec7ad21cf@kernel.dk (mailing list archive)
State New, archived

Commit Message

Jens Axboe March 14, 2017, 2:57 p.m. UTC
If we have scheduling enabled, we jump directly to insert-and-run.
That's fine, but we run the queue async and we don't pass in information
on whether we can block from this context or not. Fixup both these
cases.

Signed-off-by: Jens Axboe <axboe@fb.com>
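
For reference, a minimal sketch of how the changed call lines up with the insert helper's parameters; the prototype below is assumed from the blk-mq scheduler interface of this era and may not match the tree exactly:

	/*
	 * Assumed prototype (block/blk-mq-sched.h, roughly this series):
	 *
	 *   void blk_mq_sched_insert_request(struct request *rq, bool at_head,
	 *                                    bool run_queue, bool async,
	 *                                    bool can_block);
	 *
	 * The fallback in blk_mq_try_issue_directly() used to pass async=true
	 * and can_block=false; with this patch the queue is run synchronously
	 * and the caller's blocking context is passed through instead:
	 */
	blk_mq_sched_insert_request(rq, false, true, false, can_block);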

Comments

Bart Van Assche March 14, 2017, 3:17 p.m. UTC | #1
On 03/14/2017 07:57 AM, Jens Axboe wrote:
> If we have scheduling enabled, we jump directly to insert-and-run.
> That's fine, but we run the queue async and we don't pass in information
> on whether we can block from this context or not. Fixup both these
> cases.

How about renaming "can_block" into "may_sleep"? Otherwise this patch
looks fine to me.

Bart.
Jens Axboe March 14, 2017, 3:26 p.m. UTC | #2
On 03/14/2017 09:17 AM, Bart Van Assche wrote:
> On 03/14/2017 07:57 AM, Jens Axboe wrote:
>> If we have scheduling enabled, we jump directly to insert-and-run.
>> That's fine, but we run the queue async and we don't pass in information
>> on whether we can block from this context or not. Fixup both these
>> cases.
> 
> How about renaming "can_block" into "may_sleep"? Otherwise this patch
> looks fine to me.

Sure, either one is fine with me; at least to me they convey the same
information.
Omar Sandoval March 14, 2017, 5:48 p.m. UTC | #3
On Tue, Mar 14, 2017 at 08:57:50AM -0600, Jens Axboe wrote:
> If we have scheduling enabled, we jump directly to insert-and-run.
> That's fine, but we run the queue async and we don't pass in information
> on whether we can block from this context or not. Fixup both these
> cases.

Reviewed-by: Omar Sandoval <osandov@fb.com>

Just one question: we call blk_mq_get_driver_tag() with wait=false in
blk_mq_try_issue_directly(). Should we change that to wait=can_block?
Maybe it's pointless to try a direct issue if we'd have to wait for a
tag anyways, though.
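
(A condensed sketch of the path in question, with details elided; the
blk_mq_get_driver_tag() signature is assumed from this era of blk-mq rather
than taken from the patch.)

	static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
					      bool can_block)
	{
		struct request_queue *q = rq->q;
		struct blk_mq_hw_ctx *hctx;

		/* scheduler attached: go straight to insert-and-run */
		if (q->elevator)
			goto insert;

		/* wait=false: if no driver tag is free right now, don't sleep */
		if (!blk_mq_get_driver_tag(rq, &hctx, false))
			goto insert;

		/* ... issue rq to ->queue_rq() directly ... */
		return;

	insert:
		/* fall back: hand rq to the scheduler and run the queue sync */
		blk_mq_sched_insert_request(rq, false, true, false, can_block);
	}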

> Signed-off-by: Jens Axboe <axboe@fb.com>
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 159187a28d66..4196d6bee92d 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
>  	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
>  }
>  
> -static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
> +static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
> +				      bool can_block)
>  {
>  	struct request_queue *q = rq->q;
>  	struct blk_mq_queue_data bd = {
> @@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
>  	}
>  
>  insert:
> -	blk_mq_sched_insert_request(rq, false, true, true, false);
> +	blk_mq_sched_insert_request(rq, false, true, false, can_block);
>  }
>  
>  /*
> @@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  
>  		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
>  			rcu_read_lock();
> -			blk_mq_try_issue_directly(old_rq, &cookie);
> +			blk_mq_try_issue_directly(old_rq, &cookie, false);
>  			rcu_read_unlock();
>  		} else {
>  			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
> -			blk_mq_try_issue_directly(old_rq, &cookie);
> +			blk_mq_try_issue_directly(old_rq, &cookie, true);
>  			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
>  		}
>  		goto done;
> 
> -- 
> Jens Axboe
>
Jens Axboe March 14, 2017, 5:51 p.m. UTC | #4
On 03/14/2017 11:48 AM, Omar Sandoval wrote:
> On Tue, Mar 14, 2017 at 08:57:50AM -0600, Jens Axboe wrote:
>> If we have scheduling enabled, we jump directly to insert-and-run.
>> That's fine, but we run the queue async and we don't pass in information
>> on whether we can block from this context or not. Fixup both these
>> cases.
> 
> Reviewed-by: Omar Sandoval <osandov@fb.com>
> 
> Just one question: we call blk_mq_get_driver_tag() with wait=false in
> blk_mq_try_issue_directly(). Should we change that to wait=can_block?
> Maybe it's pointless to try a direct issue if we'd have to wait for a
> tag anyways, though.

Exactly, we don't want to wait for a tag; at that point we are just
pointlessly stalling an app that could perhaps be submitting more IO.
So I don't think we should factor that in here; better to let the
blocking vs non-blocking drivers behave the same in that regard.
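
The resulting behaviour, copied from the second hunk of the patch below with
annotations added: both branches keep the non-waiting tag attempt inside
blk_mq_try_issue_directly(), and only the fallback insert differs in whether
it is allowed to sleep.

	if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
		/*
		 * Driver must not sleep in ->queue_rq(): issue under RCU,
		 * so the fallback insert must not block either.
		 */
		rcu_read_lock();
		blk_mq_try_issue_directly(old_rq, &cookie, false);
		rcu_read_unlock();
	} else {
		/* Blocking driver: issue under SRCU, the insert may sleep. */
		srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
		blk_mq_try_issue_directly(old_rq, &cookie, true);
		srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
	}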

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66..4196d6bee92d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool can_block)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, can_block);
 }
 
 /*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;