[4/4] blk-mq: streamline blk_mq_make_request

Message ID 20170313154833.14165-5-hch@lst.de (mailing list archive)
State New, archived

Commit Message

Christoph Hellwig March 13, 2017, 3:48 p.m. UTC
Turn the different ways of merging or issuing I/O into a series of if/else
statements instead of the current maze of gotos.  Note that this means we
pin the CPU a little longer for some cases as the CTX put is moved to
common code at the end of the function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 67 +++++++++++++++++++++++-----------------------------------
 1 file changed, 27 insertions(+), 40 deletions(-)
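
The overall shape of the change, sketched in abbreviated form (this is an
illustration of the control flow, not the literal kernel code):

	/* before: dispatch paths jump forward into shared labels */
	if (unlikely(is_flush_fua)) {
		if (q->elevator)
			goto elv_insert;
		...
		goto run_queue;
	}
	...
elv_insert:
	blk_mq_put_ctx(data.ctx);
	...
	goto done;
run_queue:
	blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
done:
	return cookie;

	/* after: one if/else chain with a single exit; blk_mq_put_ctx()
	 * moves to the common tail, which is why the ctx (and thus the
	 * CPU) is held slightly longer in some cases */
	if (unlikely(is_flush_fua)) {
		...
	} else if (plug && q->nr_hw_queues == 1) {
		...
	} else if (plug && !blk_queue_nomerges(q)) {
		...
	} else if (is_sync) {
		...
	} else if (q->elevator) {
		...
	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		...
	}
	blk_mq_put_ctx(data.ctx);
	return cookie;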

Comments

Bart Van Assche March 14, 2017, 3:40 p.m. UTC | #1
On Mon, 2017-03-13 at 09:48 -0600, Christoph Hellwig wrote:
> Turn the different ways of merging or issuing I/O into a series of if/else
> statements instead of the current maze of gotos.  Note that this means we
> pin the CPU a little longer for some cases as the CTX put is moved to
> common code at the end of the function.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-mq.c | 67 +++++++++++++++++++++++-----------------------------------
>  1 file changed, 27 insertions(+), 40 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 48748cb799ed..18e449cc832f 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1534,16 +1534,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  
>  	cookie = request_to_qc_t(data.hctx, rq);
>  
> +	plug = current->plug;
>  	if (unlikely(is_flush_fua)) {
> -		if (q->elevator)
> -			goto elv_insert;
>  		blk_mq_bio_to_request(rq, bio);
> -		blk_insert_flush(rq);
> -		goto run_queue;
> -	}
> -
> -	plug = current->plug;
> -	if (plug && q->nr_hw_queues == 1) {
> +		if (q->elevator) {
> +			blk_mq_sched_insert_request(rq, false, true,
> +						!is_sync || is_flush_fua, true);
> +		} else {
> +			blk_insert_flush(rq);
> +			blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
> +		}
> +	} else if (plug && q->nr_hw_queues == 1) {
>  		struct request *last = NULL;
>  
>  		blk_mq_bio_to_request(rq, bio);

This change introduces the following construct:

if (is_flush_fua) {
	/* use is_flush_fua */
} else ...

Have you considered simplifying the code that uses is_flush_fua further?
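
For concreteness, one such simplification: inside the unlikely(is_flush_fua)
branch the expression "!is_sync || is_flush_fua" always evaluates to true, so
the branch could presumably become (a sketch, assuming the argument in
question is the async flag):

	if (q->elevator) {
		blk_mq_sched_insert_request(rq, false, true, true, true);
	} else {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
	}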

> @@ -1562,8 +1563,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  		else
>  			last = list_entry_rq(plug->mq_list.prev);
>  
> -		blk_mq_put_ctx(data.ctx);
> -
>  		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
>  		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
>  			blk_flush_plug_list(plug, false);
> @@ -1571,56 +1570,44 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
>  		}
>  
>  		list_add_tail(&rq->queuelist, &plug->mq_list);
> -		goto done;
> -	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
> -		struct request *old_rq = NULL;
> -
> +	} else if (plug && !blk_queue_nomerges(q)) {
>  		blk_mq_bio_to_request(rq, bio);
>  
>  		/*
>  		 * We do limited plugging. If the bio can be merged, do that.
>  		 * Otherwise the existing request in the plug list will be
>  		 * issued. So the plug list will have one request at most
> +		 *
> +		 * The plug list might get flushed before this. If that happens,
> +		 * the plug list is emptry and same_queue_rq is invalid.
>  		 */

"emptry" looks like a typo?

> -		if (plug) {
> -			/*
> -			 * The plug list might get flushed before this. If that
> -			 * happens, same_queue_rq is invalid and plug list is
> -			 * empty
> -			 */
> -			if (same_queue_rq && !list_empty(&plug->mq_list)) {
> -				old_rq = same_queue_rq;
> -				list_del_init(&old_rq->queuelist);
> -			}
> -			list_add_tail(&rq->queuelist, &plug->mq_list);
> -		} else /* is_sync */
> -			old_rq = rq;
> -		blk_mq_put_ctx(data.ctx);
> -		if (old_rq)
> -			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
> -		goto done;
> -	}
> +		if (!list_empty(&plug->mq_list))
> +			list_del_init(&same_queue_rq->queuelist);
> +		else
> +			same_queue_rq = NULL;
>  
> -	if (q->elevator) {
> -elv_insert:
> -		blk_mq_put_ctx(data.ctx);
> +		list_add_tail(&rq->queuelist, &plug->mq_list);
> +		if (same_queue_rq)
> +			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
> +					&cookie);
> +	} else if (is_sync) {
> +		blk_mq_bio_to_request(rq, bio);
> +		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
> +	} else if (q->elevator) {
>  		blk_mq_bio_to_request(rq, bio);
>  		blk_mq_sched_insert_request(rq, false, true,
>  						!is_sync || is_flush_fua, true);
> -		goto done;
> -	}
> -	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
> +	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
>  		/*
>  		 * For a SYNC request, send it to the hardware immediately. For
>  		 * an ASYNC request, just ensure that we run it later on. The
>  		 * latter allows for merging opportunities and more efficient
>  		 * dispatching.
>  		 */
> -run_queue:
>  		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
>  	}

Since this code occurs in the else branch of an if (is_flush_fua) statement,
can "|| is_flush_fua" be left out?

What I noticed while reviewing this patch is that it contains multiple changes:
not only have the goto statements been eliminated, but the old_rq variable has
been eliminated too. I think this patch would be easier to review if it were
split into three patches: one that removes the old_rq variable, one that
eliminates the goto statements, and one that removes the dead code.

Thanks,

Bart.

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 48748cb799ed..18e449cc832f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1534,16 +1534,17 @@  static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	cookie = request_to_qc_t(data.hctx, rq);
 
+	plug = current->plug;
 	if (unlikely(is_flush_fua)) {
-		if (q->elevator)
-			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_insert_flush(rq);
-		goto run_queue;
-	}
-
-	plug = current->plug;
-	if (plug && q->nr_hw_queues == 1) {
+		if (q->elevator) {
+			blk_mq_sched_insert_request(rq, false, true,
+						!is_sync || is_flush_fua, true);
+		} else {
+			blk_insert_flush(rq);
+			blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
+		}
+	} else if (plug && q->nr_hw_queues == 1) {
 		struct request *last = NULL;
 
 		blk_mq_bio_to_request(rq, bio);
@@ -1562,8 +1563,6 @@  static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		else
 			last = list_entry_rq(plug->mq_list.prev);
 
-		blk_mq_put_ctx(data.ctx);
-
 		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
 		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_flush_plug_list(plug, false);
@@ -1571,56 +1570,44 @@  static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		list_add_tail(&rq->queuelist, &plug->mq_list);
-		goto done;
-	} else if (((plug && !blk_queue_nomerges(q)) || is_sync)) {
-		struct request *old_rq = NULL;
-
+	} else if (plug && !blk_queue_nomerges(q)) {
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
 		 * We do limited plugging. If the bio can be merged, do that.
 		 * Otherwise the existing request in the plug list will be
 		 * issued. So the plug list will have one request at most
+		 *
+		 * The plug list might get flushed before this. If that happens,
+		 * the plug list is emptry and same_queue_rq is invalid.
 		 */
-		if (plug) {
-			/*
-			 * The plug list might get flushed before this. If that
-			 * happens, same_queue_rq is invalid and plug list is
-			 * empty
-			 */
-			if (same_queue_rq && !list_empty(&plug->mq_list)) {
-				old_rq = same_queue_rq;
-				list_del_init(&old_rq->queuelist);
-			}
-			list_add_tail(&rq->queuelist, &plug->mq_list);
-		} else /* is_sync */
-			old_rq = rq;
-		blk_mq_put_ctx(data.ctx);
-		if (old_rq)
-			blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
-		goto done;
-	}
+		if (!list_empty(&plug->mq_list))
+			list_del_init(&same_queue_rq->queuelist);
+		else
+			same_queue_rq = NULL;
 
-	if (q->elevator) {
-elv_insert:
-		blk_mq_put_ctx(data.ctx);
+		list_add_tail(&rq->queuelist, &plug->mq_list);
+		if (same_queue_rq)
+			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+					&cookie);
+	} else if (is_sync) {
+		blk_mq_bio_to_request(rq, bio);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+	} else if (q->elevator) {
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
 						!is_sync || is_flush_fua, true);
-		goto done;
-	}
-	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+	} else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
 		/*
 		 * For a SYNC request, send it to the hardware immediately. For
 		 * an ASYNC request, just ensure that we run it later on. The
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
-run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
+
 	blk_mq_put_ctx(data.ctx);
-done:
 	return cookie;
 }