
[V6,1/8] blk-mq: assign rq->tag in blk_mq_get_driver_tag

Message ID 20200407092901.314228-2-ming.lei@redhat.com (mailing list archive)
State New, archived
Series blk-mq: improvement CPU hotplug

Commit Message

Ming Lei April 7, 2020, 9:28 a.m. UTC
Especially for the none elevator, rq->tag is assigned when the request is
allocated, so there is no way to tell whether a request is currently
being dispatched. The driver-tag code path also ends up differing a bit
between the none case and the I/O scheduler case.

When a hctx becomes inactive, we have to prevent any request from being
dispatched to the LLD, and getting the driver tag provides a perfect
chance to do that. Meanwhile we can drain any such requests by checking
whether rq->tag is assigned.

So delay assigning rq->tag until blk_mq_get_driver_tag() is called.

This also simplifies the code dealing with driver tags a lot.

Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Garry <john.garry@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-flush.c | 18 +++---------------
 block/blk-mq.c    | 41 +++++++++++++++++++++--------------------
 block/blk-mq.h    | 21 ++++++++++-----------
 block/blk.h       |  5 -----
 4 files changed, 34 insertions(+), 51 deletions(-)
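
To make the resulting tag flow concrete, below is a small stand-alone C
sketch of the idea (not kernel code; the model_* names are invented for
illustration): the driver tag stays -1 until the driver-tag step runs,
in the none case the internal tag simply doubles as the driver tag, and
an inactive hctx can then be drained by waiting for requests whose tag
is >= 0 to complete.

/*
 * Not kernel code: a stand-alone user-space model of the tag flow
 * described above.  All names here are invented for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_rq {
	int tag;		/* driver tag, stays -1 until dispatch */
	int internal_tag;	/* assigned at allocation time */
	bool has_elevator;	/* does the queue use an I/O scheduler? */
};

/* Allocation only sets internal_tag, mirroring the reworked init path. */
static void model_alloc(struct model_rq *rq, int tag, bool has_elevator)
{
	rq->internal_tag = tag;
	rq->tag = -1;
	rq->has_elevator = has_elevator;
}

/*
 * Modelled on the new driver-tag step: with no elevator the internal
 * tag doubles as the driver tag; otherwise a real driver tag would be
 * allocated (passed in here as fake_driver_tag, -1 when exhausted).
 */
static bool model_get_driver_tag(struct model_rq *rq, int fake_driver_tag)
{
	if (rq->tag >= 0)
		return true;
	if (!rq->has_elevator) {
		rq->tag = rq->internal_tag;
		return true;
	}
	rq->tag = fake_driver_tag;
	return rq->tag >= 0;
}

int main(void)
{
	struct model_rq rq;

	model_alloc(&rq, 7, false);
	printf("after alloc:   tag=%d (not dispatched yet)\n", rq.tag);
	model_get_driver_tag(&rq, -1);
	printf("after get tag: tag=%d (in flight, visible to drain)\n", rq.tag);
	return 0;
}

The real implementation is the blk_mq_get_driver_tag() change in the
patch below; the sketch only shows why "rq->tag >= 0" becomes a usable
"request handed to the LLD" marker.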

Comments

Christoph Hellwig April 7, 2020, 5:14 p.m. UTC | #1
On Tue, Apr 07, 2020 at 05:28:54PM +0800, Ming Lei wrote:
> @@ -472,14 +462,18 @@ static void __blk_mq_free_request(struct request *rq)
>  	struct request_queue *q = rq->q;
>  	struct blk_mq_ctx *ctx = rq->mq_ctx;
>  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
> -	const int sched_tag = rq->internal_tag;
> +	const int tag = rq->internal_tag;
> +	bool has_sched = !!hctx->sched_tags;
>  
>  	blk_pm_mark_last_busy(rq);
>  	rq->mq_hctx = NULL;
> +	if (!has_sched)
> +		blk_mq_put_tag(hctx->tags, ctx, tag);
> +	else if (rq->tag >= 0)
>  		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
> +
> +	if (has_sched)
> +		blk_mq_put_tag(hctx->sched_tags, ctx, tag);

This looks weird to me.  Why not simply:

	if (hctx->sched_tags) {
		if (rq->tag >= 0)
			blk_mq_put_tag(hctx->tags, ctx, rq->tag);
		blk_mq_put_tag(hctx->sched_tags, ctx, rq->internal_tag);
	} else {
		blk_mq_put_tag(hctx->tags, ctx, rq->internal_tag);
	}


> @@ -1037,14 +1031,21 @@ bool blk_mq_get_driver_tag(struct request *rq)

FYI, it seems like blk_mq_get_driver_tag can be marked static.

Otherwise this looks pretty sensible to me.
Ming Lei April 8, 2020, 1:38 a.m. UTC | #2
On Tue, Apr 07, 2020 at 07:14:05PM +0200, Christoph Hellwig wrote:
> On Tue, Apr 07, 2020 at 05:28:54PM +0800, Ming Lei wrote:
> > @@ -472,14 +462,18 @@ static void __blk_mq_free_request(struct request *rq)
> >  	struct request_queue *q = rq->q;
> >  	struct blk_mq_ctx *ctx = rq->mq_ctx;
> >  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
> > -	const int sched_tag = rq->internal_tag;
> > +	const int tag = rq->internal_tag;
> > +	bool has_sched = !!hctx->sched_tags;
> >  
> >  	blk_pm_mark_last_busy(rq);
> >  	rq->mq_hctx = NULL;
> > +	if (!has_sched)
> > +		blk_mq_put_tag(hctx->tags, ctx, tag);
> > +	else if (rq->tag >= 0)
> >  		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
> > +
> > +	if (has_sched)
> > +		blk_mq_put_tag(hctx->sched_tags, ctx, tag);
> 
> This looks weird to me.  Why not simply:
> 
> 	if (hctx->sched_tags) {
> 		if (rq->tag >= 0)
> 			blk_mq_put_tag(hctx->tags, ctx, rq->tag);
> 		blk_mq_put_tag(hctx->sched_tags, ctx, rq->internal_tag);
> 	} else {
> 		blk_mq_put_tag(hctx->tags, ctx, rq->internal_tag);
> 	}

Nice!

> 
> 
> > @@ -1037,14 +1031,21 @@ bool blk_mq_get_driver_tag(struct request *rq)
> 
> FYI, it seems like blk_mq_get_driver_tag can be marked static.
> 
> Otherwise this looks pretty sensible to me.

Indeed, just forgot to do that.


Thanks,
Ming

Patch

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 5cc775bdb06a..7b247c0470c0 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -239,13 +239,8 @@  static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		error = fq->rq_status;
 
 	hctx = flush_rq->mq_hctx;
-	if (!q->elevator) {
-		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
-		flush_rq->tag = -1;
-	} else {
-		blk_mq_put_driver_tag(flush_rq);
-		flush_rq->internal_tag = -1;
-	}
+	flush_rq->internal_tag = -1;
+	blk_mq_put_driver_tag(flush_rq);
 
 	running = &fq->flush_queue[fq->flush_running_idx];
 	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
@@ -320,14 +315,7 @@  static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	flush_rq->mq_ctx = first_rq->mq_ctx;
 	flush_rq->mq_hctx = first_rq->mq_hctx;
 
-	if (!q->elevator) {
-		fq->orig_rq = first_rq;
-		flush_rq->tag = first_rq->tag;
-		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
-	} else {
-		flush_rq->internal_tag = first_rq->internal_tag;
-	}
-
+	flush_rq->internal_tag = first_rq->internal_tag;
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d92088dec6c3..f6f1ba3ff783 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -276,18 +276,8 @@  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	struct request *rq = tags->static_rqs[tag];
 	req_flags_t rq_flags = 0;
 
-	if (data->flags & BLK_MQ_REQ_INTERNAL) {
-		rq->tag = -1;
-		rq->internal_tag = tag;
-	} else {
-		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
-			rq_flags = RQF_MQ_INFLIGHT;
-			atomic_inc(&data->hctx->nr_active);
-		}
-		rq->tag = tag;
-		rq->internal_tag = -1;
-		data->hctx->tags->rqs[rq->tag] = rq;
-	}
+	rq->internal_tag = tag;
+	rq->tag = -1;
 
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
@@ -472,14 +462,18 @@  static void __blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-	const int sched_tag = rq->internal_tag;
+	const int tag = rq->internal_tag;
+	bool has_sched = !!hctx->sched_tags;
 
 	blk_pm_mark_last_busy(rq);
 	rq->mq_hctx = NULL;
-	if (rq->tag != -1)
+	if (!has_sched)
+		blk_mq_put_tag(hctx->tags, ctx, tag);
+	else if (rq->tag >= 0)
 		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
-	if (sched_tag != -1)
-		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
+
+	if (has_sched)
+		blk_mq_put_tag(hctx->sched_tags, ctx, tag);
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
@@ -527,7 +521,7 @@  inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 		blk_stat_add(rq, now);
 	}
 
-	if (rq->internal_tag != -1)
+	if (rq->q->elevator && rq->internal_tag != -1)
 		blk_mq_sched_completed_request(rq, now);
 
 	blk_account_io_done(rq, now);
@@ -1037,14 +1031,21 @@  bool blk_mq_get_driver_tag(struct request *rq)
 	};
 	bool shared;
 
-	if (rq->tag != -1)
-		return true;
+	if (rq->tag >= 0)
+		goto allocated;
+
+	if (!rq->q->elevator) {
+		rq->tag = rq->internal_tag;
+		goto allocated;
+	}
 
 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
 		data.flags |= BLK_MQ_REQ_RESERVED;
 
-	shared = blk_mq_tag_busy(data.hctx);
 	rq->tag = blk_mq_get_tag(&data);
+
+allocated:
+	shared = blk_mq_tag_busy(data.hctx);
 	if (rq->tag >= 0) {
 		if (shared) {
 			rq->rq_flags |= RQF_MQ_INFLIGHT;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 10bfdfb494fa..d25429a4932c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -197,26 +197,25 @@  static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 	return true;
 }
 
-static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
-					   struct request *rq)
+static inline void blk_mq_put_driver_tag(struct request *rq)
 {
-	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	int tag = rq->tag;
+
+	if (tag < 0)
+		return;
+
 	rq->tag = -1;
 
+	if (rq->q->elevator)
+		blk_mq_put_tag(hctx->tags, rq->mq_ctx, tag);
+
 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
 		atomic_dec(&hctx->nr_active);
 	}
 }
 
-static inline void blk_mq_put_driver_tag(struct request *rq)
-{
-	if (rq->tag == -1 || rq->internal_tag == -1)
-		return;
-
-	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
-}
-
 static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
 	int cpu;
diff --git a/block/blk.h b/block/blk.h
index 0b8884353f6b..c824d66f24e2 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -25,11 +25,6 @@  struct blk_flush_queue {
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;
 
-	/*
-	 * flush_rq shares tag with this rq, both can't be active
-	 * at the same time
-	 */
-	struct request		*orig_rq;
 	struct lock_class_key	key;
 	spinlock_t		mq_flush_lock;
 };