
block: account flush request in inflight sysfs files

Message ID 20200526020139.21464-1-houtao1@huawei.com (mailing list archive)
State New, archived
Series block: account flush request in inflight sysfs files

Commit Message

Hou Tao May 26, 2020, 2:01 a.m. UTC
Account the flush request in the sysfs files which show the number of
in-flight IOs (e.g. /sys/block/xxx/inflight). This is especially useful
for debugging when the completion of a flush request is slow but the
containing request itself completes quickly.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 block/blk-flush.c |  1 +
 block/blk-mq.c    | 12 ++++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
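
For illustration only, not part of the patch: a minimal user-space sketch of how the inflight file mentioned above could be read while debugging. The helper program and the default device name "sda" are hypothetical; the file holds two counters, in-flight reads and in-flight writes, and with this patch a pending flush request shows up in the write column.

/* read_inflight.c - illustrative sketch, not part of the patch */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "sda";	/* hypothetical default */
	char path[256];
	unsigned int r, w;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/inflight", dev);
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	/* the file exports two counters: in-flight reads and in-flight writes */
	if (fscanf(f, "%u %u", &r, &w) != 2) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%s: %u reads, %u writes in flight (flush included with this patch)\n",
	       dev, r, w);
	return 0;
}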

Comments

Hou Tao June 3, 2020, 7:36 a.m. UTC | #1
ping ?

On 2020/5/26 10:01, Hou Tao wrote:
> Account the flush request in the sysfs files which show the number of
> in-flight IOs (e.g. /sys/block/xxx/inflight). This is especially useful
> for debugging when the completion of a flush request is slow but the
> containing request itself completes quickly.
> 
> Signed-off-by: Hou Tao <houtao1@huawei.com>
> ---
>  block/blk-flush.c |  1 +
>  block/blk-mq.c    | 12 ++++++++++--
>  2 files changed, 11 insertions(+), 2 deletions(-)
> 
> diff --git a/block/blk-flush.c b/block/blk-flush.c
> index c7f396e3d5e2..14606f1e9273 100644
> --- a/block/blk-flush.c
> +++ b/block/blk-flush.c
> @@ -329,6 +329,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
>  	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
>  	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
>  	flush_rq->rq_disk = first_rq->rq_disk;
> +	flush_rq->part = first_rq->part;
>  	flush_rq->end_io = flush_end_io;
>  
>  	blk_flush_queue_rq(flush_rq, false);
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index a7785df2c944..1ed420a9a316 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -96,14 +96,22 @@ struct mq_inflight {
>  	unsigned int inflight[2];
>  };
>  
> +static inline int op_is_write_or_flush(unsigned int op)
> +{
> +	return op_is_write(op) || op == REQ_OP_FLUSH;
> +}
> +
>  static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
>  				  struct request *rq, void *priv,
>  				  bool reserved)
>  {
>  	struct mq_inflight *mi = priv;
>  
> -	if (rq->part == mi->part)
> -		mi->inflight[rq_data_dir(rq)]++;
> +	if (rq->part == mi->part) {
> +		int rw = op_is_write_or_flush(req_op(rq));
> +
> +		mi->inflight[rw]++;
> +	}
>  
>  	return true;
>  }
>

Patch

diff --git a/block/blk-flush.c b/block/blk-flush.c
index c7f396e3d5e2..14606f1e9273 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -329,6 +329,7 @@  static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
+	flush_rq->part = first_rq->part;
 	flush_rq->end_io = flush_end_io;
 
 	blk_flush_queue_rq(flush_rq, false);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a7785df2c944..1ed420a9a316 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -96,14 +96,22 @@  struct mq_inflight {
 	unsigned int inflight[2];
 };
 
+static inline int op_is_write_or_flush(unsigned int op)
+{
+	return op_is_write(op) || op == REQ_OP_FLUSH;
+}
+
 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 				  struct request *rq, void *priv,
 				  bool reserved)
 {
 	struct mq_inflight *mi = priv;
 
-	if (rq->part == mi->part)
-		mi->inflight[rq_data_dir(rq)]++;
+	if (rq->part == mi->part) {
+		int rw = op_is_write_or_flush(req_op(rq));
+
+		mi->inflight[rw]++;
+	}
 
 	return true;
 }
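
For context, not part of the patch: blk_mq_check_inflight() is invoked for every in-flight request via blk_mq_queue_tag_busy_iter(), and the totals it collects are what the inflight sysfs files report. REQ_OP_FLUSH is not classified as a write by op_is_write(), so once flush_rq->part is set the flush request would otherwise land in the read column; the op_is_write_or_flush() helper puts it in the write column instead. A rough sketch of the caller as it looked in blk-mq.c at the time (details may differ slightly):

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	/* walk all in-flight requests; blk_mq_check_inflight() fills mi.inflight[] */
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

The show routine behind /sys/block/<dev>/inflight ultimately calls into this path, so setting flush_rq->part in blk_kick_flush() is what makes the flush request visible there.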