[V2,2/2] block: track per requests type merged count

Message ID 20190918005454.6872-3-chaitanya.kulkarni@wdc.com (mailing list archive)
State New, archived
Series block: track per requests type merged count

Commit Message

Chaitanya Kulkarni Sept. 18, 2019, 12:54 a.m. UTC
With the current block layer debugfs infrastructure, we only get the
total merge count, which includes all request types; we don't get a
per-request-type merge count.

This patch replaces the rq_merged variable with an rq_merged array so
that we can track per-request-type merge statistics.

Instead of a single number covering all merged requests, with this
patch we get a detailed count of merged requests for each request type
that is mergeable.

This is helpful in understanding how requests are merged under
different workloads, and for special requests such as discard, which
has its own request-specific merging mechanism.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 block/blk-mq-debugfs.c | 12 ++++++++++--
 block/blk-mq-sched.c   |  2 +-
 block/blk-mq.h         |  2 +-
 3 files changed, 12 insertions(+), 4 deletions(-)
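
With this change, reading the per-cpu software queue "merged" debugfs
attribute reports one line per mergeable operation instead of a single
total. A hypothetical example follows; the device name and counts are
illustrative, and it assumes rq_mergeable_op() (introduced in patch 1/2
of this series) reports READ, WRITE and DISCARD as mergeable:

  $ cat /sys/kernel/debug/block/nvme0n1/hctx0/cpu0/merged
  READ                         137
  WRITE                       4211
  DISCARD                       18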
Patch

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index b3f2ba483992..6eb7ac9c6a02 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -679,8 +679,16 @@  static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
 static int ctx_merged_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_ctx *ctx = data;
+	unsigned long *rm = ctx->rq_merged;
+	unsigned int i;
 
-	seq_printf(m, "%lu\n", ctx->rq_merged);
+	for (i = 0; i < REQ_OP_LAST; i++) {
+		const char *op_str = blk_op_str(i);
+
+		if (!rq_mergeable_op(i) || strcmp(op_str, "UNKNOWN") == 0)
+			continue;
+		seq_printf(m, "%-20s    %8lu\n", op_str, rm[i]);
+	}
 	return 0;
 }
 
@@ -689,7 +697,7 @@  static ssize_t ctx_merged_write(void *data, const char __user *buf,
 {
 	struct blk_mq_ctx *ctx = data;
 
-	ctx->rq_merged = 0;
+	memset(ctx->rq_merged, 0, sizeof(ctx->rq_merged));
 	return count;
 }
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c9d183d6c499..664f8a056e96 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -314,7 +314,7 @@  static bool blk_mq_attempt_merge(struct request_queue *q,
 	lockdep_assert_held(&ctx->lock);
 
 	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
-		ctx->rq_merged++;
+		ctx->rq_merged[bio_op(bio)]++;
 		return true;
 	}
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 32c62c64e6c2..d485dde6e090 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,7 +27,7 @@  struct blk_mq_ctx {
 
 	/* incremented at dispatch time */
 	unsigned long		rq_dispatched[2];
-	unsigned long		rq_merged;
+	unsigned long		rq_merged[REQ_OP_LAST];
 
 	/* incremented at completion time */
 	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
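
As before, writing to the "merged" attribute resets the statistics; with
this patch ctx_merged_write() clears the whole per-op array rather than a
single counter. A hypothetical session, assuming the usual blk-mq debugfs
layout (device name and queue numbers are illustrative):

  # echo 0 > /sys/kernel/debug/block/nvme0n1/hctx0/cpu0/merged
  # cat /sys/kernel/debug/block/nvme0n1/hctx0/cpu0/merged
  READ                           0
  WRITE                          0
  DISCARD                        0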