[4/7] blk-stats: Add left mean deviation to blk_stats

Message ID 243815abd0a89d660c56739172365556a8f94546.1556609582.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Series Adjust hybrid polling sleep time

Commit Message

Pavel Begunkov April 30, 2019, 7:34 a.m. UTC
From: Pavel Begunkov <asml.silence@gmail.com>

The basic idea is to use the 3-sigma rule to estimate the adaptive
polling sleep time. An exact standard deviation calculation could
easily overflow u64, so the mean absolute deviation (MAD) is used as
an approximation instead. Since only the left bound is needed, MAD is
further replaced by the left mean deviation (LMD), which accumulates
deviations only for samples below the mean, to increase accuracy.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/blk-mq-debugfs.c    | 10 ++++++----
 block/blk-stat.c          | 21 +++++++++++++++++++--
 block/blk-stat.h          |  6 ++++++
 include/linux/blk_types.h |  3 +++
 4 files changed, 34 insertions(+), 6 deletions(-)
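
For illustration, the accumulation rule this patch introduces can be
sketched in plain userspace C. The names below are illustrative, not
the kernel API; only the update rule mirrors the patch: samples left
of the previous window's mean contribute (mean_last - value) to
lmd_batch, and the batch is divided by the total sample count, as
blk_rq_stat_collect() does.

#include <stdio.h>
#include <stdint.h>

struct stat_window {
	uint64_t mean_last;	/* mean of the previous window */
	uint64_t batch;		/* sum of samples in this window */
	uint64_t lmd_batch;	/* sum of (mean_last - x) for x < mean_last */
	uint32_t nr_samples;
};

static void window_add(struct stat_window *w, uint64_t value)
{
	w->batch += value;
	w->nr_samples++;
	/* only samples left of the previous mean contribute to LMD */
	if (value < w->mean_last)
		w->lmd_batch += w->mean_last - value;
}

int main(void)
{
	struct stat_window w = { .mean_last = 100 };
	uint64_t samples[] = { 80, 120, 90, 110, 70 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		window_add(&w, samples[i]);

	/* lmd = (20 + 10 + 30) / 5 = 12; mean = 470 / 5 = 94 */
	printf("mean=%llu lmd=%llu\n",
	       (unsigned long long)(w.batch / w.nr_samples),
	       (unsigned long long)(w.lmd_batch / w.nr_samples));
	return 0;
}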
Patch

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ec1d18cb643c..b62bd4468db3 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -27,12 +27,14 @@ 
 
 static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
 {
-	if (stat->nr_samples) {
-		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
-			   stat->nr_samples, stat->mean, stat->min, stat->max);
-	} else {
+	if (!stat->nr_samples) {
 		seq_puts(m, "samples=0");
+		return;
 	}
+
+	seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu, lmd=%llu",
+		   stat->nr_samples, stat->mean, stat->min, stat->max,
+		   stat->lmd);
 }
 
 static int queue_poll_stat_show(void *data, struct seq_file *m)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 13f93249fd5f..e1915a4e41b9 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -17,14 +17,21 @@  struct blk_queue_stats {
 	bool enable_accounting;
 };
 
-void blk_rq_stat_init_staging(struct blk_rq_stat_staging *stat)
+void blk_rq_stat_reset(struct blk_rq_stat_staging *stat)
 {
 	stat->min = -1ULL;
 	stat->max = 0;
 	stat->batch = 0;
+	stat->lmd_batch = 0;
 	stat->nr_samples = 0;
 }
 
+void blk_rq_stat_init_staging(struct blk_rq_stat_staging *stat)
+{
+	blk_rq_stat_reset(stat);
+	stat->mean_last = 0;
+}
+
 void blk_rq_stat_init(struct blk_rq_stat *stat)
 {
 	stat->min = -1ULL;
@@ -42,8 +49,12 @@  void blk_rq_stat_collect(struct blk_rq_stat *dst,
 
 	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
 				dst->nr_samples + src->nr_samples);
+	dst->lmd = div_u64(src->lmd_batch + dst->lmd * dst->nr_samples,
+				dst->nr_samples + src->nr_samples);
 
 	dst->nr_samples += src->nr_samples;
+	/* pass mean back for lmd computation */
+	src->mean_last = dst->mean;
 }
 
 void blk_rq_stat_merge(struct blk_rq_stat *dst, struct blk_rq_stat *src)
@@ -57,6 +68,9 @@  void blk_rq_stat_merge(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 	dst->mean = div_u64(src->mean * src->nr_samples +
 				dst->mean * dst->nr_samples,
 				dst->nr_samples + src->nr_samples);
+	dst->lmd = div_u64(src->lmd * src->nr_samples +
+				dst->lmd * dst->nr_samples,
+				dst->nr_samples + src->nr_samples);
 
 	dst->nr_samples += src->nr_samples;
 }
@@ -67,6 +81,9 @@  void blk_rq_stat_add(struct blk_rq_stat_staging *stat, u64 value)
 	stat->max = max(stat->max, value);
 	stat->batch += value;
 	stat->nr_samples++;
+
+	if (value < stat->mean_last)
+		stat->lmd_batch += stat->mean_last - value;
 }
 
 void blk_stat_add(struct request *rq, u64 now)
@@ -113,7 +130,7 @@  static void blk_stat_timer_fn(struct timer_list *t)
 		for (bucket = 0; bucket < cb->buckets; bucket++) {
 			blk_rq_stat_collect(&cb->stat[bucket],
 					    &cpu_stat[bucket]);
-			blk_rq_stat_init_staging(&cpu_stat[bucket]);
+			blk_rq_stat_reset(&cpu_stat[bucket]);
 		}
 	}
 
diff --git a/block/blk-stat.h b/block/blk-stat.h
index e5c753fbd6e6..ad81b2ce58bf 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -170,5 +170,11 @@  void blk_rq_stat_collect(struct blk_rq_stat *dst,
 void blk_rq_stat_merge(struct blk_rq_stat *dst, struct blk_rq_stat *src);
 void blk_rq_stat_init(struct blk_rq_stat *);
 void blk_rq_stat_init_staging(struct blk_rq_stat_staging *stat);
+/*
+ * Prepare stat for the next statistics round. Similar to
+ * blk_rq_stat_init_staging(), but retains some information
+ * about the previous round (see mean_last).
+ */
+void blk_rq_stat_reset(struct blk_rq_stat_staging *stat);
 
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5718a4e2e731..fe0ad7b2e6ca 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -445,13 +445,16 @@  struct blk_rq_stat {
 	u64 mean;
 	u64 min;
 	u64 max;
+	u64 lmd; /* left mean deviation */
 	u32 nr_samples;
 };
 
 struct blk_rq_stat_staging {
+	u64 mean_last;
 	u64 min;
 	u64 max;
 	u64 batch;
+	u64 lmd_batch;
 	u32 nr_samples;
 };
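
For reference, the weighted-average merge used above in
blk_rq_stat_merge() (and, with a batch sum in place of
src->mean * src->nr_samples, in blk_rq_stat_collect()) reduces to the
following helper (a sketch with illustrative names, not kernel API):

static inline uint64_t merge_avg(uint64_t a, uint32_t na,
				 uint64_t b, uint32_t nb)
{
	/*
	 * same shape as div_u64(a * na + b * nb, na + nb) above;
	 * like the patch, the multiply can overflow for very large values
	 */
	return (a * na + b * nb) / (na + nb);
}

Merging a 5-sample window with mean 94 into a 5-sample aggregate with
mean 100 gives merge_avg(94, 5, 100, 5) == 97, and the lmd fields merge
the same way. How the series finally uses lmd is not part of this
patch; given the 3-sigma motivation in the commit message, a plausible
(assumed, not confirmed) form is a lower sleep bound on the order of
mean - k * lmd for some small factor k.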