diff mbox series

[v2,2/4] ceph: track average/stdev r/w/m latency

Message ID 20210914084902.1618064-3-vshankar@redhat.com (mailing list archive)
State New, archived
Headers show
Series ceph: forward average read/write/metadata latency

Commit Message

Venky Shankar Sept. 14, 2021, 8:49 a.m. UTC
The math involved in tracking average and standard deviation
for r/w/m latencies looks incorrect. Fix that up. Also, change
the variable name that tracks standard deviation (*_sq_sum) to
*_stdev.

Signed-off-by: Venky Shankar <vshankar@redhat.com>
---
 fs/ceph/debugfs.c | 14 +++++-----
 fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
 fs/ceph/metric.h  |  9 ++++--
 3 files changed, 45 insertions(+), 48 deletions(-)

Comments

Xiubo Li Sept. 14, 2021, 12:52 p.m. UTC | #1
On 9/14/21 4:49 PM, Venky Shankar wrote:
> The math involved in tracking average and standard deviation
> for r/w/m latencies looks incorrect. Fix that up. Also, change
> the variable name that tracks standard deviation (*_sq_sum) to
> *_stdev.
>
> Signed-off-by: Venky Shankar <vshankar@redhat.com>
> ---
>   fs/ceph/debugfs.c | 14 +++++-----
>   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
>   fs/ceph/metric.h  |  9 ++++--
>   3 files changed, 45 insertions(+), 48 deletions(-)
>
> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> index 38b78b45811f..3abfa7ae8220 100644
> --- a/fs/ceph/debugfs.c
> +++ b/fs/ceph/debugfs.c
> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
>   	struct ceph_mds_client *mdsc = fsc->mdsc;
>   	struct ceph_client_metric *m = &mdsc->metric;
>   	int nr_caps = 0;
> -	s64 total, sum, avg, min, max, sq;
> +	s64 total, sum, avg, min, max, stdev;
>   	u64 sum_sz, avg_sz, min_sz, max_sz;
>   
>   	sum = percpu_counter_sum(&m->total_inodes);
> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->read_latency_min;
>   	max = m->read_latency_max;
> -	sq = m->read_latency_sq_sum;
> +	stdev = m->read_latency_stdev;
>   	spin_unlock(&m->read_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
>   
>   	spin_lock(&m->write_metric_lock);
>   	total = m->total_writes;
> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->write_latency_min;
>   	max = m->write_latency_max;
> -	sq = m->write_latency_sq_sum;
> +	stdev = m->write_latency_stdev;
>   	spin_unlock(&m->write_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);

Hi Venky,

Sorry, I missed your v1 patch set.

Previously the "sq_sum" just counting the square sum and only when 
showing them in the debug file will it to compute the stdev in 
CEPH_LAT_METRIC_SHOW().

So with this patch I think you also need to fix the 
CEPH_LAT_METRIC_SHOW(), no need to compute it twice ?

Thanks.
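
(For illustration, a rough sketch of the two schemes being contrasted
here; show_lat_sq_sum()/show_lat_stdev() are made-up helpers, not the
in-tree CEPH_LAT_METRIC_SHOW() macro:)

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/seq_file.h>

/* Old scheme: the struct stores the running square sum, so the show
 * path has to derive the stdev at read time. */
static void show_lat_sq_sum(struct seq_file *s, const char *name,
			    s64 total, s64 avg, s64 min, s64 max, s64 sq_sum)
{
	s64 stdev = total > 1 ? int_sqrt64(div64_u64(sq_sum, total - 1)) : 0;

	seq_printf(s, "%s %lld %lld %lld %lld %lld\n",
		   name, total, avg, min, max, stdev);
}

/* New scheme (this patch): the struct already holds a ready-to-print
 * stdev, so the show path should print it as-is rather than running
 * int_sqrt()/div64_u64() on it a second time. */
static void show_lat_stdev(struct seq_file *s, const char *name,
			   s64 total, s64 avg, s64 min, s64 max, s64 stdev)
{
	seq_printf(s, "%s %lld %lld %lld %lld %lld\n",
		   name, total, avg, min, max, stdev);
}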

>   
>   	spin_lock(&m->metadata_metric_lock);
>   	total = m->total_metadatas;
> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->metadata_latency_min;
>   	max = m->metadata_latency_max;
> -	sq = m->metadata_latency_sq_sum;
> +	stdev = m->metadata_latency_stdev;
>   	spin_unlock(&m->metadata_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
>   
>   	seq_printf(s, "\n");
>   	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> index 226dc38e2909..6b774b1a88ce 100644
> --- a/fs/ceph/metric.c
> +++ b/fs/ceph/metric.c
> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   		goto err_i_caps_mis;
>   
>   	spin_lock_init(&m->read_metric_lock);
> -	m->read_latency_sq_sum = 0;
> +	m->read_latency_stdev = 0;
> +	m->avg_read_latency = 0;
>   	m->read_latency_min = KTIME_MAX;
>   	m->read_latency_max = 0;
>   	m->total_reads = 0;
> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->read_size_sum = 0;
>   
>   	spin_lock_init(&m->write_metric_lock);
> -	m->write_latency_sq_sum = 0;
> +	m->write_latency_stdev = 0;
> +	m->avg_write_latency = 0;
>   	m->write_latency_min = KTIME_MAX;
>   	m->write_latency_max = 0;
>   	m->total_writes = 0;
> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->write_size_sum = 0;
>   
>   	spin_lock_init(&m->metadata_metric_lock);
> -	m->metadata_latency_sq_sum = 0;
> +	m->metadata_latency_stdev = 0;
> +	m->avg_metadata_latency = 0;
>   	m->metadata_latency_min = KTIME_MAX;
>   	m->metadata_latency_max = 0;
>   	m->total_metadatas = 0;
> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
>   		max = new;			\
>   }
>   
> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> -				  ktime_t *sq_sump, ktime_t lat)
> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> +				    ktime_t *lavg, ktime_t *min, ktime_t *max,
> +				    ktime_t *lstdev, ktime_t lat)
>   {
> -	ktime_t avg, sq;
> +	ktime_t total, avg, stdev;
>   
> -	if (unlikely(total == 1))
> -		return;
> +	total = ++(*ctotal);
> +	*lsum += lat;
> +
> +	METRIC_UPDATE_MIN_MAX(*min, *max, lat);
>   
> -	/* the sq is (lat - old_avg) * (lat - new_avg) */
> -	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> -	sq = lat - avg;
> -	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> -	sq = sq * (lat - avg);
> -	*sq_sump += sq;
> +	if (unlikely(total == 1)) {
> +		*lavg = lat;
> +		*lstdev = 0;
> +	} else {
> +		avg = *lavg + div64_s64(lat - *lavg, total);
> +		stdev = *lstdev + (lat - *lavg)*(lat - avg);
> +		*lstdev = int_sqrt(div64_u64(stdev, total - 1));
> +		*lavg = avg;
> +	}
>   }
>   
>   void ceph_update_read_metrics(struct ceph_client_metric *m,
> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
>   			      unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->read_metric_lock);
> -	total = ++m->total_reads;
>   	m->read_size_sum += size;
> -	m->read_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->read_size_min,
>   			      m->read_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> -			      m->read_latency_max,
> -			      lat);
> -	__update_stdev(total, m->read_latency_sum,
> -		       &m->read_latency_sq_sum, lat);
> +	__update_latency(&m->total_reads, &m->read_latency_sum,
> +			 &m->avg_read_latency, &m->read_latency_min,
> +			 &m->read_latency_max, &m->read_latency_stdev, lat);
>   	spin_unlock(&m->read_metric_lock);
>   }
>   
> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
>   			       unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->write_metric_lock);
> -	total = ++m->total_writes;
>   	m->write_size_sum += size;
> -	m->write_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->write_size_min,
>   			      m->write_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> -			      m->write_latency_max,
> -			      lat);
> -	__update_stdev(total, m->write_latency_sum,
> -		       &m->write_latency_sq_sum, lat);
> +	__update_latency(&m->total_writes, &m->write_latency_sum,
> +			 &m->avg_write_latency, &m->write_latency_min,
> +			 &m->write_latency_max, &m->write_latency_stdev, lat);
>   	spin_unlock(&m->write_metric_lock);
>   }
>   
> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
>   				  int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ENOENT))
>   		return;
>   
>   	spin_lock(&m->metadata_metric_lock);
> -	total = ++m->total_metadatas;
> -	m->metadata_latency_sum += lat;
> -	METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> -			      m->metadata_latency_max,
> -			      lat);
> -	__update_stdev(total, m->metadata_latency_sum,
> -		       &m->metadata_latency_sq_sum, lat);
> +	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> +			 &m->avg_metadata_latency, &m->metadata_latency_min,
> +			 &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
>   	spin_unlock(&m->metadata_metric_lock);
>   }
> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> index 103ed736f9d2..a5da21b8f8ed 100644
> --- a/fs/ceph/metric.h
> +++ b/fs/ceph/metric.h
> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>   	u64 read_size_min;
>   	u64 read_size_max;
>   	ktime_t read_latency_sum;
> -	ktime_t read_latency_sq_sum;
> +	ktime_t avg_read_latency;
> +	ktime_t read_latency_stdev;
>   	ktime_t read_latency_min;
>   	ktime_t read_latency_max;
>   
> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>   	u64 write_size_min;
>   	u64 write_size_max;
>   	ktime_t write_latency_sum;
> -	ktime_t write_latency_sq_sum;
> +	ktime_t avg_write_latency;
> +	ktime_t write_latency_stdev;
>   	ktime_t write_latency_min;
>   	ktime_t write_latency_max;
>   
>   	spinlock_t metadata_metric_lock;
>   	u64 total_metadatas;
>   	ktime_t metadata_latency_sum;
> -	ktime_t metadata_latency_sq_sum;
> +	ktime_t avg_metadata_latency;
> +	ktime_t metadata_latency_stdev;
>   	ktime_t metadata_latency_min;
>   	ktime_t metadata_latency_max;
>
Venky Shankar Sept. 14, 2021, 1:03 p.m. UTC | #2
On Tue, Sep 14, 2021 at 6:23 PM Xiubo Li <xiubli@redhat.com> wrote:
>
>
> On 9/14/21 4:49 PM, Venky Shankar wrote:
> > The math involved in tracking average and standard deviation
> > for r/w/m latencies looks incorrect. Fix that up. Also, change
> > the variable name that tracks standard deviation (*_sq_sum) to
> > *_stdev.
> >
> > Signed-off-by: Venky Shankar <vshankar@redhat.com>
> > ---
> >   fs/ceph/debugfs.c | 14 +++++-----
> >   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
> >   fs/ceph/metric.h  |  9 ++++--
> >   3 files changed, 45 insertions(+), 48 deletions(-)
> >
> > diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> > index 38b78b45811f..3abfa7ae8220 100644
> > --- a/fs/ceph/debugfs.c
> > +++ b/fs/ceph/debugfs.c
> > @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
> >       struct ceph_mds_client *mdsc = fsc->mdsc;
> >       struct ceph_client_metric *m = &mdsc->metric;
> >       int nr_caps = 0;
> > -     s64 total, sum, avg, min, max, sq;
> > +     s64 total, sum, avg, min, max, stdev;
> >       u64 sum_sz, avg_sz, min_sz, max_sz;
> >
> >       sum = percpu_counter_sum(&m->total_inodes);
> > @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->read_latency_min;
> >       max = m->read_latency_max;
> > -     sq = m->read_latency_sq_sum;
> > +     stdev = m->read_latency_stdev;
> >       spin_unlock(&m->read_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
> >
> >       spin_lock(&m->write_metric_lock);
> >       total = m->total_writes;
> > @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->write_latency_min;
> >       max = m->write_latency_max;
> > -     sq = m->write_latency_sq_sum;
> > +     stdev = m->write_latency_stdev;
> >       spin_unlock(&m->write_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
>
> Hi Venky,
>
> Sorry, I missed your v1 patch set.
>
> Previously the "sq_sum" field just accumulated the sum of squares, and
> the stdev was only computed from it in CEPH_LAT_METRIC_SHOW() when the
> values were shown in the debugfs file.
>
> So with this patch I think you also need to fix CEPH_LAT_METRIC_SHOW(),
> so that the stdev isn't computed twice?

OK, yeah. I didn't look at that when winding this series over the testing branch.

I'll remove that and resend.

>
> Thanks.
>
> >
> >       spin_lock(&m->metadata_metric_lock);
> >       total = m->total_metadatas;
> > @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->metadata_latency_min;
> >       max = m->metadata_latency_max;
> > -     sq = m->metadata_latency_sq_sum;
> > +     stdev = m->metadata_latency_stdev;
> >       spin_unlock(&m->metadata_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
> >
> >       seq_printf(s, "\n");
> >       seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> > diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> > index 226dc38e2909..6b774b1a88ce 100644
> > --- a/fs/ceph/metric.c
> > +++ b/fs/ceph/metric.c
> > @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >               goto err_i_caps_mis;
> >
> >       spin_lock_init(&m->read_metric_lock);
> > -     m->read_latency_sq_sum = 0;
> > +     m->read_latency_stdev = 0;
> > +     m->avg_read_latency = 0;
> >       m->read_latency_min = KTIME_MAX;
> >       m->read_latency_max = 0;
> >       m->total_reads = 0;
> > @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->read_size_sum = 0;
> >
> >       spin_lock_init(&m->write_metric_lock);
> > -     m->write_latency_sq_sum = 0;
> > +     m->write_latency_stdev = 0;
> > +     m->avg_write_latency = 0;
> >       m->write_latency_min = KTIME_MAX;
> >       m->write_latency_max = 0;
> >       m->total_writes = 0;
> > @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->write_size_sum = 0;
> >
> >       spin_lock_init(&m->metadata_metric_lock);
> > -     m->metadata_latency_sq_sum = 0;
> > +     m->metadata_latency_stdev = 0;
> > +     m->avg_metadata_latency = 0;
> >       m->metadata_latency_min = KTIME_MAX;
> >       m->metadata_latency_max = 0;
> >       m->total_metadatas = 0;
> > @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
> >               max = new;                      \
> >   }
> >
> > -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> > -                               ktime_t *sq_sump, ktime_t lat)
> > +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> > +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
> > +                                 ktime_t *lstdev, ktime_t lat)
> >   {
> > -     ktime_t avg, sq;
> > +     ktime_t total, avg, stdev;
> >
> > -     if (unlikely(total == 1))
> > -             return;
> > +     total = ++(*ctotal);
> > +     *lsum += lat;
> > +
> > +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
> >
> > -     /* the sq is (lat - old_avg) * (lat - new_avg) */
> > -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> > -     sq = lat - avg;
> > -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> > -     sq = sq * (lat - avg);
> > -     *sq_sump += sq;
> > +     if (unlikely(total == 1)) {
> > +             *lavg = lat;
> > +             *lstdev = 0;
> > +     } else {
> > +             avg = *lavg + div64_s64(lat - *lavg, total);
> > +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
> > +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
> > +             *lavg = avg;
> > +     }
> >   }
> >
> >   void ceph_update_read_metrics(struct ceph_client_metric *m,
> > @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
> >                             unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->read_metric_lock);
> > -     total = ++m->total_reads;
> >       m->read_size_sum += size;
> > -     m->read_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >                             m->read_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> > -                           m->read_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->read_latency_sum,
> > -                    &m->read_latency_sq_sum, lat);
> > +     __update_latency(&m->total_reads, &m->read_latency_sum,
> > +                      &m->avg_read_latency, &m->read_latency_min,
> > +                      &m->read_latency_max, &m->read_latency_stdev, lat);
> >       spin_unlock(&m->read_metric_lock);
> >   }
> >
> > @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
> >                              unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->write_metric_lock);
> > -     total = ++m->total_writes;
> >       m->write_size_sum += size;
> > -     m->write_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >                             m->write_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> > -                           m->write_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->write_latency_sum,
> > -                    &m->write_latency_sq_sum, lat);
> > +     __update_latency(&m->total_writes, &m->write_latency_sum,
> > +                      &m->avg_write_latency, &m->write_latency_min,
> > +                      &m->write_latency_max, &m->write_latency_stdev, lat);
> >       spin_unlock(&m->write_metric_lock);
> >   }
> >
> > @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
> >                                 int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ENOENT))
> >               return;
> >
> >       spin_lock(&m->metadata_metric_lock);
> > -     total = ++m->total_metadatas;
> > -     m->metadata_latency_sum += lat;
> > -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> > -                           m->metadata_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->metadata_latency_sum,
> > -                    &m->metadata_latency_sq_sum, lat);
> > +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> > +                      &m->avg_metadata_latency, &m->metadata_latency_min,
> > +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
> >       spin_unlock(&m->metadata_metric_lock);
> >   }
> > diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> > index 103ed736f9d2..a5da21b8f8ed 100644
> > --- a/fs/ceph/metric.h
> > +++ b/fs/ceph/metric.h
> > @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >       u64 read_size_min;
> >       u64 read_size_max;
> >       ktime_t read_latency_sum;
> > -     ktime_t read_latency_sq_sum;
> > +     ktime_t avg_read_latency;
> > +     ktime_t read_latency_stdev;
> >       ktime_t read_latency_min;
> >       ktime_t read_latency_max;
> >
> > @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >       u64 write_size_min;
> >       u64 write_size_max;
> >       ktime_t write_latency_sum;
> > -     ktime_t write_latency_sq_sum;
> > +     ktime_t avg_write_latency;
> > +     ktime_t write_latency_stdev;
> >       ktime_t write_latency_min;
> >       ktime_t write_latency_max;
> >
> >       spinlock_t metadata_metric_lock;
> >       u64 total_metadatas;
> >       ktime_t metadata_latency_sum;
> > -     ktime_t metadata_latency_sq_sum;
> > +     ktime_t avg_metadata_latency;
> > +     ktime_t metadata_latency_stdev;
> >       ktime_t metadata_latency_min;
> >       ktime_t metadata_latency_max;
> >
>
Xiubo Li Sept. 14, 2021, 1:09 p.m. UTC | #3
On 9/14/21 4:49 PM, Venky Shankar wrote:
> The math involved in tracking average and standard deviation
> for r/w/m latencies looks incorrect. Fix that up. Also, change
> the variable name that tracks standard deviation (*_sq_sum) to
> *_stdev.
>
> Signed-off-by: Venky Shankar <vshankar@redhat.com>
> ---
>   fs/ceph/debugfs.c | 14 +++++-----
>   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
>   fs/ceph/metric.h  |  9 ++++--
>   3 files changed, 45 insertions(+), 48 deletions(-)
>
> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> index 38b78b45811f..3abfa7ae8220 100644
> --- a/fs/ceph/debugfs.c
> +++ b/fs/ceph/debugfs.c
> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
>   	struct ceph_mds_client *mdsc = fsc->mdsc;
>   	struct ceph_client_metric *m = &mdsc->metric;
>   	int nr_caps = 0;
> -	s64 total, sum, avg, min, max, sq;
> +	s64 total, sum, avg, min, max, stdev;
>   	u64 sum_sz, avg_sz, min_sz, max_sz;
>   
>   	sum = percpu_counter_sum(&m->total_inodes);
> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->read_latency_min;
>   	max = m->read_latency_max;
> -	sq = m->read_latency_sq_sum;
> +	stdev = m->read_latency_stdev;
>   	spin_unlock(&m->read_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
>   
>   	spin_lock(&m->write_metric_lock);
>   	total = m->total_writes;
> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->write_latency_min;
>   	max = m->write_latency_max;
> -	sq = m->write_latency_sq_sum;
> +	stdev = m->write_latency_stdev;
>   	spin_unlock(&m->write_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
>   
>   	spin_lock(&m->metadata_metric_lock);
>   	total = m->total_metadatas;
> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->metadata_latency_min;
>   	max = m->metadata_latency_max;
> -	sq = m->metadata_latency_sq_sum;
> +	stdev = m->metadata_latency_stdev;
>   	spin_unlock(&m->metadata_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
>   
>   	seq_printf(s, "\n");
>   	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> index 226dc38e2909..6b774b1a88ce 100644
> --- a/fs/ceph/metric.c
> +++ b/fs/ceph/metric.c
> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   		goto err_i_caps_mis;
>   
>   	spin_lock_init(&m->read_metric_lock);
> -	m->read_latency_sq_sum = 0;
> +	m->read_latency_stdev = 0;
> +	m->avg_read_latency = 0;
>   	m->read_latency_min = KTIME_MAX;
>   	m->read_latency_max = 0;
>   	m->total_reads = 0;
> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->read_size_sum = 0;
>   
>   	spin_lock_init(&m->write_metric_lock);
> -	m->write_latency_sq_sum = 0;
> +	m->write_latency_stdev = 0;
> +	m->avg_write_latency = 0;
>   	m->write_latency_min = KTIME_MAX;
>   	m->write_latency_max = 0;
>   	m->total_writes = 0;
> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->write_size_sum = 0;
>   
>   	spin_lock_init(&m->metadata_metric_lock);
> -	m->metadata_latency_sq_sum = 0;
> +	m->metadata_latency_stdev = 0;
> +	m->avg_metadata_latency = 0;
>   	m->metadata_latency_min = KTIME_MAX;
>   	m->metadata_latency_max = 0;
>   	m->total_metadatas = 0;
> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
>   		max = new;			\
>   }
>   
> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> -				  ktime_t *sq_sump, ktime_t lat)
> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> +				    ktime_t *lavg, ktime_t *min, ktime_t *max,
> +				    ktime_t *lstdev, ktime_t lat)
>   {
> -	ktime_t avg, sq;
> +	ktime_t total, avg, stdev;
>   
> -	if (unlikely(total == 1))
> -		return;
> +	total = ++(*ctotal);
> +	*lsum += lat;
> +
> +	METRIC_UPDATE_MIN_MAX(*min, *max, lat);
>   
> -	/* the sq is (lat - old_avg) * (lat - new_avg) */
> -	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> -	sq = lat - avg;
> -	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> -	sq = sq * (lat - avg);
> -	*sq_sump += sq;
> +	if (unlikely(total == 1)) {
> +		*lavg = lat;
> +		*lstdev = 0;
> +	} else {
> +		avg = *lavg + div64_s64(lat - *lavg, total);
> +		stdev = *lstdev + (lat - *lavg)*(lat - avg);
> +		*lstdev = int_sqrt(div64_u64(stdev, total - 1));
> +		*lavg = avg;
> +	}

IMO, this is incorrect; for the math formula, please see:

https://www.investopedia.com/ask/answers/042415/what-difference-between-standard-error-means-and-standard-deviation.asp

The most accurate result should be:

stdev = int_sqrt(sum((X(n) - avg)^2, (X(n-1) - avg)^2, ..., (X(1) - 
avg)^2) / (n - 1)).

While you are computing it:

stdev_n = int_sqrt(stdev_(n-1) + (X(n-1) - avg)^2)

Though the current stdev computing method is not exactly what the math 
formula does, it is closer to it, because the kernel can't record all 
the latency values and redo the computation whenever needed; that would 
occupy a large amount of memory and CPU resources.
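
(For illustration only: this is roughly what computing the textbook
formula exactly would look like -- it needs every latency sample kept
around and a full pass over them on each update, hence the memory/CPU
cost mentioned above. stdev_exact() is a made-up helper, not ceph code.)

#include <linux/kernel.h>
#include <linux/math64.h>

static u64 stdev_exact(const s64 *samples, u64 n, s64 avg)
{
	u64 sq_sum = 0;
	u64 i;

	if (n < 2)
		return 0;

	for (i = 0; i < n; i++) {
		s64 d = samples[i] - avg;

		sq_sum += (u64)(d * d);
	}

	return int_sqrt64(div64_u64(sq_sum, n - 1));
}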


>   }
>   
>   void ceph_update_read_metrics(struct ceph_client_metric *m,
> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
>   			      unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->read_metric_lock);
> -	total = ++m->total_reads;
>   	m->read_size_sum += size;
> -	m->read_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->read_size_min,
>   			      m->read_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> -			      m->read_latency_max,
> -			      lat);
> -	__update_stdev(total, m->read_latency_sum,
> -		       &m->read_latency_sq_sum, lat);
> +	__update_latency(&m->total_reads, &m->read_latency_sum,
> +			 &m->avg_read_latency, &m->read_latency_min,
> +			 &m->read_latency_max, &m->read_latency_stdev, lat);
>   	spin_unlock(&m->read_metric_lock);
>   }
>   
> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
>   			       unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->write_metric_lock);
> -	total = ++m->total_writes;
>   	m->write_size_sum += size;
> -	m->write_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->write_size_min,
>   			      m->write_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> -			      m->write_latency_max,
> -			      lat);
> -	__update_stdev(total, m->write_latency_sum,
> -		       &m->write_latency_sq_sum, lat);
> +	__update_latency(&m->total_writes, &m->write_latency_sum,
> +			 &m->avg_write_latency, &m->write_latency_min,
> +			 &m->write_latency_max, &m->write_latency_stdev, lat);
>   	spin_unlock(&m->write_metric_lock);
>   }
>   
> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
>   				  int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ENOENT))
>   		return;
>   
>   	spin_lock(&m->metadata_metric_lock);
> -	total = ++m->total_metadatas;
> -	m->metadata_latency_sum += lat;
> -	METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> -			      m->metadata_latency_max,
> -			      lat);
> -	__update_stdev(total, m->metadata_latency_sum,
> -		       &m->metadata_latency_sq_sum, lat);
> +	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> +			 &m->avg_metadata_latency, &m->metadata_latency_min,
> +			 &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
>   	spin_unlock(&m->metadata_metric_lock);
>   }
> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> index 103ed736f9d2..a5da21b8f8ed 100644
> --- a/fs/ceph/metric.h
> +++ b/fs/ceph/metric.h
> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>   	u64 read_size_min;
>   	u64 read_size_max;
>   	ktime_t read_latency_sum;
> -	ktime_t read_latency_sq_sum;
> +	ktime_t avg_read_latency;
> +	ktime_t read_latency_stdev;
>   	ktime_t read_latency_min;
>   	ktime_t read_latency_max;
>   
> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>   	u64 write_size_min;
>   	u64 write_size_max;
>   	ktime_t write_latency_sum;
> -	ktime_t write_latency_sq_sum;
> +	ktime_t avg_write_latency;
> +	ktime_t write_latency_stdev;
>   	ktime_t write_latency_min;
>   	ktime_t write_latency_max;
>   
>   	spinlock_t metadata_metric_lock;
>   	u64 total_metadatas;
>   	ktime_t metadata_latency_sum;
> -	ktime_t metadata_latency_sq_sum;
> +	ktime_t avg_metadata_latency;
> +	ktime_t metadata_latency_stdev;
>   	ktime_t metadata_latency_min;
>   	ktime_t metadata_latency_max;
>
Xiubo Li Sept. 14, 2021, 1:13 p.m. UTC | #4
On 9/14/21 4:49 PM, Venky Shankar wrote:
> The math involved in tracking average and standard deviation
> for r/w/m latencies looks incorrect. Fix that up. Also, change
> the variable name that tracks standard deviation (*_sq_sum) to
> *_stdev.
>
> Signed-off-by: Venky Shankar <vshankar@redhat.com>
> ---
>   fs/ceph/debugfs.c | 14 +++++-----
>   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
>   fs/ceph/metric.h  |  9 ++++--
>   3 files changed, 45 insertions(+), 48 deletions(-)
>
> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> index 38b78b45811f..3abfa7ae8220 100644
> --- a/fs/ceph/debugfs.c
> +++ b/fs/ceph/debugfs.c
> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
>   	struct ceph_mds_client *mdsc = fsc->mdsc;
>   	struct ceph_client_metric *m = &mdsc->metric;
>   	int nr_caps = 0;
> -	s64 total, sum, avg, min, max, sq;
> +	s64 total, sum, avg, min, max, stdev;
>   	u64 sum_sz, avg_sz, min_sz, max_sz;
>   
>   	sum = percpu_counter_sum(&m->total_inodes);
> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->read_latency_min;
>   	max = m->read_latency_max;
> -	sq = m->read_latency_sq_sum;
> +	stdev = m->read_latency_stdev;
>   	spin_unlock(&m->read_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
>   
>   	spin_lock(&m->write_metric_lock);
>   	total = m->total_writes;
> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->write_latency_min;
>   	max = m->write_latency_max;
> -	sq = m->write_latency_sq_sum;
> +	stdev = m->write_latency_stdev;
>   	spin_unlock(&m->write_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
>   
>   	spin_lock(&m->metadata_metric_lock);
>   	total = m->total_metadatas;
> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
>   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>   	min = m->metadata_latency_min;
>   	max = m->metadata_latency_max;
> -	sq = m->metadata_latency_sq_sum;
> +	stdev = m->metadata_latency_stdev;
>   	spin_unlock(&m->metadata_metric_lock);
> -	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> +	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
>   
>   	seq_printf(s, "\n");
>   	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> index 226dc38e2909..6b774b1a88ce 100644
> --- a/fs/ceph/metric.c
> +++ b/fs/ceph/metric.c
> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   		goto err_i_caps_mis;
>   
>   	spin_lock_init(&m->read_metric_lock);
> -	m->read_latency_sq_sum = 0;
> +	m->read_latency_stdev = 0;
> +	m->avg_read_latency = 0;
>   	m->read_latency_min = KTIME_MAX;
>   	m->read_latency_max = 0;
>   	m->total_reads = 0;
> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->read_size_sum = 0;
>   
>   	spin_lock_init(&m->write_metric_lock);
> -	m->write_latency_sq_sum = 0;
> +	m->write_latency_stdev = 0;
> +	m->avg_write_latency = 0;
>   	m->write_latency_min = KTIME_MAX;
>   	m->write_latency_max = 0;
>   	m->total_writes = 0;
> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>   	m->write_size_sum = 0;
>   
>   	spin_lock_init(&m->metadata_metric_lock);
> -	m->metadata_latency_sq_sum = 0;
> +	m->metadata_latency_stdev = 0;
> +	m->avg_metadata_latency = 0;
>   	m->metadata_latency_min = KTIME_MAX;
>   	m->metadata_latency_max = 0;
>   	m->total_metadatas = 0;
> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
>   		max = new;			\
>   }
>   
> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> -				  ktime_t *sq_sump, ktime_t lat)
> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> +				    ktime_t *lavg, ktime_t *min, ktime_t *max,
> +				    ktime_t *lstdev, ktime_t lat)
>   {
> -	ktime_t avg, sq;
> +	ktime_t total, avg, stdev;
>   
> -	if (unlikely(total == 1))
> -		return;
> +	total = ++(*ctotal);
> +	*lsum += lat;
> +
> +	METRIC_UPDATE_MIN_MAX(*min, *max, lat);
>   
> -	/* the sq is (lat - old_avg) * (lat - new_avg) */
> -	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> -	sq = lat - avg;
> -	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> -	sq = sq * (lat - avg);
> -	*sq_sump += sq;
> +	if (unlikely(total == 1)) {
> +		*lavg = lat;
> +		*lstdev = 0;
> +	} else {
> +		avg = *lavg + div64_s64(lat - *lavg, total);
> +		stdev = *lstdev + (lat - *lavg)*(lat - avg);
> +		*lstdev = int_sqrt(div64_u64(stdev, total - 1));

In kernel space, won't it be a little heavy to run int_sqrt() every time 
the latency is updated?

@Jeff, any idea ?


> +		*lavg = avg;
> +	}
>   }
>   
>   void ceph_update_read_metrics(struct ceph_client_metric *m,
> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
>   			      unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->read_metric_lock);
> -	total = ++m->total_reads;
>   	m->read_size_sum += size;
> -	m->read_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->read_size_min,
>   			      m->read_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> -			      m->read_latency_max,
> -			      lat);
> -	__update_stdev(total, m->read_latency_sum,
> -		       &m->read_latency_sq_sum, lat);
> +	__update_latency(&m->total_reads, &m->read_latency_sum,
> +			 &m->avg_read_latency, &m->read_latency_min,
> +			 &m->read_latency_max, &m->read_latency_stdev, lat);
>   	spin_unlock(&m->read_metric_lock);
>   }
>   
> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
>   			       unsigned int size, int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ETIMEDOUT))
>   		return;
>   
>   	spin_lock(&m->write_metric_lock);
> -	total = ++m->total_writes;
>   	m->write_size_sum += size;
> -	m->write_latency_sum += lat;
>   	METRIC_UPDATE_MIN_MAX(m->write_size_min,
>   			      m->write_size_max,
>   			      size);
> -	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> -			      m->write_latency_max,
> -			      lat);
> -	__update_stdev(total, m->write_latency_sum,
> -		       &m->write_latency_sq_sum, lat);
> +	__update_latency(&m->total_writes, &m->write_latency_sum,
> +			 &m->avg_write_latency, &m->write_latency_min,
> +			 &m->write_latency_max, &m->write_latency_stdev, lat);
>   	spin_unlock(&m->write_metric_lock);
>   }
>   
> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
>   				  int rc)
>   {
>   	ktime_t lat = ktime_sub(r_end, r_start);
> -	ktime_t total;
>   
>   	if (unlikely(rc && rc != -ENOENT))
>   		return;
>   
>   	spin_lock(&m->metadata_metric_lock);
> -	total = ++m->total_metadatas;
> -	m->metadata_latency_sum += lat;
> -	METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> -			      m->metadata_latency_max,
> -			      lat);
> -	__update_stdev(total, m->metadata_latency_sum,
> -		       &m->metadata_latency_sq_sum, lat);
> +	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> +			 &m->avg_metadata_latency, &m->metadata_latency_min,
> +			 &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
>   	spin_unlock(&m->metadata_metric_lock);
>   }
> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> index 103ed736f9d2..a5da21b8f8ed 100644
> --- a/fs/ceph/metric.h
> +++ b/fs/ceph/metric.h
> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>   	u64 read_size_min;
>   	u64 read_size_max;
>   	ktime_t read_latency_sum;
> -	ktime_t read_latency_sq_sum;
> +	ktime_t avg_read_latency;
> +	ktime_t read_latency_stdev;
>   	ktime_t read_latency_min;
>   	ktime_t read_latency_max;
>   
> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>   	u64 write_size_min;
>   	u64 write_size_max;
>   	ktime_t write_latency_sum;
> -	ktime_t write_latency_sq_sum;
> +	ktime_t avg_write_latency;
> +	ktime_t write_latency_stdev;
>   	ktime_t write_latency_min;
>   	ktime_t write_latency_max;
>   
>   	spinlock_t metadata_metric_lock;
>   	u64 total_metadatas;
>   	ktime_t metadata_latency_sum;
> -	ktime_t metadata_latency_sq_sum;
> +	ktime_t avg_metadata_latency;
> +	ktime_t metadata_latency_stdev;
>   	ktime_t metadata_latency_min;
>   	ktime_t metadata_latency_max;
>
Venky Shankar Sept. 14, 2021, 1:30 p.m. UTC | #5
On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
>
>
> On 9/14/21 4:49 PM, Venky Shankar wrote:
> > The math involved in tracking average and standard deviation
> > for r/w/m latencies looks incorrect. Fix that up. Also, change
> > the variable name that tracks standard deviation (*_sq_sum) to
> > *_stdev.
> >
> > Signed-off-by: Venky Shankar <vshankar@redhat.com>
> > ---
> >   fs/ceph/debugfs.c | 14 +++++-----
> >   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
> >   fs/ceph/metric.h  |  9 ++++--
> >   3 files changed, 45 insertions(+), 48 deletions(-)
> >
> > diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> > index 38b78b45811f..3abfa7ae8220 100644
> > --- a/fs/ceph/debugfs.c
> > +++ b/fs/ceph/debugfs.c
> > @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
> >       struct ceph_mds_client *mdsc = fsc->mdsc;
> >       struct ceph_client_metric *m = &mdsc->metric;
> >       int nr_caps = 0;
> > -     s64 total, sum, avg, min, max, sq;
> > +     s64 total, sum, avg, min, max, stdev;
> >       u64 sum_sz, avg_sz, min_sz, max_sz;
> >
> >       sum = percpu_counter_sum(&m->total_inodes);
> > @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->read_latency_min;
> >       max = m->read_latency_max;
> > -     sq = m->read_latency_sq_sum;
> > +     stdev = m->read_latency_stdev;
> >       spin_unlock(&m->read_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
> >
> >       spin_lock(&m->write_metric_lock);
> >       total = m->total_writes;
> > @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->write_latency_min;
> >       max = m->write_latency_max;
> > -     sq = m->write_latency_sq_sum;
> > +     stdev = m->write_latency_stdev;
> >       spin_unlock(&m->write_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
> >
> >       spin_lock(&m->metadata_metric_lock);
> >       total = m->total_metadatas;
> > @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->metadata_latency_min;
> >       max = m->metadata_latency_max;
> > -     sq = m->metadata_latency_sq_sum;
> > +     stdev = m->metadata_latency_stdev;
> >       spin_unlock(&m->metadata_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
> >
> >       seq_printf(s, "\n");
> >       seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> > diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> > index 226dc38e2909..6b774b1a88ce 100644
> > --- a/fs/ceph/metric.c
> > +++ b/fs/ceph/metric.c
> > @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >               goto err_i_caps_mis;
> >
> >       spin_lock_init(&m->read_metric_lock);
> > -     m->read_latency_sq_sum = 0;
> > +     m->read_latency_stdev = 0;
> > +     m->avg_read_latency = 0;
> >       m->read_latency_min = KTIME_MAX;
> >       m->read_latency_max = 0;
> >       m->total_reads = 0;
> > @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->read_size_sum = 0;
> >
> >       spin_lock_init(&m->write_metric_lock);
> > -     m->write_latency_sq_sum = 0;
> > +     m->write_latency_stdev = 0;
> > +     m->avg_write_latency = 0;
> >       m->write_latency_min = KTIME_MAX;
> >       m->write_latency_max = 0;
> >       m->total_writes = 0;
> > @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->write_size_sum = 0;
> >
> >       spin_lock_init(&m->metadata_metric_lock);
> > -     m->metadata_latency_sq_sum = 0;
> > +     m->metadata_latency_stdev = 0;
> > +     m->avg_metadata_latency = 0;
> >       m->metadata_latency_min = KTIME_MAX;
> >       m->metadata_latency_max = 0;
> >       m->total_metadatas = 0;
> > @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
> >               max = new;                      \
> >   }
> >
> > -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> > -                               ktime_t *sq_sump, ktime_t lat)
> > +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> > +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
> > +                                 ktime_t *lstdev, ktime_t lat)
> >   {
> > -     ktime_t avg, sq;
> > +     ktime_t total, avg, stdev;
> >
> > -     if (unlikely(total == 1))
> > -             return;
> > +     total = ++(*ctotal);
> > +     *lsum += lat;
> > +
> > +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
> >
> > -     /* the sq is (lat - old_avg) * (lat - new_avg) */
> > -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> > -     sq = lat - avg;
> > -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> > -     sq = sq * (lat - avg);
> > -     *sq_sump += sq;
> > +     if (unlikely(total == 1)) {
> > +             *lavg = lat;
> > +             *lstdev = 0;
> > +     } else {
> > +             avg = *lavg + div64_s64(lat - *lavg, total);
> > +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
> > +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
> > +             *lavg = avg;
> > +     }
>
> IMO, this is incorrect; for the math formula, please see:
>
> https://www.investopedia.com/ask/answers/042415/what-difference-between-standard-error-means-and-standard-deviation.asp
>
> The most accurate result should be:
>
> stdev = int_sqrt(sum((X(n) - avg)^2, (X(n-1) - avg)^2, ..., (X(1) -
> avg)^2) / (n - 1)).
>
> While you are computing it:
>
> stdev_n = int_sqrt(stdev_(n-1) + (X(n-1) - avg)^2)

Hmm. The int_sqrt() is probably not needed at this point and can be
done when sending the metric. That would avoid some cycles.

Also, the way avg is calculated is not totally incorrect; however, I
would like to keep it similar to how it's done in libcephfs.

>
> Though the current stdev computing method is not exactly what the math
> formula does, it is closer to it, because the kernel can't record all
> the latency values and redo the computation whenever needed; that would
> occupy a large amount of memory and CPU resources.

The approach is to calculate the running variance, i.e., compute the
variance as the data (latencies) arrive one at a time.
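
(A generic sketch of that running, Welford-style recurrence -- not the
ceph code; struct running_stat and the helpers are made-up names. Only
the count, the integer mean and the sum of squared deviations are kept,
and each new sample is folded in with O(1) work:)

#include <linux/kernel.h>
#include <linux/math64.h>

struct running_stat {
	u64 count;
	s64 mean;	/* integer running mean */
	u64 m2;		/* running sum of squared deviations from the mean */
};

static void running_stat_add(struct running_stat *rs, s64 x)
{
	s64 delta = x - rs->mean;

	rs->count++;
	rs->mean += div64_s64(delta, rs->count);
	rs->m2 += (u64)(delta * (x - rs->mean));
}

static u64 running_stat_stdev(const struct running_stat *rs)
{
	if (rs->count < 2)
		return 0;

	return int_sqrt64(div64_u64(rs->m2, rs->count - 1));
}

Note that the square root and the division by (count - 1) only need to
happen when the stdev is actually read out.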

>
>
> >   }
> >
> >   void ceph_update_read_metrics(struct ceph_client_metric *m,
> > @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
> >                             unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->read_metric_lock);
> > -     total = ++m->total_reads;
> >       m->read_size_sum += size;
> > -     m->read_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >                             m->read_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> > -                           m->read_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->read_latency_sum,
> > -                    &m->read_latency_sq_sum, lat);
> > +     __update_latency(&m->total_reads, &m->read_latency_sum,
> > +                      &m->avg_read_latency, &m->read_latency_min,
> > +                      &m->read_latency_max, &m->read_latency_stdev, lat);
> >       spin_unlock(&m->read_metric_lock);
> >   }
> >
> > @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
> >                              unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->write_metric_lock);
> > -     total = ++m->total_writes;
> >       m->write_size_sum += size;
> > -     m->write_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >                             m->write_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> > -                           m->write_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->write_latency_sum,
> > -                    &m->write_latency_sq_sum, lat);
> > +     __update_latency(&m->total_writes, &m->write_latency_sum,
> > +                      &m->avg_write_latency, &m->write_latency_min,
> > +                      &m->write_latency_max, &m->write_latency_stdev, lat);
> >       spin_unlock(&m->write_metric_lock);
> >   }
> >
> > @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
> >                                 int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ENOENT))
> >               return;
> >
> >       spin_lock(&m->metadata_metric_lock);
> > -     total = ++m->total_metadatas;
> > -     m->metadata_latency_sum += lat;
> > -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> > -                           m->metadata_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->metadata_latency_sum,
> > -                    &m->metadata_latency_sq_sum, lat);
> > +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> > +                      &m->avg_metadata_latency, &m->metadata_latency_min,
> > +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
> >       spin_unlock(&m->metadata_metric_lock);
> >   }
> > diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> > index 103ed736f9d2..a5da21b8f8ed 100644
> > --- a/fs/ceph/metric.h
> > +++ b/fs/ceph/metric.h
> > @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >       u64 read_size_min;
> >       u64 read_size_max;
> >       ktime_t read_latency_sum;
> > -     ktime_t read_latency_sq_sum;
> > +     ktime_t avg_read_latency;
> > +     ktime_t read_latency_stdev;
> >       ktime_t read_latency_min;
> >       ktime_t read_latency_max;
> >
> > @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >       u64 write_size_min;
> >       u64 write_size_max;
> >       ktime_t write_latency_sum;
> > -     ktime_t write_latency_sq_sum;
> > +     ktime_t avg_write_latency;
> > +     ktime_t write_latency_stdev;
> >       ktime_t write_latency_min;
> >       ktime_t write_latency_max;
> >
> >       spinlock_t metadata_metric_lock;
> >       u64 total_metadatas;
> >       ktime_t metadata_latency_sum;
> > -     ktime_t metadata_latency_sq_sum;
> > +     ktime_t avg_metadata_latency;
> > +     ktime_t metadata_latency_stdev;
> >       ktime_t metadata_latency_min;
> >       ktime_t metadata_latency_max;
> >
>
Jeff Layton Sept. 14, 2021, 1:32 p.m. UTC | #6
On Tue, 2021-09-14 at 21:13 +0800, Xiubo Li wrote:
> On 9/14/21 4:49 PM, Venky Shankar wrote:
> > The math involved in tracking average and standard deviation
> > for r/w/m latencies looks incorrect. Fix that up. Also, change
> > the variable name that tracks standard deviation (*_sq_sum) to
> > *_stdev.
> > 
> > Signed-off-by: Venky Shankar <vshankar@redhat.com>
> > ---
> >   fs/ceph/debugfs.c | 14 +++++-----
> >   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
> >   fs/ceph/metric.h  |  9 ++++--
> >   3 files changed, 45 insertions(+), 48 deletions(-)
> > 
> > diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> > index 38b78b45811f..3abfa7ae8220 100644
> > --- a/fs/ceph/debugfs.c
> > +++ b/fs/ceph/debugfs.c
> > @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
> >   	struct ceph_mds_client *mdsc = fsc->mdsc;
> >   	struct ceph_client_metric *m = &mdsc->metric;
> >   	int nr_caps = 0;
> > -	s64 total, sum, avg, min, max, sq;
> > +	s64 total, sum, avg, min, max, stdev;
> >   	u64 sum_sz, avg_sz, min_sz, max_sz;
> >   
> >   	sum = percpu_counter_sum(&m->total_inodes);
> > @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
> >   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >   	min = m->read_latency_min;
> >   	max = m->read_latency_max;
> > -	sq = m->read_latency_sq_sum;
> > +	stdev = m->read_latency_stdev;
> >   	spin_unlock(&m->read_metric_lock);
> > -	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> > +	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
> >   
> >   	spin_lock(&m->write_metric_lock);
> >   	total = m->total_writes;
> > @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
> >   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >   	min = m->write_latency_min;
> >   	max = m->write_latency_max;
> > -	sq = m->write_latency_sq_sum;
> > +	stdev = m->write_latency_stdev;
> >   	spin_unlock(&m->write_metric_lock);
> > -	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> > +	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
> >   
> >   	spin_lock(&m->metadata_metric_lock);
> >   	total = m->total_metadatas;
> > @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
> >   	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >   	min = m->metadata_latency_min;
> >   	max = m->metadata_latency_max;
> > -	sq = m->metadata_latency_sq_sum;
> > +	stdev = m->metadata_latency_stdev;
> >   	spin_unlock(&m->metadata_metric_lock);
> > -	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> > +	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
> >   
> >   	seq_printf(s, "\n");
> >   	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> > diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> > index 226dc38e2909..6b774b1a88ce 100644
> > --- a/fs/ceph/metric.c
> > +++ b/fs/ceph/metric.c
> > @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >   		goto err_i_caps_mis;
> >   
> >   	spin_lock_init(&m->read_metric_lock);
> > -	m->read_latency_sq_sum = 0;
> > +	m->read_latency_stdev = 0;
> > +	m->avg_read_latency = 0;
> >   	m->read_latency_min = KTIME_MAX;
> >   	m->read_latency_max = 0;
> >   	m->total_reads = 0;
> > @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >   	m->read_size_sum = 0;
> >   
> >   	spin_lock_init(&m->write_metric_lock);
> > -	m->write_latency_sq_sum = 0;
> > +	m->write_latency_stdev = 0;
> > +	m->avg_write_latency = 0;
> >   	m->write_latency_min = KTIME_MAX;
> >   	m->write_latency_max = 0;
> >   	m->total_writes = 0;
> > @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >   	m->write_size_sum = 0;
> >   
> >   	spin_lock_init(&m->metadata_metric_lock);
> > -	m->metadata_latency_sq_sum = 0;
> > +	m->metadata_latency_stdev = 0;
> > +	m->avg_metadata_latency = 0;
> >   	m->metadata_latency_min = KTIME_MAX;
> >   	m->metadata_latency_max = 0;
> >   	m->total_metadatas = 0;
> > @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
> >   		max = new;			\
> >   }
> >   
> > -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> > -				  ktime_t *sq_sump, ktime_t lat)
> > +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> > +				    ktime_t *lavg, ktime_t *min, ktime_t *max,
> > +				    ktime_t *lstdev, ktime_t lat)
> >   {
> > -	ktime_t avg, sq;
> > +	ktime_t total, avg, stdev;
> >   
> > -	if (unlikely(total == 1))
> > -		return;
> > +	total = ++(*ctotal);
> > +	*lsum += lat;
> > +
> > +	METRIC_UPDATE_MIN_MAX(*min, *max, lat);
> >   
> > -	/* the sq is (lat - old_avg) * (lat - new_avg) */
> > -	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> > -	sq = lat - avg;
> > -	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> > -	sq = sq * (lat - avg);
> > -	*sq_sump += sq;
> > +	if (unlikely(total == 1)) {
> > +		*lavg = lat;
> > +		*lstdev = 0;
> > +	} else {
> > +		avg = *lavg + div64_s64(lat - *lavg, total);
> > +		stdev = *lstdev + (lat - *lavg)*(lat - avg);
> > +		*lstdev = int_sqrt(div64_u64(stdev, total - 1));
> 
> In kernel space, won't it be a little heavy to run int_sqrt() every time
> the latency is updated?
> 
> @Jeff, any idea ?
> 
> 

Yeah, I agree...

int_sqrt() doesn't look _too_ awful -- it's mostly shifts and adds. You
can see the code for it in lib/math/int_sqrt.c. This probably ought to
be using int_sqrt64() too since the argument is a 64-bit value.

Still, keeping the amount of work low for each new update is really
better if you can. It would be best to defer as much computation as
possible to when this info is being queried. In many cases, this info
will never be consulted, so we really want to keep its overhead low.
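
(A minimal sketch of that split, with made-up names: the hot update
path would keep only three integers per metric, and the division plus
int_sqrt64() would run only when the debugfs file is read or the
metrics message is built.)

#include <linux/kernel.h>
#include <linux/math64.h>

/* Per-metric state the per-I/O update path would maintain under the
 * metric lock: adds and multiplies only, no sqrt. */
struct lat_stat {
	u64 count;
	s64 mean;
	u64 m2;		/* sum of squared deviations, not the stdev itself */
};

/* All of the expensive work (division + int_sqrt64()) runs only here,
 * at read/query time. */
static u64 lat_stat_stdev(const struct lat_stat *st)
{
	return st->count > 1 ?
	       int_sqrt64(div64_u64(st->m2, st->count - 1)) : 0;
}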

> > +		*lavg = avg;
> > +	}
> >   }
> >   
> >   void ceph_update_read_metrics(struct ceph_client_metric *m,
> > @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
> >   			      unsigned int size, int rc)
> >   {
> >   	ktime_t lat = ktime_sub(r_end, r_start);
> > -	ktime_t total;
> >   
> >   	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >   		return;
> >   
> >   	spin_lock(&m->read_metric_lock);
> > -	total = ++m->total_reads;
> >   	m->read_size_sum += size;
> > -	m->read_latency_sum += lat;
> >   	METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >   			      m->read_size_max,
> >   			      size);
> > -	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> > -			      m->read_latency_max,
> > -			      lat);
> > -	__update_stdev(total, m->read_latency_sum,
> > -		       &m->read_latency_sq_sum, lat);
> > +	__update_latency(&m->total_reads, &m->read_latency_sum,
> > +			 &m->avg_read_latency, &m->read_latency_min,
> > +			 &m->read_latency_max, &m->read_latency_stdev, lat);
> >   	spin_unlock(&m->read_metric_lock);
> >   }
> >   
> > @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
> >   			       unsigned int size, int rc)
> >   {
> >   	ktime_t lat = ktime_sub(r_end, r_start);
> > -	ktime_t total;
> >   
> >   	if (unlikely(rc && rc != -ETIMEDOUT))
> >   		return;
> >   
> >   	spin_lock(&m->write_metric_lock);
> > -	total = ++m->total_writes;
> >   	m->write_size_sum += size;
> > -	m->write_latency_sum += lat;
> >   	METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >   			      m->write_size_max,
> >   			      size);
> > -	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> > -			      m->write_latency_max,
> > -			      lat);
> > -	__update_stdev(total, m->write_latency_sum,
> > -		       &m->write_latency_sq_sum, lat);
> > +	__update_latency(&m->total_writes, &m->write_latency_sum,
> > +			 &m->avg_write_latency, &m->write_latency_min,
> > +			 &m->write_latency_max, &m->write_latency_stdev, lat);
> >   	spin_unlock(&m->write_metric_lock);
> >   }
> >   
> > @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
> >   				  int rc)
> >   {
> >   	ktime_t lat = ktime_sub(r_end, r_start);
> > -	ktime_t total;
> >   
> >   	if (unlikely(rc && rc != -ENOENT))
> >   		return;
> >   
> >   	spin_lock(&m->metadata_metric_lock);
> > -	total = ++m->total_metadatas;
> > -	m->metadata_latency_sum += lat;
> > -	METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> > -			      m->metadata_latency_max,
> > -			      lat);
> > -	__update_stdev(total, m->metadata_latency_sum,
> > -		       &m->metadata_latency_sq_sum, lat);
> > +	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> > +			 &m->avg_metadata_latency, &m->metadata_latency_min,
> > +			 &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
> >   	spin_unlock(&m->metadata_metric_lock);
> >   }
> > diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> > index 103ed736f9d2..a5da21b8f8ed 100644
> > --- a/fs/ceph/metric.h
> > +++ b/fs/ceph/metric.h
> > @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >   	u64 read_size_min;
> >   	u64 read_size_max;
> >   	ktime_t read_latency_sum;
> > -	ktime_t read_latency_sq_sum;
> > +	ktime_t avg_read_latency;
> > +	ktime_t read_latency_stdev;
> >   	ktime_t read_latency_min;
> >   	ktime_t read_latency_max;
> >   
> > @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >   	u64 write_size_min;
> >   	u64 write_size_max;
> >   	ktime_t write_latency_sum;
> > -	ktime_t write_latency_sq_sum;
> > +	ktime_t avg_write_latency;
> > +	ktime_t write_latency_stdev;
> >   	ktime_t write_latency_min;
> >   	ktime_t write_latency_max;
> >   
> >   	spinlock_t metadata_metric_lock;
> >   	u64 total_metadatas;
> >   	ktime_t metadata_latency_sum;
> > -	ktime_t metadata_latency_sq_sum;
> > +	ktime_t avg_metadata_latency;
> > +	ktime_t metadata_latency_stdev;
> >   	ktime_t metadata_latency_min;
> >   	ktime_t metadata_latency_max;
> >   
>
Venky Shankar Sept. 14, 2021, 1:32 p.m. UTC | #7
On Tue, Sep 14, 2021 at 6:43 PM Xiubo Li <xiubli@redhat.com> wrote:
>
>
> On 9/14/21 4:49 PM, Venky Shankar wrote:
> > The math involved in tracking average and standard deviation
> > for r/w/m latencies looks incorrect. Fix that up. Also, change
> > the variable name that tracks standard deviation (*_sq_sum) to
> > *_stdev.
> >
> > Signed-off-by: Venky Shankar <vshankar@redhat.com>
> > ---
> >   fs/ceph/debugfs.c | 14 +++++-----
> >   fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
> >   fs/ceph/metric.h  |  9 ++++--
> >   3 files changed, 45 insertions(+), 48 deletions(-)
> >
> > diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> > index 38b78b45811f..3abfa7ae8220 100644
> > --- a/fs/ceph/debugfs.c
> > +++ b/fs/ceph/debugfs.c
> > @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
> >       struct ceph_mds_client *mdsc = fsc->mdsc;
> >       struct ceph_client_metric *m = &mdsc->metric;
> >       int nr_caps = 0;
> > -     s64 total, sum, avg, min, max, sq;
> > +     s64 total, sum, avg, min, max, stdev;
> >       u64 sum_sz, avg_sz, min_sz, max_sz;
> >
> >       sum = percpu_counter_sum(&m->total_inodes);
> > @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->read_latency_min;
> >       max = m->read_latency_max;
> > -     sq = m->read_latency_sq_sum;
> > +     stdev = m->read_latency_stdev;
> >       spin_unlock(&m->read_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
> >
> >       spin_lock(&m->write_metric_lock);
> >       total = m->total_writes;
> > @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->write_latency_min;
> >       max = m->write_latency_max;
> > -     sq = m->write_latency_sq_sum;
> > +     stdev = m->write_latency_stdev;
> >       spin_unlock(&m->write_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
> >
> >       spin_lock(&m->metadata_metric_lock);
> >       total = m->total_metadatas;
> > @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
> >       avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >       min = m->metadata_latency_min;
> >       max = m->metadata_latency_max;
> > -     sq = m->metadata_latency_sq_sum;
> > +     stdev = m->metadata_latency_stdev;
> >       spin_unlock(&m->metadata_metric_lock);
> > -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> > +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
> >
> >       seq_printf(s, "\n");
> >       seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> > diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> > index 226dc38e2909..6b774b1a88ce 100644
> > --- a/fs/ceph/metric.c
> > +++ b/fs/ceph/metric.c
> > @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >               goto err_i_caps_mis;
> >
> >       spin_lock_init(&m->read_metric_lock);
> > -     m->read_latency_sq_sum = 0;
> > +     m->read_latency_stdev = 0;
> > +     m->avg_read_latency = 0;
> >       m->read_latency_min = KTIME_MAX;
> >       m->read_latency_max = 0;
> >       m->total_reads = 0;
> > @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->read_size_sum = 0;
> >
> >       spin_lock_init(&m->write_metric_lock);
> > -     m->write_latency_sq_sum = 0;
> > +     m->write_latency_stdev = 0;
> > +     m->avg_write_latency = 0;
> >       m->write_latency_min = KTIME_MAX;
> >       m->write_latency_max = 0;
> >       m->total_writes = 0;
> > @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >       m->write_size_sum = 0;
> >
> >       spin_lock_init(&m->metadata_metric_lock);
> > -     m->metadata_latency_sq_sum = 0;
> > +     m->metadata_latency_stdev = 0;
> > +     m->avg_metadata_latency = 0;
> >       m->metadata_latency_min = KTIME_MAX;
> >       m->metadata_latency_max = 0;
> >       m->total_metadatas = 0;
> > @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
> >               max = new;                      \
> >   }
> >
> > -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> > -                               ktime_t *sq_sump, ktime_t lat)
> > +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> > +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
> > +                                 ktime_t *lstdev, ktime_t lat)
> >   {
> > -     ktime_t avg, sq;
> > +     ktime_t total, avg, stdev;
> >
> > -     if (unlikely(total == 1))
> > -             return;
> > +     total = ++(*ctotal);
> > +     *lsum += lat;
> > +
> > +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
> >
> > -     /* the sq is (lat - old_avg) * (lat - new_avg) */
> > -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> > -     sq = lat - avg;
> > -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> > -     sq = sq * (lat - avg);
> > -     *sq_sump += sq;
> > +     if (unlikely(total == 1)) {
> > +             *lavg = lat;
> > +             *lstdev = 0;
> > +     } else {
> > +             avg = *lavg + div64_s64(lat - *lavg, total);
> > +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
> > +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
>
> In kernel space, won't it be a little heavy to run the int_sqrt() every time
> when updating the latency ?

It's most likely not needed. We could keep track of the variance
(doesn't require int_sqrt) and calculate stdev when sending metrics.
That would be mathematically correct too as you mentioned.
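
I.e. keep just two running values per latency type, updated for each
sample with plain integer math (the usual Welford-style running
variance):

  avg(n)     = avg(n-1) + (lat(n) - avg(n-1)) / n
  var_sum(n) = var_sum(n-1) + (lat(n) - avg(n-1)) * (lat(n) - avg(n))

and derive stdev only when the metrics are shown or sent:

  stdev = sqrt(var_sum(n) / (n - 1))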

>
> @Jeff, any idea ?
>
>
> > +             *lavg = avg;
> > +     }
> >   }
> >
> >   void ceph_update_read_metrics(struct ceph_client_metric *m,
> > @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
> >                             unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->read_metric_lock);
> > -     total = ++m->total_reads;
> >       m->read_size_sum += size;
> > -     m->read_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >                             m->read_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> > -                           m->read_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->read_latency_sum,
> > -                    &m->read_latency_sq_sum, lat);
> > +     __update_latency(&m->total_reads, &m->read_latency_sum,
> > +                      &m->avg_read_latency, &m->read_latency_min,
> > +                      &m->read_latency_max, &m->read_latency_stdev, lat);
> >       spin_unlock(&m->read_metric_lock);
> >   }
> >
> > @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
> >                              unsigned int size, int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ETIMEDOUT))
> >               return;
> >
> >       spin_lock(&m->write_metric_lock);
> > -     total = ++m->total_writes;
> >       m->write_size_sum += size;
> > -     m->write_latency_sum += lat;
> >       METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >                             m->write_size_max,
> >                             size);
> > -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> > -                           m->write_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->write_latency_sum,
> > -                    &m->write_latency_sq_sum, lat);
> > +     __update_latency(&m->total_writes, &m->write_latency_sum,
> > +                      &m->avg_write_latency, &m->write_latency_min,
> > +                      &m->write_latency_max, &m->write_latency_stdev, lat);
> >       spin_unlock(&m->write_metric_lock);
> >   }
> >
> > @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
> >                                 int rc)
> >   {
> >       ktime_t lat = ktime_sub(r_end, r_start);
> > -     ktime_t total;
> >
> >       if (unlikely(rc && rc != -ENOENT))
> >               return;
> >
> >       spin_lock(&m->metadata_metric_lock);
> > -     total = ++m->total_metadatas;
> > -     m->metadata_latency_sum += lat;
> > -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> > -                           m->metadata_latency_max,
> > -                           lat);
> > -     __update_stdev(total, m->metadata_latency_sum,
> > -                    &m->metadata_latency_sq_sum, lat);
> > +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> > +                      &m->avg_metadata_latency, &m->metadata_latency_min,
> > +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
> >       spin_unlock(&m->metadata_metric_lock);
> >   }
> > diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> > index 103ed736f9d2..a5da21b8f8ed 100644
> > --- a/fs/ceph/metric.h
> > +++ b/fs/ceph/metric.h
> > @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >       u64 read_size_min;
> >       u64 read_size_max;
> >       ktime_t read_latency_sum;
> > -     ktime_t read_latency_sq_sum;
> > +     ktime_t avg_read_latency;
> > +     ktime_t read_latency_stdev;
> >       ktime_t read_latency_min;
> >       ktime_t read_latency_max;
> >
> > @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >       u64 write_size_min;
> >       u64 write_size_max;
> >       ktime_t write_latency_sum;
> > -     ktime_t write_latency_sq_sum;
> > +     ktime_t avg_write_latency;
> > +     ktime_t write_latency_stdev;
> >       ktime_t write_latency_min;
> >       ktime_t write_latency_max;
> >
> >       spinlock_t metadata_metric_lock;
> >       u64 total_metadatas;
> >       ktime_t metadata_latency_sum;
> > -     ktime_t metadata_latency_sq_sum;
> > +     ktime_t avg_metadata_latency;
> > +     ktime_t metadata_latency_stdev;
> >       ktime_t metadata_latency_min;
> >       ktime_t metadata_latency_max;
> >
>
Xiubo Li Sept. 14, 2021, 1:45 p.m. UTC | #8
On 9/14/21 9:30 PM, Venky Shankar wrote:
> On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
>>
>> On 9/14/21 4:49 PM, Venky Shankar wrote:
>>> The math involved in tracking average and standard deviation
>>> for r/w/m latencies looks incorrect. Fix that up. Also, change
>>> the variable name that tracks standard deviation (*_sq_sum) to
>>> *_stdev.
>>>
>>> Signed-off-by: Venky Shankar <vshankar@redhat.com>
>>> ---
>>>    fs/ceph/debugfs.c | 14 +++++-----
>>>    fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
>>>    fs/ceph/metric.h  |  9 ++++--
>>>    3 files changed, 45 insertions(+), 48 deletions(-)
>>>
>>> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
>>> index 38b78b45811f..3abfa7ae8220 100644
>>> --- a/fs/ceph/debugfs.c
>>> +++ b/fs/ceph/debugfs.c
>>> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
>>>        struct ceph_mds_client *mdsc = fsc->mdsc;
>>>        struct ceph_client_metric *m = &mdsc->metric;
>>>        int nr_caps = 0;
>>> -     s64 total, sum, avg, min, max, sq;
>>> +     s64 total, sum, avg, min, max, stdev;
>>>        u64 sum_sz, avg_sz, min_sz, max_sz;
>>>
>>>        sum = percpu_counter_sum(&m->total_inodes);
>>> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>        min = m->read_latency_min;
>>>        max = m->read_latency_max;
>>> -     sq = m->read_latency_sq_sum;
>>> +     stdev = m->read_latency_stdev;
>>>        spin_unlock(&m->read_metric_lock);
>>> -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
>>> +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
>>>
>>>        spin_lock(&m->write_metric_lock);
>>>        total = m->total_writes;
>>> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>        min = m->write_latency_min;
>>>        max = m->write_latency_max;
>>> -     sq = m->write_latency_sq_sum;
>>> +     stdev = m->write_latency_stdev;
>>>        spin_unlock(&m->write_metric_lock);
>>> -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
>>> +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
>>>
>>>        spin_lock(&m->metadata_metric_lock);
>>>        total = m->total_metadatas;
>>> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>        min = m->metadata_latency_min;
>>>        max = m->metadata_latency_max;
>>> -     sq = m->metadata_latency_sq_sum;
>>> +     stdev = m->metadata_latency_stdev;
>>>        spin_unlock(&m->metadata_metric_lock);
>>> -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
>>> +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
>>>
>>>        seq_printf(s, "\n");
>>>        seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
>>> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
>>> index 226dc38e2909..6b774b1a88ce 100644
>>> --- a/fs/ceph/metric.c
>>> +++ b/fs/ceph/metric.c
>>> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>                goto err_i_caps_mis;
>>>
>>>        spin_lock_init(&m->read_metric_lock);
>>> -     m->read_latency_sq_sum = 0;
>>> +     m->read_latency_stdev = 0;
>>> +     m->avg_read_latency = 0;
>>>        m->read_latency_min = KTIME_MAX;
>>>        m->read_latency_max = 0;
>>>        m->total_reads = 0;
>>> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>        m->read_size_sum = 0;
>>>
>>>        spin_lock_init(&m->write_metric_lock);
>>> -     m->write_latency_sq_sum = 0;
>>> +     m->write_latency_stdev = 0;
>>> +     m->avg_write_latency = 0;
>>>        m->write_latency_min = KTIME_MAX;
>>>        m->write_latency_max = 0;
>>>        m->total_writes = 0;
>>> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>        m->write_size_sum = 0;
>>>
>>>        spin_lock_init(&m->metadata_metric_lock);
>>> -     m->metadata_latency_sq_sum = 0;
>>> +     m->metadata_latency_stdev = 0;
>>> +     m->avg_metadata_latency = 0;
>>>        m->metadata_latency_min = KTIME_MAX;
>>>        m->metadata_latency_max = 0;
>>>        m->total_metadatas = 0;
>>> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
>>>                max = new;                      \
>>>    }
>>>
>>> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
>>> -                               ktime_t *sq_sump, ktime_t lat)
>>> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
>>> +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
>>> +                                 ktime_t *lstdev, ktime_t lat)
>>>    {
>>> -     ktime_t avg, sq;
>>> +     ktime_t total, avg, stdev;
>>>
>>> -     if (unlikely(total == 1))
>>> -             return;
>>> +     total = ++(*ctotal);
>>> +     *lsum += lat;
>>> +
>>> +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
>>>
>>> -     /* the sq is (lat - old_avg) * (lat - new_avg) */
>>> -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
>>> -     sq = lat - avg;
>>> -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
>>> -     sq = sq * (lat - avg);
>>> -     *sq_sump += sq;
>>> +     if (unlikely(total == 1)) {
>>> +             *lavg = lat;
>>> +             *lstdev = 0;
>>> +     } else {
>>> +             avg = *lavg + div64_s64(lat - *lavg, total);
>>> +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
>>> +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
>>> +             *lavg = avg;
>>> +     }
>> IMO, this is incorrect, the math formula please see:
>>
>> https://www.investopedia.com/ask/answers/042415/what-difference-between-standard-error-means-and-standard-deviation.asp
>>
>> The most accurate result should be:
>>
>> stdev = int_sqrt(sum((X(n) - avg)^2, (X(n-1) - avg)^2, ..., (X(1) -
>> avg)^2) / (n - 1)).
>>
>> While you are computing it:
>>
>> stdev_n = int_sqrt(stdev_(n-1) + (X(n-1) - avg)^2)
> Hmm. The int_sqrt() is probably not needed at this point and can be
> done when sending the metric. That would avoid some cycles.
>
> Also, the way avg is calculated not totally incorrect, however, I
> would like to keep it similar to how its done is libcephfs.

In user space this is very easy to do, but not in kernel space,
especially since there is no floating point math there.

Currently the kclient computes the avg incrementally:

avg(n) = avg(n-1) + (latency(n) - avg(n-1)) / n

and IMO this should be close to the real
avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.

Because it's hard to record all the latency values, this is also what
many other user space tools do to compute the avg.
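
For example, with latencies 10, 20 and 30 (using the same integer math
the patch does):

  avg(1) = 10
  avg(2) = 10 + (20 - 10) / 2 = 15
  avg(3) = 15 + (30 - 15) / 3 = 20

which here equals the real avg (10 + 20 + 30) / 3 = 20, without having
to keep the individual samples around.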


>> Though current stdev computing method is not exactly the same the math
>> formula does, but it's closer to it, because the kernel couldn't record
>> all the latency value and do it whenever needed, which will occupy a
>> large amount of memories and cpu resources.
> The approach is to calculate the running variance, I.e., compute the
> variance as  data (latency) arrive one at a time.
>
>>
>>>    }
>>>
>>>    void ceph_update_read_metrics(struct ceph_client_metric *m,
>>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
>>>                              unsigned int size, int rc)
>>>    {
>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>> -     ktime_t total;
>>>
>>>        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>>>                return;
>>>
>>>        spin_lock(&m->read_metric_lock);
>>> -     total = ++m->total_reads;
>>>        m->read_size_sum += size;
>>> -     m->read_latency_sum += lat;
>>>        METRIC_UPDATE_MIN_MAX(m->read_size_min,
>>>                              m->read_size_max,
>>>                              size);
>>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
>>> -                           m->read_latency_max,
>>> -                           lat);
>>> -     __update_stdev(total, m->read_latency_sum,
>>> -                    &m->read_latency_sq_sum, lat);
>>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
>>> +                      &m->avg_read_latency, &m->read_latency_min,
>>> +                      &m->read_latency_max, &m->read_latency_stdev, lat);
>>>        spin_unlock(&m->read_metric_lock);
>>>    }
>>>
>>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
>>>                               unsigned int size, int rc)
>>>    {
>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>> -     ktime_t total;
>>>
>>>        if (unlikely(rc && rc != -ETIMEDOUT))
>>>                return;
>>>
>>>        spin_lock(&m->write_metric_lock);
>>> -     total = ++m->total_writes;
>>>        m->write_size_sum += size;
>>> -     m->write_latency_sum += lat;
>>>        METRIC_UPDATE_MIN_MAX(m->write_size_min,
>>>                              m->write_size_max,
>>>                              size);
>>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
>>> -                           m->write_latency_max,
>>> -                           lat);
>>> -     __update_stdev(total, m->write_latency_sum,
>>> -                    &m->write_latency_sq_sum, lat);
>>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
>>> +                      &m->avg_write_latency, &m->write_latency_min,
>>> +                      &m->write_latency_max, &m->write_latency_stdev, lat);
>>>        spin_unlock(&m->write_metric_lock);
>>>    }
>>>
>>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
>>>                                  int rc)
>>>    {
>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>> -     ktime_t total;
>>>
>>>        if (unlikely(rc && rc != -ENOENT))
>>>                return;
>>>
>>>        spin_lock(&m->metadata_metric_lock);
>>> -     total = ++m->total_metadatas;
>>> -     m->metadata_latency_sum += lat;
>>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
>>> -                           m->metadata_latency_max,
>>> -                           lat);
>>> -     __update_stdev(total, m->metadata_latency_sum,
>>> -                    &m->metadata_latency_sq_sum, lat);
>>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
>>> +                      &m->avg_metadata_latency, &m->metadata_latency_min,
>>> +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
>>>        spin_unlock(&m->metadata_metric_lock);
>>>    }
>>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
>>> index 103ed736f9d2..a5da21b8f8ed 100644
>>> --- a/fs/ceph/metric.h
>>> +++ b/fs/ceph/metric.h
>>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>>>        u64 read_size_min;
>>>        u64 read_size_max;
>>>        ktime_t read_latency_sum;
>>> -     ktime_t read_latency_sq_sum;
>>> +     ktime_t avg_read_latency;
>>> +     ktime_t read_latency_stdev;
>>>        ktime_t read_latency_min;
>>>        ktime_t read_latency_max;
>>>
>>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>>>        u64 write_size_min;
>>>        u64 write_size_max;
>>>        ktime_t write_latency_sum;
>>> -     ktime_t write_latency_sq_sum;
>>> +     ktime_t avg_write_latency;
>>> +     ktime_t write_latency_stdev;
>>>        ktime_t write_latency_min;
>>>        ktime_t write_latency_max;
>>>
>>>        spinlock_t metadata_metric_lock;
>>>        u64 total_metadatas;
>>>        ktime_t metadata_latency_sum;
>>> -     ktime_t metadata_latency_sq_sum;
>>> +     ktime_t avg_metadata_latency;
>>> +     ktime_t metadata_latency_stdev;
>>>        ktime_t metadata_latency_min;
>>>        ktime_t metadata_latency_max;
>>>
>
Xiubo Li Sept. 14, 2021, 1:52 p.m. UTC | #9
On 9/14/21 9:45 PM, Xiubo Li wrote:
>
> On 9/14/21 9:30 PM, Venky Shankar wrote:
>> On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
>>>
>>> On 9/14/21 4:49 PM, Venky Shankar wrote:
[...]
> In user space this is very easy to do, but not in kernel space, 
> especially there has no float computing.
>
As I remember, this is the main reason why I was planning to send the raw
metrics to the MDS and let the MDS do the computing.

So if possible, why not just send the raw data to the MDS and let the MDS
do the stdev computing?
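
For example (just a thought, not what the metric messages carry today),
for each latency type the client could send only:

  n       - number of ops in the interval
  sum     - sum of the latencies
  sq_sum  - sum of the squared latencies

and the MDS could then derive:

  avg   = sum / n
  stdev = sqrt((sq_sum - sum^2 / n) / (n - 1))

so both the division and the sqrt would happen on the MDS side.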


> Currently the kclient is doing the avg computing by:
>
> avg(n) = (avg(n-1) + latency(n)) / (n), IMO this should be closer to 
> the real avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.
>
> Because it's hard to record all the latency values, this is also many 
> other user space tools doing to count the avg.
>
>
>>> Though current stdev computing method is not exactly the same the math
>>> formula does, but it's closer to it, because the kernel couldn't record
>>> all the latency value and do it whenever needed, which will occupy a
>>> large amount of memories and cpu resources.
>> The approach is to calculate the running variance, I.e., compute the
>> variance as  data (latency) arrive one at a time.
>>
>>>
>>>>    }
>>>>
>>>>    void ceph_update_read_metrics(struct ceph_client_metric *m,
>>>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct 
>>>> ceph_client_metric *m,
>>>>                              unsigned int size, int rc)
>>>>    {
>>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>>> -     ktime_t total;
>>>>
>>>>        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>>>>                return;
>>>>
>>>>        spin_lock(&m->read_metric_lock);
>>>> -     total = ++m->total_reads;
>>>>        m->read_size_sum += size;
>>>> -     m->read_latency_sum += lat;
>>>>        METRIC_UPDATE_MIN_MAX(m->read_size_min,
>>>>                              m->read_size_max,
>>>>                              size);
>>>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
>>>> -                           m->read_latency_max,
>>>> -                           lat);
>>>> -     __update_stdev(total, m->read_latency_sum,
>>>> -                    &m->read_latency_sq_sum, lat);
>>>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
>>>> +                      &m->avg_read_latency, &m->read_latency_min,
>>>> +                      &m->read_latency_max, 
>>>> &m->read_latency_stdev, lat);
>>>>        spin_unlock(&m->read_metric_lock);
>>>>    }
>>>>
>>>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct 
>>>> ceph_client_metric *m,
>>>>                               unsigned int size, int rc)
>>>>    {
>>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>>> -     ktime_t total;
>>>>
>>>>        if (unlikely(rc && rc != -ETIMEDOUT))
>>>>                return;
>>>>
>>>>        spin_lock(&m->write_metric_lock);
>>>> -     total = ++m->total_writes;
>>>>        m->write_size_sum += size;
>>>> -     m->write_latency_sum += lat;
>>>>        METRIC_UPDATE_MIN_MAX(m->write_size_min,
>>>>                              m->write_size_max,
>>>>                              size);
>>>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
>>>> -                           m->write_latency_max,
>>>> -                           lat);
>>>> -     __update_stdev(total, m->write_latency_sum,
>>>> -                    &m->write_latency_sq_sum, lat);
>>>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
>>>> +                      &m->avg_write_latency, &m->write_latency_min,
>>>> +                      &m->write_latency_max, 
>>>> &m->write_latency_stdev, lat);
>>>>        spin_unlock(&m->write_metric_lock);
>>>>    }
>>>>
>>>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct 
>>>> ceph_client_metric *m,
>>>>                                  int rc)
>>>>    {
>>>>        ktime_t lat = ktime_sub(r_end, r_start);
>>>> -     ktime_t total;
>>>>
>>>>        if (unlikely(rc && rc != -ENOENT))
>>>>                return;
>>>>
>>>>        spin_lock(&m->metadata_metric_lock);
>>>> -     total = ++m->total_metadatas;
>>>> -     m->metadata_latency_sum += lat;
>>>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
>>>> -                           m->metadata_latency_max,
>>>> -                           lat);
>>>> -     __update_stdev(total, m->metadata_latency_sum,
>>>> -                    &m->metadata_latency_sq_sum, lat);
>>>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
>>>> +                      &m->avg_metadata_latency, 
>>>> &m->metadata_latency_min,
>>>> +                      &m->metadata_latency_max, 
>>>> &m->metadata_latency_stdev, lat);
>>>>        spin_unlock(&m->metadata_metric_lock);
>>>>    }
>>>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
>>>> index 103ed736f9d2..a5da21b8f8ed 100644
>>>> --- a/fs/ceph/metric.h
>>>> +++ b/fs/ceph/metric.h
>>>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>>>>        u64 read_size_min;
>>>>        u64 read_size_max;
>>>>        ktime_t read_latency_sum;
>>>> -     ktime_t read_latency_sq_sum;
>>>> +     ktime_t avg_read_latency;
>>>> +     ktime_t read_latency_stdev;
>>>>        ktime_t read_latency_min;
>>>>        ktime_t read_latency_max;
>>>>
>>>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>>>>        u64 write_size_min;
>>>>        u64 write_size_max;
>>>>        ktime_t write_latency_sum;
>>>> -     ktime_t write_latency_sq_sum;
>>>> +     ktime_t avg_write_latency;
>>>> +     ktime_t write_latency_stdev;
>>>>        ktime_t write_latency_min;
>>>>        ktime_t write_latency_max;
>>>>
>>>>        spinlock_t metadata_metric_lock;
>>>>        u64 total_metadatas;
>>>>        ktime_t metadata_latency_sum;
>>>> -     ktime_t metadata_latency_sq_sum;
>>>> +     ktime_t avg_metadata_latency;
>>>> +     ktime_t metadata_latency_stdev;
>>>>        ktime_t metadata_latency_min;
>>>>        ktime_t metadata_latency_max;
>>>>
>>
Venky Shankar Sept. 14, 2021, 1:53 p.m. UTC | #10
On Tue, Sep 14, 2021 at 7:16 PM Xiubo Li <xiubli@redhat.com> wrote:
>
>
> On 9/14/21 9:30 PM, Venky Shankar wrote:
> > On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
> >>
> >> On 9/14/21 4:49 PM, Venky Shankar wrote:
> >>> The math involved in tracking average and standard deviation
> >>> for r/w/m latencies looks incorrect. Fix that up. Also, change
> >>> the variable name that tracks standard deviation (*_sq_sum) to
> >>> *_stdev.
> >>>
> >>> Signed-off-by: Venky Shankar <vshankar@redhat.com>
> >>> ---
> >>>    fs/ceph/debugfs.c | 14 +++++-----
> >>>    fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
> >>>    fs/ceph/metric.h  |  9 ++++--
> >>>    3 files changed, 45 insertions(+), 48 deletions(-)
> >>>
> >>> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
> >>> index 38b78b45811f..3abfa7ae8220 100644
> >>> --- a/fs/ceph/debugfs.c
> >>> +++ b/fs/ceph/debugfs.c
> >>> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
> >>>        struct ceph_mds_client *mdsc = fsc->mdsc;
> >>>        struct ceph_client_metric *m = &mdsc->metric;
> >>>        int nr_caps = 0;
> >>> -     s64 total, sum, avg, min, max, sq;
> >>> +     s64 total, sum, avg, min, max, stdev;
> >>>        u64 sum_sz, avg_sz, min_sz, max_sz;
> >>>
> >>>        sum = percpu_counter_sum(&m->total_inodes);
> >>> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
> >>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >>>        min = m->read_latency_min;
> >>>        max = m->read_latency_max;
> >>> -     sq = m->read_latency_sq_sum;
> >>> +     stdev = m->read_latency_stdev;
> >>>        spin_unlock(&m->read_metric_lock);
> >>> -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
> >>> +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
> >>>
> >>>        spin_lock(&m->write_metric_lock);
> >>>        total = m->total_writes;
> >>> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
> >>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >>>        min = m->write_latency_min;
> >>>        max = m->write_latency_max;
> >>> -     sq = m->write_latency_sq_sum;
> >>> +     stdev = m->write_latency_stdev;
> >>>        spin_unlock(&m->write_metric_lock);
> >>> -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
> >>> +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
> >>>
> >>>        spin_lock(&m->metadata_metric_lock);
> >>>        total = m->total_metadatas;
> >>> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
> >>>        avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
> >>>        min = m->metadata_latency_min;
> >>>        max = m->metadata_latency_max;
> >>> -     sq = m->metadata_latency_sq_sum;
> >>> +     stdev = m->metadata_latency_stdev;
> >>>        spin_unlock(&m->metadata_metric_lock);
> >>> -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
> >>> +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
> >>>
> >>>        seq_printf(s, "\n");
> >>>        seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
> >>> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
> >>> index 226dc38e2909..6b774b1a88ce 100644
> >>> --- a/fs/ceph/metric.c
> >>> +++ b/fs/ceph/metric.c
> >>> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >>>                goto err_i_caps_mis;
> >>>
> >>>        spin_lock_init(&m->read_metric_lock);
> >>> -     m->read_latency_sq_sum = 0;
> >>> +     m->read_latency_stdev = 0;
> >>> +     m->avg_read_latency = 0;
> >>>        m->read_latency_min = KTIME_MAX;
> >>>        m->read_latency_max = 0;
> >>>        m->total_reads = 0;
> >>> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >>>        m->read_size_sum = 0;
> >>>
> >>>        spin_lock_init(&m->write_metric_lock);
> >>> -     m->write_latency_sq_sum = 0;
> >>> +     m->write_latency_stdev = 0;
> >>> +     m->avg_write_latency = 0;
> >>>        m->write_latency_min = KTIME_MAX;
> >>>        m->write_latency_max = 0;
> >>>        m->total_writes = 0;
> >>> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
> >>>        m->write_size_sum = 0;
> >>>
> >>>        spin_lock_init(&m->metadata_metric_lock);
> >>> -     m->metadata_latency_sq_sum = 0;
> >>> +     m->metadata_latency_stdev = 0;
> >>> +     m->avg_metadata_latency = 0;
> >>>        m->metadata_latency_min = KTIME_MAX;
> >>>        m->metadata_latency_max = 0;
> >>>        m->total_metadatas = 0;
> >>> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
> >>>                max = new;                      \
> >>>    }
> >>>
> >>> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
> >>> -                               ktime_t *sq_sump, ktime_t lat)
> >>> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
> >>> +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
> >>> +                                 ktime_t *lstdev, ktime_t lat)
> >>>    {
> >>> -     ktime_t avg, sq;
> >>> +     ktime_t total, avg, stdev;
> >>>
> >>> -     if (unlikely(total == 1))
> >>> -             return;
> >>> +     total = ++(*ctotal);
> >>> +     *lsum += lat;
> >>> +
> >>> +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
> >>>
> >>> -     /* the sq is (lat - old_avg) * (lat - new_avg) */
> >>> -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
> >>> -     sq = lat - avg;
> >>> -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
> >>> -     sq = sq * (lat - avg);
> >>> -     *sq_sump += sq;
> >>> +     if (unlikely(total == 1)) {
> >>> +             *lavg = lat;
> >>> +             *lstdev = 0;
> >>> +     } else {
> >>> +             avg = *lavg + div64_s64(lat - *lavg, total);
> >>> +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
> >>> +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
> >>> +             *lavg = avg;
> >>> +     }
> >> IMO, this is incorrect, the math formula please see:
> >>
> >> https://www.investopedia.com/ask/answers/042415/what-difference-between-standard-error-means-and-standard-deviation.asp
> >>
> >> The most accurate result should be:
> >>
> >> stdev = int_sqrt(sum((X(n) - avg)^2, (X(n-1) - avg)^2, ..., (X(1) -
> >> avg)^2) / (n - 1)).
> >>
> >> While you are computing it:
> >>
> >> stdev_n = int_sqrt(stdev_(n-1) + (X(n-1) - avg)^2)
> > Hmm. The int_sqrt() is probably not needed at this point and can be
> > done when sending the metric. That would avoid some cycles.
> >
> > Also, the way avg is calculated not totally incorrect, however, I
> > would like to keep it similar to how its done is libcephfs.
>
> In user space this is very easy to do, but not in kernel space,
> especially there has no float computing.
>
> Currently the kclient is doing the avg computing by:
>
> avg(n) = (avg(n-1) + latency(n)) / (n), IMO this should be closer to the
> real avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.

That's how it's done in libcephfs too.

>
> Because it's hard to record all the latency values, this is also many
> other user space tools doing to count the avg.
>
>
> >> Though current stdev computing method is not exactly the same the math
> >> formula does, but it's closer to it, because the kernel couldn't record
> >> all the latency value and do it whenever needed, which will occupy a
> >> large amount of memories and cpu resources.
> > The approach is to calculate the running variance, I.e., compute the
> > variance as  data (latency) arrive one at a time.
> >
> >>
> >>>    }
> >>>
> >>>    void ceph_update_read_metrics(struct ceph_client_metric *m,
> >>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
> >>>                              unsigned int size, int rc)
> >>>    {
> >>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>> -     ktime_t total;
> >>>
> >>>        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >>>                return;
> >>>
> >>>        spin_lock(&m->read_metric_lock);
> >>> -     total = ++m->total_reads;
> >>>        m->read_size_sum += size;
> >>> -     m->read_latency_sum += lat;
> >>>        METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >>>                              m->read_size_max,
> >>>                              size);
> >>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> >>> -                           m->read_latency_max,
> >>> -                           lat);
> >>> -     __update_stdev(total, m->read_latency_sum,
> >>> -                    &m->read_latency_sq_sum, lat);
> >>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
> >>> +                      &m->avg_read_latency, &m->read_latency_min,
> >>> +                      &m->read_latency_max, &m->read_latency_stdev, lat);
> >>>        spin_unlock(&m->read_metric_lock);
> >>>    }
> >>>
> >>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
> >>>                               unsigned int size, int rc)
> >>>    {
> >>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>> -     ktime_t total;
> >>>
> >>>        if (unlikely(rc && rc != -ETIMEDOUT))
> >>>                return;
> >>>
> >>>        spin_lock(&m->write_metric_lock);
> >>> -     total = ++m->total_writes;
> >>>        m->write_size_sum += size;
> >>> -     m->write_latency_sum += lat;
> >>>        METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >>>                              m->write_size_max,
> >>>                              size);
> >>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> >>> -                           m->write_latency_max,
> >>> -                           lat);
> >>> -     __update_stdev(total, m->write_latency_sum,
> >>> -                    &m->write_latency_sq_sum, lat);
> >>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
> >>> +                      &m->avg_write_latency, &m->write_latency_min,
> >>> +                      &m->write_latency_max, &m->write_latency_stdev, lat);
> >>>        spin_unlock(&m->write_metric_lock);
> >>>    }
> >>>
> >>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
> >>>                                  int rc)
> >>>    {
> >>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>> -     ktime_t total;
> >>>
> >>>        if (unlikely(rc && rc != -ENOENT))
> >>>                return;
> >>>
> >>>        spin_lock(&m->metadata_metric_lock);
> >>> -     total = ++m->total_metadatas;
> >>> -     m->metadata_latency_sum += lat;
> >>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> >>> -                           m->metadata_latency_max,
> >>> -                           lat);
> >>> -     __update_stdev(total, m->metadata_latency_sum,
> >>> -                    &m->metadata_latency_sq_sum, lat);
> >>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> >>> +                      &m->avg_metadata_latency, &m->metadata_latency_min,
> >>> +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
> >>>        spin_unlock(&m->metadata_metric_lock);
> >>>    }
> >>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> >>> index 103ed736f9d2..a5da21b8f8ed 100644
> >>> --- a/fs/ceph/metric.h
> >>> +++ b/fs/ceph/metric.h
> >>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >>>        u64 read_size_min;
> >>>        u64 read_size_max;
> >>>        ktime_t read_latency_sum;
> >>> -     ktime_t read_latency_sq_sum;
> >>> +     ktime_t avg_read_latency;
> >>> +     ktime_t read_latency_stdev;
> >>>        ktime_t read_latency_min;
> >>>        ktime_t read_latency_max;
> >>>
> >>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >>>        u64 write_size_min;
> >>>        u64 write_size_max;
> >>>        ktime_t write_latency_sum;
> >>> -     ktime_t write_latency_sq_sum;
> >>> +     ktime_t avg_write_latency;
> >>> +     ktime_t write_latency_stdev;
> >>>        ktime_t write_latency_min;
> >>>        ktime_t write_latency_max;
> >>>
> >>>        spinlock_t metadata_metric_lock;
> >>>        u64 total_metadatas;
> >>>        ktime_t metadata_latency_sum;
> >>> -     ktime_t metadata_latency_sq_sum;
> >>> +     ktime_t avg_metadata_latency;
> >>> +     ktime_t metadata_latency_stdev;
> >>>        ktime_t metadata_latency_min;
> >>>        ktime_t metadata_latency_max;
> >>>
> >
>
Xiubo Li Sept. 14, 2021, 1:58 p.m. UTC | #11
On 9/14/21 9:53 PM, Venky Shankar wrote:
> On Tue, Sep 14, 2021 at 7:16 PM Xiubo Li <xiubli@redhat.com> wrote:
>>
>> On 9/14/21 9:30 PM, Venky Shankar wrote:
>>> On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
>>>> On 9/14/21 4:49 PM, Venky Shankar wrote:
>>>>> The math involved in tracking average and standard deviation
>>>>> for r/w/m latencies looks incorrect. Fix that up. Also, change
>>>>> the variable name that tracks standard deviation (*_sq_sum) to
>>>>> *_stdev.
>>>>>
>>>>> Signed-off-by: Venky Shankar <vshankar@redhat.com>
>>>>> ---
>>>>>     fs/ceph/debugfs.c | 14 +++++-----
>>>>>     fs/ceph/metric.c  | 70 ++++++++++++++++++++++-------------------------
>>>>>     fs/ceph/metric.h  |  9 ++++--
>>>>>     3 files changed, 45 insertions(+), 48 deletions(-)
>>>>>
>>>>> diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
>>>>> index 38b78b45811f..3abfa7ae8220 100644
>>>>> --- a/fs/ceph/debugfs.c
>>>>> +++ b/fs/ceph/debugfs.c
>>>>> @@ -152,7 +152,7 @@ static int metric_show(struct seq_file *s, void *p)
>>>>>         struct ceph_mds_client *mdsc = fsc->mdsc;
>>>>>         struct ceph_client_metric *m = &mdsc->metric;
>>>>>         int nr_caps = 0;
>>>>> -     s64 total, sum, avg, min, max, sq;
>>>>> +     s64 total, sum, avg, min, max, stdev;
>>>>>         u64 sum_sz, avg_sz, min_sz, max_sz;
>>>>>
>>>>>         sum = percpu_counter_sum(&m->total_inodes);
>>>>> @@ -175,9 +175,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>>>         avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>>>         min = m->read_latency_min;
>>>>>         max = m->read_latency_max;
>>>>> -     sq = m->read_latency_sq_sum;
>>>>> +     stdev = m->read_latency_stdev;
>>>>>         spin_unlock(&m->read_metric_lock);
>>>>> -     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
>>>>> +     CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
>>>>>
>>>>>         spin_lock(&m->write_metric_lock);
>>>>>         total = m->total_writes;
>>>>> @@ -185,9 +185,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>>>         avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>>>         min = m->write_latency_min;
>>>>>         max = m->write_latency_max;
>>>>> -     sq = m->write_latency_sq_sum;
>>>>> +     stdev = m->write_latency_stdev;
>>>>>         spin_unlock(&m->write_metric_lock);
>>>>> -     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
>>>>> +     CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
>>>>>
>>>>>         spin_lock(&m->metadata_metric_lock);
>>>>>         total = m->total_metadatas;
>>>>> @@ -195,9 +195,9 @@ static int metric_show(struct seq_file *s, void *p)
>>>>>         avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
>>>>>         min = m->metadata_latency_min;
>>>>>         max = m->metadata_latency_max;
>>>>> -     sq = m->metadata_latency_sq_sum;
>>>>> +     stdev = m->metadata_latency_stdev;
>>>>>         spin_unlock(&m->metadata_metric_lock);
>>>>> -     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
>>>>> +     CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
>>>>>
>>>>>         seq_printf(s, "\n");
>>>>>         seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
>>>>> diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
>>>>> index 226dc38e2909..6b774b1a88ce 100644
>>>>> --- a/fs/ceph/metric.c
>>>>> +++ b/fs/ceph/metric.c
>>>>> @@ -244,7 +244,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>>>                 goto err_i_caps_mis;
>>>>>
>>>>>         spin_lock_init(&m->read_metric_lock);
>>>>> -     m->read_latency_sq_sum = 0;
>>>>> +     m->read_latency_stdev = 0;
>>>>> +     m->avg_read_latency = 0;
>>>>>         m->read_latency_min = KTIME_MAX;
>>>>>         m->read_latency_max = 0;
>>>>>         m->total_reads = 0;
>>>>> @@ -254,7 +255,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>>>         m->read_size_sum = 0;
>>>>>
>>>>>         spin_lock_init(&m->write_metric_lock);
>>>>> -     m->write_latency_sq_sum = 0;
>>>>> +     m->write_latency_stdev = 0;
>>>>> +     m->avg_write_latency = 0;
>>>>>         m->write_latency_min = KTIME_MAX;
>>>>>         m->write_latency_max = 0;
>>>>>         m->total_writes = 0;
>>>>> @@ -264,7 +266,8 @@ int ceph_metric_init(struct ceph_client_metric *m)
>>>>>         m->write_size_sum = 0;
>>>>>
>>>>>         spin_lock_init(&m->metadata_metric_lock);
>>>>> -     m->metadata_latency_sq_sum = 0;
>>>>> +     m->metadata_latency_stdev = 0;
>>>>> +     m->avg_metadata_latency = 0;
>>>>>         m->metadata_latency_min = KTIME_MAX;
>>>>>         m->metadata_latency_max = 0;
>>>>>         m->total_metadatas = 0;
>>>>> @@ -322,20 +325,26 @@ void ceph_metric_destroy(struct ceph_client_metric *m)
>>>>>                 max = new;                      \
>>>>>     }
>>>>>
>>>>> -static inline void __update_stdev(ktime_t total, ktime_t lsum,
>>>>> -                               ktime_t *sq_sump, ktime_t lat)
>>>>> +static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
>>>>> +                                 ktime_t *lavg, ktime_t *min, ktime_t *max,
>>>>> +                                 ktime_t *lstdev, ktime_t lat)
>>>>>     {
>>>>> -     ktime_t avg, sq;
>>>>> +     ktime_t total, avg, stdev;
>>>>>
>>>>> -     if (unlikely(total == 1))
>>>>> -             return;
>>>>> +     total = ++(*ctotal);
>>>>> +     *lsum += lat;
>>>>> +
>>>>> +     METRIC_UPDATE_MIN_MAX(*min, *max, lat);
>>>>>
>>>>> -     /* the sq is (lat - old_avg) * (lat - new_avg) */
>>>>> -     avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
>>>>> -     sq = lat - avg;
>>>>> -     avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
>>>>> -     sq = sq * (lat - avg);
>>>>> -     *sq_sump += sq;
>>>>> +     if (unlikely(total == 1)) {
>>>>> +             *lavg = lat;
>>>>> +             *lstdev = 0;
>>>>> +     } else {
>>>>> +             avg = *lavg + div64_s64(lat - *lavg, total);
>>>>> +             stdev = *lstdev + (lat - *lavg)*(lat - avg);
>>>>> +             *lstdev = int_sqrt(div64_u64(stdev, total - 1));
>>>>> +             *lavg = avg;
>>>>> +     }
>>>> IMO, this is incorrect, the math formula please see:
>>>>
>>>> https://www.investopedia.com/ask/answers/042415/what-difference-between-standard-error-means-and-standard-deviation.asp
>>>>
>>>> The most accurate result should be:
>>>>
>>>> stdev = int_sqrt(sum((X(n) - avg)^2, (X(n-1) - avg)^2, ..., (X(1) -
>>>> avg)^2) / (n - 1)).
>>>>
>>>> While you are computing it:
>>>>
>>>> stdev_n = int_sqrt(stdev_(n-1) + (X(n-1) - avg)^2)
>>> Hmm. The int_sqrt() is probably not needed at this point and can be
>>> done when sending the metric. That would avoid some cycles.
>>>
>>> Also, the way avg is calculated not totally incorrect, however, I
>>> would like to keep it similar to how its done is libcephfs.
>> In user space this is very easy to do, but not in kernel space,
>> especially there has no float computing.
>>
>> Currently the kclient is doing the avg computing by:
>>
>> avg(n) = (avg(n-1) + latency(n)) / (n), IMO this should be closer to the
>> real avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.
> That's how is done in libcephfs too.

Okay.

>
>> Because it's hard to record all the latency values, this is also many
>> other user space tools doing to count the avg.
>>
>>
>>>> Though current stdev computing method is not exactly the same the math
>>>> formula does, but it's closer to it, because the kernel couldn't record
>>>> all the latency value and do it whenever needed, which will occupy a
>>>> large amount of memories and cpu resources.
>>> The approach is to calculate the running variance, I.e., compute the
>>> variance as  data (latency) arrive one at a time.
>>>
>>>>>     }
>>>>>
>>>>>     void ceph_update_read_metrics(struct ceph_client_metric *m,
>>>>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
>>>>>                               unsigned int size, int rc)
>>>>>     {
>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>> -     ktime_t total;
>>>>>
>>>>>         if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>>>>>                 return;
>>>>>
>>>>>         spin_lock(&m->read_metric_lock);
>>>>> -     total = ++m->total_reads;
>>>>>         m->read_size_sum += size;
>>>>> -     m->read_latency_sum += lat;
>>>>>         METRIC_UPDATE_MIN_MAX(m->read_size_min,
>>>>>                               m->read_size_max,
>>>>>                               size);
>>>>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
>>>>> -                           m->read_latency_max,
>>>>> -                           lat);
>>>>> -     __update_stdev(total, m->read_latency_sum,
>>>>> -                    &m->read_latency_sq_sum, lat);
>>>>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
>>>>> +                      &m->avg_read_latency, &m->read_latency_min,
>>>>> +                      &m->read_latency_max, &m->read_latency_stdev, lat);
>>>>>         spin_unlock(&m->read_metric_lock);
>>>>>     }
>>>>>
>>>>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct ceph_client_metric *m,
>>>>>                                unsigned int size, int rc)
>>>>>     {
>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>> -     ktime_t total;
>>>>>
>>>>>         if (unlikely(rc && rc != -ETIMEDOUT))
>>>>>                 return;
>>>>>
>>>>>         spin_lock(&m->write_metric_lock);
>>>>> -     total = ++m->total_writes;
>>>>>         m->write_size_sum += size;
>>>>> -     m->write_latency_sum += lat;
>>>>>         METRIC_UPDATE_MIN_MAX(m->write_size_min,
>>>>>                               m->write_size_max,
>>>>>                               size);
>>>>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
>>>>> -                           m->write_latency_max,
>>>>> -                           lat);
>>>>> -     __update_stdev(total, m->write_latency_sum,
>>>>> -                    &m->write_latency_sq_sum, lat);
>>>>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
>>>>> +                      &m->avg_write_latency, &m->write_latency_min,
>>>>> +                      &m->write_latency_max, &m->write_latency_stdev, lat);
>>>>>         spin_unlock(&m->write_metric_lock);
>>>>>     }
>>>>>
>>>>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct ceph_client_metric *m,
>>>>>                                   int rc)
>>>>>     {
>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>> -     ktime_t total;
>>>>>
>>>>>         if (unlikely(rc && rc != -ENOENT))
>>>>>                 return;
>>>>>
>>>>>         spin_lock(&m->metadata_metric_lock);
>>>>> -     total = ++m->total_metadatas;
>>>>> -     m->metadata_latency_sum += lat;
>>>>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
>>>>> -                           m->metadata_latency_max,
>>>>> -                           lat);
>>>>> -     __update_stdev(total, m->metadata_latency_sum,
>>>>> -                    &m->metadata_latency_sq_sum, lat);
>>>>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
>>>>> +                      &m->avg_metadata_latency, &m->metadata_latency_min,
>>>>> +                      &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
>>>>>         spin_unlock(&m->metadata_metric_lock);
>>>>>     }
>>>>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
>>>>> index 103ed736f9d2..a5da21b8f8ed 100644
>>>>> --- a/fs/ceph/metric.h
>>>>> +++ b/fs/ceph/metric.h
>>>>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>>>>>         u64 read_size_min;
>>>>>         u64 read_size_max;
>>>>>         ktime_t read_latency_sum;
>>>>> -     ktime_t read_latency_sq_sum;
>>>>> +     ktime_t avg_read_latency;
>>>>> +     ktime_t read_latency_stdev;
>>>>>         ktime_t read_latency_min;
>>>>>         ktime_t read_latency_max;
>>>>>
>>>>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>>>>>         u64 write_size_min;
>>>>>         u64 write_size_max;
>>>>>         ktime_t write_latency_sum;
>>>>> -     ktime_t write_latency_sq_sum;
>>>>> +     ktime_t avg_write_latency;
>>>>> +     ktime_t write_latency_stdev;
>>>>>         ktime_t write_latency_min;
>>>>>         ktime_t write_latency_max;
>>>>>
>>>>>         spinlock_t metadata_metric_lock;
>>>>>         u64 total_metadatas;
>>>>>         ktime_t metadata_latency_sum;
>>>>> -     ktime_t metadata_latency_sq_sum;
>>>>> +     ktime_t avg_metadata_latency;
>>>>> +     ktime_t metadata_latency_stdev;
>>>>>         ktime_t metadata_latency_min;
>>>>>         ktime_t metadata_latency_max;
>>>>>
>
Venky Shankar Sept. 14, 2021, 2 p.m. UTC | #12
On Tue, Sep 14, 2021 at 7:22 PM Xiubo Li <xiubli@redhat.com> wrote:
>
>
> On 9/14/21 9:45 PM, Xiubo Li wrote:
> >
> > On 9/14/21 9:30 PM, Venky Shankar wrote:
> >> On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
> >>>
> >>> On 9/14/21 4:49 PM, Venky Shankar wrote:
> [...]
> > In user space this is very easy to do, but not in kernel space,
> > especially since there is no floating-point math in the kernel.
> >
> As I remember, this is the main reason why I was planning to send the raw
> metrics to the MDS and let the MDS do the computing.
>
> So if possible, why not just send the raw data to the MDS and let the MDS
> do the stdev computing?

Since metrics are sent each second (I suppose) and there can be N
operations done within that second, what raw data (say for avg/stdev
calculation) would the client send to the MDS?
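
To make the question concrete: since individual latency samples can't
realistically be shipped every interval, the "raw data" would itself have to
be per-interval aggregates. A purely hypothetical sketch of what such a
payload could carry (struct and field names are illustrative only, not the
existing metric format):

struct client_lat_aggregate {
	u64	count;		/* ops completed in the interval */
	s64	lat_sum;	/* sum of latencies (ns) */
	s64	lat_sq_sum;	/* running sum of squared deviations */
	s64	lat_min;	/* fastest op in the interval (ns) */
	s64	lat_max;	/* slowest op in the interval (ns) */
};

In other words, the client still has to maintain the same running sums it
does today; the open question is only whether the final divide and sqrt
happen client-side or on the MDS.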

>
>
> > Currently the kclient computes the avg by:
> >
> > avg(n) = (avg(n-1) + latency(n)) / (n), IMO this should be closer to
> > the real avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.
> >
> > Because it's hard to record all the latency values, this is also what
> > many other user space tools do to compute the avg.
> >
> >
> >>> Though the current stdev computing method is not exactly what the
> >>> math formula does, it is close to it, because the kernel can't record
> >>> all the latency values and redo the computation whenever needed, which
> >>> would occupy a large amount of memory and cpu resources.
> >> The approach is to calculate the running variance, i.e., compute the
> >> variance as data (latency) arrive one at a time.
> >>
> >>>
> >>>>    }
> >>>>
> >>>>    void ceph_update_read_metrics(struct ceph_client_metric *m,
> >>>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct
> >>>> ceph_client_metric *m,
> >>>>                              unsigned int size, int rc)
> >>>>    {
> >>>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>>> -     ktime_t total;
> >>>>
> >>>>        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
> >>>>                return;
> >>>>
> >>>>        spin_lock(&m->read_metric_lock);
> >>>> -     total = ++m->total_reads;
> >>>>        m->read_size_sum += size;
> >>>> -     m->read_latency_sum += lat;
> >>>>        METRIC_UPDATE_MIN_MAX(m->read_size_min,
> >>>>                              m->read_size_max,
> >>>>                              size);
> >>>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
> >>>> -                           m->read_latency_max,
> >>>> -                           lat);
> >>>> -     __update_stdev(total, m->read_latency_sum,
> >>>> -                    &m->read_latency_sq_sum, lat);
> >>>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
> >>>> +                      &m->avg_read_latency, &m->read_latency_min,
> >>>> +                      &m->read_latency_max,
> >>>> &m->read_latency_stdev, lat);
> >>>>        spin_unlock(&m->read_metric_lock);
> >>>>    }
> >>>>
> >>>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct
> >>>> ceph_client_metric *m,
> >>>>                               unsigned int size, int rc)
> >>>>    {
> >>>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>>> -     ktime_t total;
> >>>>
> >>>>        if (unlikely(rc && rc != -ETIMEDOUT))
> >>>>                return;
> >>>>
> >>>>        spin_lock(&m->write_metric_lock);
> >>>> -     total = ++m->total_writes;
> >>>>        m->write_size_sum += size;
> >>>> -     m->write_latency_sum += lat;
> >>>>        METRIC_UPDATE_MIN_MAX(m->write_size_min,
> >>>>                              m->write_size_max,
> >>>>                              size);
> >>>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
> >>>> -                           m->write_latency_max,
> >>>> -                           lat);
> >>>> -     __update_stdev(total, m->write_latency_sum,
> >>>> -                    &m->write_latency_sq_sum, lat);
> >>>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
> >>>> +                      &m->avg_write_latency, &m->write_latency_min,
> >>>> +                      &m->write_latency_max,
> >>>> &m->write_latency_stdev, lat);
> >>>>        spin_unlock(&m->write_metric_lock);
> >>>>    }
> >>>>
> >>>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct
> >>>> ceph_client_metric *m,
> >>>>                                  int rc)
> >>>>    {
> >>>>        ktime_t lat = ktime_sub(r_end, r_start);
> >>>> -     ktime_t total;
> >>>>
> >>>>        if (unlikely(rc && rc != -ENOENT))
> >>>>                return;
> >>>>
> >>>>        spin_lock(&m->metadata_metric_lock);
> >>>> -     total = ++m->total_metadatas;
> >>>> -     m->metadata_latency_sum += lat;
> >>>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
> >>>> -                           m->metadata_latency_max,
> >>>> -                           lat);
> >>>> -     __update_stdev(total, m->metadata_latency_sum,
> >>>> -                    &m->metadata_latency_sq_sum, lat);
> >>>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
> >>>> +                      &m->avg_metadata_latency,
> >>>> &m->metadata_latency_min,
> >>>> +                      &m->metadata_latency_max,
> >>>> &m->metadata_latency_stdev, lat);
> >>>>        spin_unlock(&m->metadata_metric_lock);
> >>>>    }
> >>>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
> >>>> index 103ed736f9d2..a5da21b8f8ed 100644
> >>>> --- a/fs/ceph/metric.h
> >>>> +++ b/fs/ceph/metric.h
> >>>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
> >>>>        u64 read_size_min;
> >>>>        u64 read_size_max;
> >>>>        ktime_t read_latency_sum;
> >>>> -     ktime_t read_latency_sq_sum;
> >>>> +     ktime_t avg_read_latency;
> >>>> +     ktime_t read_latency_stdev;
> >>>>        ktime_t read_latency_min;
> >>>>        ktime_t read_latency_max;
> >>>>
> >>>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
> >>>>        u64 write_size_min;
> >>>>        u64 write_size_max;
> >>>>        ktime_t write_latency_sum;
> >>>> -     ktime_t write_latency_sq_sum;
> >>>> +     ktime_t avg_write_latency;
> >>>> +     ktime_t write_latency_stdev;
> >>>>        ktime_t write_latency_min;
> >>>>        ktime_t write_latency_max;
> >>>>
> >>>>        spinlock_t metadata_metric_lock;
> >>>>        u64 total_metadatas;
> >>>>        ktime_t metadata_latency_sum;
> >>>> -     ktime_t metadata_latency_sq_sum;
> >>>> +     ktime_t avg_metadata_latency;
> >>>> +     ktime_t metadata_latency_stdev;
> >>>>        ktime_t metadata_latency_min;
> >>>>        ktime_t metadata_latency_max;
> >>>>
> >>
>
Xiubo Li Sept. 14, 2021, 2:10 p.m. UTC | #13
On 9/14/21 10:00 PM, Venky Shankar wrote:
> On Tue, Sep 14, 2021 at 7:22 PM Xiubo Li <xiubli@redhat.com> wrote:
>>
>> On 9/14/21 9:45 PM, Xiubo Li wrote:
>>> On 9/14/21 9:30 PM, Venky Shankar wrote:
>>>> On Tue, Sep 14, 2021 at 6:39 PM Xiubo Li <xiubli@redhat.com> wrote:
>>>>> On 9/14/21 4:49 PM, Venky Shankar wrote:
>> [...]
>>> In user space this is very easy to do, but not in kernel space,
>>> especially since there is no floating-point math in the kernel.
>>>
>> As I remember, this is the main reason why I was planning to send the raw
>> metrics to the MDS and let the MDS do the computing.
>>
>> So if possible, why not just send the raw data to the MDS and let the MDS
>> do the stdev computing?
> Since metrics are sent each second (I suppose) and there can be N
> operations done within that second, what raw data (say for avg/stdev
> calculation) would the client send to the MDS?

Yeah.

For example, just send the "sq_sum" and the total counts to the MDS; these
should be enough to compute the stdev. Then the MDS or the cephfs-top tool
can just do it by int_sqrt(sq_sum / total).

I am okay with either way and it's up to you, but the stdev could be more
accurate in userspace with floating-point computation.
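
As a rough sketch of that userspace side (assuming "sq_sum" is the running
sum of squared deviations the client already tracks and "total" is the
sample count; plain C with floating point, not actual MDS or cephfs-top
code):

#include <math.h>
#include <stdint.h>

/* Illustrative only: derive the stdev from the forwarded aggregates. */
static double stdev_from_aggregates(uint64_t sq_sum, uint64_t total)
{
	if (total < 2)
		return 0.0;
	/* sq_sum / total is the population variance; dividing by
	 * (total - 1) instead would give the sample variance. */
	return sqrt((double)sq_sum / (double)total);
}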


>
>>
>>> Currently the kclient computes the avg by:
>>>
>>> avg(n) = (avg(n-1) + latency(n)) / (n), IMO this should be closer to
>>> the real avg(n) = sum(latency(n), latency(n-1), ..., latency(1)) / n.
>>>
>>> Because it's hard to record all the latency values, this is also what
>>> many other user space tools do to compute the avg.
>>>
>>>
>>>>> Though the current stdev computing method is not exactly what the
>>>>> math formula does, it is close to it, because the kernel can't record
>>>>> all the latency values and redo the computation whenever needed, which
>>>>> would occupy a large amount of memory and cpu resources.
>>>> The approach is to calculate the running variance, i.e., compute the
>>>> variance as data (latency) arrive one at a time.
>>>>
>>>>>>     }
>>>>>>
>>>>>>     void ceph_update_read_metrics(struct ceph_client_metric *m,
>>>>>> @@ -343,23 +352,18 @@ void ceph_update_read_metrics(struct
>>>>>> ceph_client_metric *m,
>>>>>>                               unsigned int size, int rc)
>>>>>>     {
>>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>>> -     ktime_t total;
>>>>>>
>>>>>>         if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
>>>>>>                 return;
>>>>>>
>>>>>>         spin_lock(&m->read_metric_lock);
>>>>>> -     total = ++m->total_reads;
>>>>>>         m->read_size_sum += size;
>>>>>> -     m->read_latency_sum += lat;
>>>>>>         METRIC_UPDATE_MIN_MAX(m->read_size_min,
>>>>>>                               m->read_size_max,
>>>>>>                               size);
>>>>>> -     METRIC_UPDATE_MIN_MAX(m->read_latency_min,
>>>>>> -                           m->read_latency_max,
>>>>>> -                           lat);
>>>>>> -     __update_stdev(total, m->read_latency_sum,
>>>>>> -                    &m->read_latency_sq_sum, lat);
>>>>>> +     __update_latency(&m->total_reads, &m->read_latency_sum,
>>>>>> +                      &m->avg_read_latency, &m->read_latency_min,
>>>>>> +                      &m->read_latency_max,
>>>>>> &m->read_latency_stdev, lat);
>>>>>>         spin_unlock(&m->read_metric_lock);
>>>>>>     }
>>>>>>
>>>>>> @@ -368,23 +372,18 @@ void ceph_update_write_metrics(struct
>>>>>> ceph_client_metric *m,
>>>>>>                                unsigned int size, int rc)
>>>>>>     {
>>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>>> -     ktime_t total;
>>>>>>
>>>>>>         if (unlikely(rc && rc != -ETIMEDOUT))
>>>>>>                 return;
>>>>>>
>>>>>>         spin_lock(&m->write_metric_lock);
>>>>>> -     total = ++m->total_writes;
>>>>>>         m->write_size_sum += size;
>>>>>> -     m->write_latency_sum += lat;
>>>>>>         METRIC_UPDATE_MIN_MAX(m->write_size_min,
>>>>>>                               m->write_size_max,
>>>>>>                               size);
>>>>>> -     METRIC_UPDATE_MIN_MAX(m->write_latency_min,
>>>>>> -                           m->write_latency_max,
>>>>>> -                           lat);
>>>>>> -     __update_stdev(total, m->write_latency_sum,
>>>>>> -                    &m->write_latency_sq_sum, lat);
>>>>>> +     __update_latency(&m->total_writes, &m->write_latency_sum,
>>>>>> +                      &m->avg_write_latency, &m->write_latency_min,
>>>>>> +                      &m->write_latency_max,
>>>>>> &m->write_latency_stdev, lat);
>>>>>>         spin_unlock(&m->write_metric_lock);
>>>>>>     }
>>>>>>
>>>>>> @@ -393,18 +392,13 @@ void ceph_update_metadata_metrics(struct
>>>>>> ceph_client_metric *m,
>>>>>>                                   int rc)
>>>>>>     {
>>>>>>         ktime_t lat = ktime_sub(r_end, r_start);
>>>>>> -     ktime_t total;
>>>>>>
>>>>>>         if (unlikely(rc && rc != -ENOENT))
>>>>>>                 return;
>>>>>>
>>>>>>         spin_lock(&m->metadata_metric_lock);
>>>>>> -     total = ++m->total_metadatas;
>>>>>> -     m->metadata_latency_sum += lat;
>>>>>> -     METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
>>>>>> -                           m->metadata_latency_max,
>>>>>> -                           lat);
>>>>>> -     __update_stdev(total, m->metadata_latency_sum,
>>>>>> -                    &m->metadata_latency_sq_sum, lat);
>>>>>> +     __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
>>>>>> +                      &m->avg_metadata_latency,
>>>>>> &m->metadata_latency_min,
>>>>>> +                      &m->metadata_latency_max,
>>>>>> &m->metadata_latency_stdev, lat);
>>>>>>         spin_unlock(&m->metadata_metric_lock);
>>>>>>     }
>>>>>> diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
>>>>>> index 103ed736f9d2..a5da21b8f8ed 100644
>>>>>> --- a/fs/ceph/metric.h
>>>>>> +++ b/fs/ceph/metric.h
>>>>>> @@ -138,7 +138,8 @@ struct ceph_client_metric {
>>>>>>         u64 read_size_min;
>>>>>>         u64 read_size_max;
>>>>>>         ktime_t read_latency_sum;
>>>>>> -     ktime_t read_latency_sq_sum;
>>>>>> +     ktime_t avg_read_latency;
>>>>>> +     ktime_t read_latency_stdev;
>>>>>>         ktime_t read_latency_min;
>>>>>>         ktime_t read_latency_max;
>>>>>>
>>>>>> @@ -148,14 +149,16 @@ struct ceph_client_metric {
>>>>>>         u64 write_size_min;
>>>>>>         u64 write_size_max;
>>>>>>         ktime_t write_latency_sum;
>>>>>> -     ktime_t write_latency_sq_sum;
>>>>>> +     ktime_t avg_write_latency;
>>>>>> +     ktime_t write_latency_stdev;
>>>>>>         ktime_t write_latency_min;
>>>>>>         ktime_t write_latency_max;
>>>>>>
>>>>>>         spinlock_t metadata_metric_lock;
>>>>>>         u64 total_metadatas;
>>>>>>         ktime_t metadata_latency_sum;
>>>>>> -     ktime_t metadata_latency_sq_sum;
>>>>>> +     ktime_t avg_metadata_latency;
>>>>>> +     ktime_t metadata_latency_stdev;
>>>>>>         ktime_t metadata_latency_min;
>>>>>>         ktime_t metadata_latency_max;
>>>>>>
>

Patch

diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 38b78b45811f..3abfa7ae8220 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -152,7 +152,7 @@  static int metric_show(struct seq_file *s, void *p)
 	struct ceph_mds_client *mdsc = fsc->mdsc;
 	struct ceph_client_metric *m = &mdsc->metric;
 	int nr_caps = 0;
-	s64 total, sum, avg, min, max, sq;
+	s64 total, sum, avg, min, max, stdev;
 	u64 sum_sz, avg_sz, min_sz, max_sz;
 
 	sum = percpu_counter_sum(&m->total_inodes);
@@ -175,9 +175,9 @@  static int metric_show(struct seq_file *s, void *p)
 	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
 	min = m->read_latency_min;
 	max = m->read_latency_max;
-	sq = m->read_latency_sq_sum;
+	stdev = m->read_latency_stdev;
 	spin_unlock(&m->read_metric_lock);
-	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, stdev);
 
 	spin_lock(&m->write_metric_lock);
 	total = m->total_writes;
@@ -185,9 +185,9 @@  static int metric_show(struct seq_file *s, void *p)
 	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
 	min = m->write_latency_min;
 	max = m->write_latency_max;
-	sq = m->write_latency_sq_sum;
+	stdev = m->write_latency_stdev;
 	spin_unlock(&m->write_metric_lock);
-	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, stdev);
 
 	spin_lock(&m->metadata_metric_lock);
 	total = m->total_metadatas;
@@ -195,9 +195,9 @@  static int metric_show(struct seq_file *s, void *p)
 	avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
 	min = m->metadata_latency_min;
 	max = m->metadata_latency_max;
-	sq = m->metadata_latency_sq_sum;
+	stdev = m->metadata_latency_stdev;
 	spin_unlock(&m->metadata_metric_lock);
-	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
+	CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, stdev);
 
 	seq_printf(s, "\n");
 	seq_printf(s, "item          total       avg_sz(bytes)   min_sz(bytes)   max_sz(bytes)  total_sz(bytes)\n");
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 226dc38e2909..6b774b1a88ce 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -244,7 +244,8 @@  int ceph_metric_init(struct ceph_client_metric *m)
 		goto err_i_caps_mis;
 
 	spin_lock_init(&m->read_metric_lock);
-	m->read_latency_sq_sum = 0;
+	m->read_latency_stdev = 0;
+	m->avg_read_latency = 0;
 	m->read_latency_min = KTIME_MAX;
 	m->read_latency_max = 0;
 	m->total_reads = 0;
@@ -254,7 +255,8 @@  int ceph_metric_init(struct ceph_client_metric *m)
 	m->read_size_sum = 0;
 
 	spin_lock_init(&m->write_metric_lock);
-	m->write_latency_sq_sum = 0;
+	m->write_latency_stdev = 0;
+	m->avg_write_latency = 0;
 	m->write_latency_min = KTIME_MAX;
 	m->write_latency_max = 0;
 	m->total_writes = 0;
@@ -264,7 +266,8 @@  int ceph_metric_init(struct ceph_client_metric *m)
 	m->write_size_sum = 0;
 
 	spin_lock_init(&m->metadata_metric_lock);
-	m->metadata_latency_sq_sum = 0;
+	m->metadata_latency_stdev = 0;
+	m->avg_metadata_latency = 0;
 	m->metadata_latency_min = KTIME_MAX;
 	m->metadata_latency_max = 0;
 	m->total_metadatas = 0;
@@ -322,20 +325,26 @@  void ceph_metric_destroy(struct ceph_client_metric *m)
 		max = new;			\
 }
 
-static inline void __update_stdev(ktime_t total, ktime_t lsum,
-				  ktime_t *sq_sump, ktime_t lat)
+static inline void __update_latency(ktime_t *ctotal, ktime_t *lsum,
+				    ktime_t *lavg, ktime_t *min, ktime_t *max,
+				    ktime_t *lstdev, ktime_t lat)
 {
-	ktime_t avg, sq;
+	ktime_t total, avg, stdev;
 
-	if (unlikely(total == 1))
-		return;
+	total = ++(*ctotal);
+	*lsum += lat;
+
+	METRIC_UPDATE_MIN_MAX(*min, *max, lat);
 
-	/* the sq is (lat - old_avg) * (lat - new_avg) */
-	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
-	sq = lat - avg;
-	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
-	sq = sq * (lat - avg);
-	*sq_sump += sq;
+	if (unlikely(total == 1)) {
+		*lavg = lat;
+		*lstdev = 0;
+	} else {
+		avg = *lavg + div64_s64(lat - *lavg, total);
+		stdev = *lstdev + (lat - *lavg)*(lat - avg);
+		*lstdev = int_sqrt(div64_u64(stdev, total - 1));
+		*lavg = avg;
+	}
 }
 
 void ceph_update_read_metrics(struct ceph_client_metric *m,
@@ -343,23 +352,18 @@  void ceph_update_read_metrics(struct ceph_client_metric *m,
 			      unsigned int size, int rc)
 {
 	ktime_t lat = ktime_sub(r_end, r_start);
-	ktime_t total;
 
 	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
 		return;
 
 	spin_lock(&m->read_metric_lock);
-	total = ++m->total_reads;
 	m->read_size_sum += size;
-	m->read_latency_sum += lat;
 	METRIC_UPDATE_MIN_MAX(m->read_size_min,
 			      m->read_size_max,
 			      size);
-	METRIC_UPDATE_MIN_MAX(m->read_latency_min,
-			      m->read_latency_max,
-			      lat);
-	__update_stdev(total, m->read_latency_sum,
-		       &m->read_latency_sq_sum, lat);
+	__update_latency(&m->total_reads, &m->read_latency_sum,
+			 &m->avg_read_latency, &m->read_latency_min,
+			 &m->read_latency_max, &m->read_latency_stdev, lat);
 	spin_unlock(&m->read_metric_lock);
 }
 
@@ -368,23 +372,18 @@  void ceph_update_write_metrics(struct ceph_client_metric *m,
 			       unsigned int size, int rc)
 {
 	ktime_t lat = ktime_sub(r_end, r_start);
-	ktime_t total;
 
 	if (unlikely(rc && rc != -ETIMEDOUT))
 		return;
 
 	spin_lock(&m->write_metric_lock);
-	total = ++m->total_writes;
 	m->write_size_sum += size;
-	m->write_latency_sum += lat;
 	METRIC_UPDATE_MIN_MAX(m->write_size_min,
 			      m->write_size_max,
 			      size);
-	METRIC_UPDATE_MIN_MAX(m->write_latency_min,
-			      m->write_latency_max,
-			      lat);
-	__update_stdev(total, m->write_latency_sum,
-		       &m->write_latency_sq_sum, lat);
+	__update_latency(&m->total_writes, &m->write_latency_sum,
+			 &m->avg_write_latency, &m->write_latency_min,
+			 &m->write_latency_max, &m->write_latency_stdev, lat);
 	spin_unlock(&m->write_metric_lock);
 }
 
@@ -393,18 +392,13 @@  void ceph_update_metadata_metrics(struct ceph_client_metric *m,
 				  int rc)
 {
 	ktime_t lat = ktime_sub(r_end, r_start);
-	ktime_t total;
 
 	if (unlikely(rc && rc != -ENOENT))
 		return;
 
 	spin_lock(&m->metadata_metric_lock);
-	total = ++m->total_metadatas;
-	m->metadata_latency_sum += lat;
-	METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
-			      m->metadata_latency_max,
-			      lat);
-	__update_stdev(total, m->metadata_latency_sum,
-		       &m->metadata_latency_sq_sum, lat);
+	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
+			 &m->avg_metadata_latency, &m->metadata_latency_min,
+			 &m->metadata_latency_max, &m->metadata_latency_stdev, lat);
 	spin_unlock(&m->metadata_metric_lock);
 }
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index 103ed736f9d2..a5da21b8f8ed 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -138,7 +138,8 @@  struct ceph_client_metric {
 	u64 read_size_min;
 	u64 read_size_max;
 	ktime_t read_latency_sum;
-	ktime_t read_latency_sq_sum;
+	ktime_t avg_read_latency;
+	ktime_t read_latency_stdev;
 	ktime_t read_latency_min;
 	ktime_t read_latency_max;
 
@@ -148,14 +149,16 @@  struct ceph_client_metric {
 	u64 write_size_min;
 	u64 write_size_max;
 	ktime_t write_latency_sum;
-	ktime_t write_latency_sq_sum;
+	ktime_t avg_write_latency;
+	ktime_t write_latency_stdev;
 	ktime_t write_latency_min;
 	ktime_t write_latency_max;
 
 	spinlock_t metadata_metric_lock;
 	u64 total_metadatas;
 	ktime_t metadata_latency_sum;
-	ktime_t metadata_latency_sq_sum;
+	ktime_t avg_metadata_latency;
+	ktime_t metadata_latency_stdev;
 	ktime_t metadata_latency_min;
 	ktime_t metadata_latency_max;