diff mbox series

[1/2] mm/damon/core: add a tracepoint for damos apply target regions

Message ID 20230911045908.97649-2-sj@kernel.org (mailing list archive)
State Handled Elsewhere
Headers show
Series mm/damon: add a tracepoint for damos apply target regions | expand

Commit Message

SeongJae Park Sept. 11, 2023, 4:59 a.m. UTC
DAMON provides damon_aggregated tracepoint, which exposes details of
each region and its access monitoring results.  It is useful for
getting whole monitoring results, e.g., for recording purposes.

For investigations of DAMOS, DAMON Sysfs interface provides DAMOS
statistics and tried_regions directory.  But, those provide only
statistics and snapshots.  If the scheme is frequently applied and if
the user needs to know every detail of DAMOS behavior, the
snapshot-based interface could be insufficient and expensive.

As a last resort, userspace users need to record all the monitoring
results via damon_aggregated tracepoint and simulate how DAMOS would
have worked.  It is unnecessarily complicated.  DAMON kernel API users,
meanwhile, can do that easily via before_damos_apply() callback field of
'struct damon_callback', though.

Add a tracepoint that will be called just after before_damos_apply()
callback for more convenient investigations of DAMOS.  The tracepoint
exposes all details about each region, similar to damon_aggregated
tracepoint.

Please note that DAMOS is currently not only for memory management but
also for query-like efficient monitoring results retrievals (when 'stat'
action is used).  Until now, only statistics or snapshots were
supported.  Addition of this tracepoint allows efficient full recording
of DAMOS-based filtered monitoring results.

Signed-off-by: SeongJae Park <sj@kernel.org>
---
 include/trace/events/damon.h | 37 ++++++++++++++++++++++++++++++++++++
 mm/damon/core.c              | 27 +++++++++++++++++++++++++-
 2 files changed, 63 insertions(+), 1 deletion(-)

Comments

Steven Rostedt Sept. 11, 2023, 6:19 p.m. UTC | #1
On Mon, 11 Sep 2023 04:59:07 +0000
SeongJae Park <sj@kernel.org> wrote:

> --- a/mm/damon/core.c
> +++ b/mm/damon/core.c
> @@ -950,6 +950,28 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
>  	struct timespec64 begin, end;
>  	unsigned long sz_applied = 0;
>  	int err = 0;
> +	/*
> +	 * We plan to support multiple context per kdamond, as DAMON sysfs
> +	 * implies with 'nr_contexts' file.  Nevertheless, only single context
> +	 * per kdamond is supported for now.  So, we can simply use '0' context
> +	 * index here.
> +	 */
> +	unsigned int cidx = 0;
> +	struct damos *siter;		/* schemes iterator */
> +	unsigned int sidx = 0;
> +	struct damon_target *titer;	/* targets iterator */
> +	unsigned int tidx = 0;
> +

If this loop is only for passing sidx and tidx to the trace point, you can
add around it:

	if (trace_damos_before_apply_enabled()) {

> +	damon_for_each_scheme(siter, c) {
> +		if (siter == s)
> +			break;
> +		sidx++;
> +	}
> +	damon_for_each_target(titer, c) {
> +		if (titer == t)
> +			break;
> +		tidx++;
> +	}

	}


And then this loop will only be done if that trace event is enabled.

To prevent races, you may also want to add a third parameter, or initialize
them to -1:

	sidx = -1;

	if (trace_damos_before_apply_enabled()) {
		sidx = 0;
		[..]
	}

And you can change the TRACE_EVENT() to TRACE_EVENT_CONDITION():

TRACE_EVENT_CONDITION(damos_before_apply,

	TP_PROTO(...),

	TP_ARGS(...),

	TP_CONDITION(sidx >= 0),

and the trace event will not be called if sidx is less than zero.

Also, this if statement is only done when the trace event is enabled, so
it's equivalent to:

	if (trace_damos_before_apply_enabled()) {
		if (sidx >= 0)
			trace_damos_before_apply(cidx, sidx, tidx, r,
					damon_nr_regions(t));
	}

-- Steve



>  
>  	if (c->ops.apply_scheme) {
>  		if (quota->esz && quota->charged_sz + sz > quota->esz) {
> @@ -964,8 +986,11 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
>  		ktime_get_coarse_ts64(&begin);
>  		if (c->callback.before_damos_apply)
>  			err = c->callback.before_damos_apply(c, t, r, s);
> -		if (!err)
> +		if (!err) {
> +			trace_damos_before_apply(cidx, sidx, tidx, r,
> +					damon_nr_regions(t));
>  			sz_applied = c->ops.apply_scheme(c, t, r, s);
> +		}
>  		ktime_get_coarse_ts64(&end);
>  		quota->total_charged_ns += timespec64_to_ns(&end) -
>  			timespec64_to_ns(&begin);
> --
SeongJae Park Sept. 11, 2023, 7:05 p.m. UTC | #2
Hi Steven,

On Mon, 11 Sep 2023 14:19:55 -0400 Steven Rostedt <rostedt@goodmis.org> wrote:

> On Mon, 11 Sep 2023 04:59:07 +0000
> SeongJae Park <sj@kernel.org> wrote:
> 
> > --- a/mm/damon/core.c
> > +++ b/mm/damon/core.c
> > @@ -950,6 +950,28 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
> >  	struct timespec64 begin, end;
> >  	unsigned long sz_applied = 0;
> >  	int err = 0;
> > +	/*
> > +	 * We plan to support multiple context per kdamond, as DAMON sysfs
> > +	 * implies with 'nr_contexts' file.  Nevertheless, only single context
> > +	 * per kdamond is supported for now.  So, we can simply use '0' context
> > +	 * index here.
> > +	 */
> > +	unsigned int cidx = 0;
> > +	struct damos *siter;		/* schemes iterator */
> > +	unsigned int sidx = 0;
> > +	struct damon_target *titer;	/* targets iterator */
> > +	unsigned int tidx = 0;
> > +
> 
> If this loop is only for passing sidx and tidx to the trace point,

You're correct.

> you can add around it:
> 
> 	if (trace_damos_before_apply_enabled()) {
> 
> > +	damon_for_each_scheme(siter, c) {
> > +		if (siter == s)
> > +			break;
> > +		sidx++;
> > +	}
> > +	damon_for_each_target(titer, c) {
> > +		if (titer == t)
> > +			break;
> > +		tidx++;
> > +	}
> 
> 	}
> 
> 
> And then this loop will only be done if that trace event is enabled.

Today I learned yet another great feature of the tracing framework.  Thank you
Steven, I will add that to the next spin of this patchset!

> 
> To prevent races, you may also want to add a third parameter, or initialize
> them to -1:
> 
> 	sidx = -1;
> 
> 	if (trace_damos_before_apply_enabled()) {
> 		sidx = 0;
> 		[..]
> 	}
> 
> And you can change the TRACE_EVENT() to TRACE_EVENT_CONDITION():
> 
> TRACE_EVENT_CONDITION(damos_before_apply,
> 
> 	TP_PROTO(...),
> 
> 	TP_ARGS(...),
> 
> 	TP_CONDITION(sidx >= 0),
> 
> and the trace event will not be called if sidx is less than zero.
> 
> Also, this if statement is only done when the trace event is enabled, so
> it's equivalent to:
> 
> 	if (trace_damos_before_apply_enabled()) {
> 		if (sidx >= 0)
> 			trace_damos_before_apply(cidx, sidx, tidx, r,
> 					damon_nr_regions(t));
> 	}

Again, thank you very much for letting me know this awesome feature.  However,
sidx is supposed to be always >=0 here, since kdamond is running in single
thread and hence no race is expected.  If it exists, it's a bug.  So, I
wouldn't make this change.  Appreciate again for letting me know this very
useful feature, and please let me know if I'm missing something, though!


Thanks,
SJ

> 
> -- Steve
> 
> 
> 
> >  
> >  	if (c->ops.apply_scheme) {
> >  		if (quota->esz && quota->charged_sz + sz > quota->esz) {
> > @@ -964,8 +986,11 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
> >  		ktime_get_coarse_ts64(&begin);
> >  		if (c->callback.before_damos_apply)
> >  			err = c->callback.before_damos_apply(c, t, r, s);
> > -		if (!err)
> > +		if (!err) {
> > +			trace_damos_before_apply(cidx, sidx, tidx, r,
> > +					damon_nr_regions(t));
> >  			sz_applied = c->ops.apply_scheme(c, t, r, s);
> > +		}
> >  		ktime_get_coarse_ts64(&end);
> >  		quota->total_charged_ns += timespec64_to_ns(&end) -
> >  			timespec64_to_ns(&begin);
> > -- 
>
Steven Rostedt Sept. 11, 2023, 8:31 p.m. UTC | #3
On Mon, 11 Sep 2023 19:05:04 +0000
SeongJae Park <sj@kernel.org> wrote:

> > Also, this if statement is only done when the trace event is enabled, so
> > it's equivalent to:
> > 
> > 	if (trace_damos_before_apply_enabled()) {
> > 		if (sidx >= 0)
> > 			trace_damos_before_apply(cidx, sidx, tidx, r,
> > 					damon_nr_regions(t));
> > 	}  
> 
> Again, thank you very much for letting me know this awesome feature.  However,
> sidx is supposed to be always >=0 here, since kdamond is running in single
> thread and hence no race is expected.  If it exists, it's a bug.  So, I
> wouldn't make this change.  Appreciate again for letting me know this very
> useful feature, and please let me know if I'm missing something, though!

The race isn't with your code, but the enabling of tracing.

Let's say you enable tracing just as it passed the first:

	if (trace_damos_before_apply_enabled()) {
   
		damon_for_each_scheme(siter, c) {
			if (siter == s)
				break;
			sidx++;
		}
		damon_for_each_target(titer, c) {
			if (titer == t)
				break;
			tidx++;
	}  

Now, sidx and tidx are zero (when they were not computed; thus, they
shouldn't be zero).

Then tracing is fully enabled here, and now we enter:

 	if (trace_damos_before_apply_enabled()) {
		trace_damos_before_apply(cidx, sidx, tidx, r,
 				damon_nr_regions(t));
 	}

Now the trace event is hit with sidx and tidx zero when they should not be.
This could confuse you when looking at the report.

What I suggested was to initialize sidx to zero, set it in the first
trace_*_enabled() check, and ignore calling the tracepoint if it's not >= 0.

-- Steve
SeongJae Park Sept. 11, 2023, 8:36 p.m. UTC | #4
On Mon, 11 Sep 2023 16:31:27 -0400 Steven Rostedt <rostedt@goodmis.org> wrote:

> On Mon, 11 Sep 2023 19:05:04 +0000
> SeongJae Park <sj@kernel.org> wrote:
> 
> > > Also, this if statement is only done when the trace event is enabled, so
> > > it's equivalent to:
> > > 
> > > 	if (trace_damos_before_apply_enabled()) {
> > > 		if (sdx >= 0)
> > > 			trace_damos_before_apply(cidx, sidx, tidx, r,
> > > 					damon_nr_regions(t));
> > > 	}  
> > 
> > Again, thank you very much for letting me know this awesome feature.  However,
> > sidx is supposed to be always >=0 here, since kdamond is running in single
> > thread and hence no race is expected.  If it exists, it's a bug.  So, I
> > wouldn't make this change.  Appreciate again for letting me know this very
> > useful feature, and please let me know if I'm missing something, though!
> 
> The race isn't with your code, but the enabling of tracing.
> 
> Let's say you enable tracing just as it passed the first:
> 
> 	if (trace_damos_before_apply_enabled()) {
>    
> 		damon_for_each_scheme(siter, c) {
> 			if (siter == s)
> 				break;
> 			sidx++;
> 		}
> 		damon_for_each_target(titer, c) {
> 			if (titer == t)
> 				break;
> 			tidx++;
> 	}  
> 
> Now, sidx and tidx are zero (when they were not computed; thus, they
> shouldn't be zero).
> 
> Then tracing is fully enabled here, and now we enter:
> 
>  	if (trace_damos_before_apply_enabled()) {
> 		trace_damos_before_apply(cidx, sidx, tidx, r,
>  				damon_nr_regions(t));
>  	}
> 
> Now the trace event is hit with sidx and tidx zero when they should not be.
> This could confuse you when looking at the report.

Thank you so much for enlightening me with this kind explanation, Steve!  And
this all make sense.  I will follow your suggestion in the next spin.

> 
> What I suggested was to initialize sidx to zero,

Nit.  Initialize to not zero but -1, right?

> set it in the first trace_*_enabled() check, and ignore calling the
> tracepoint if it's not >= 0.
> 
> -- Steve
> 


Thanks,
SJ
Steven Rostedt Sept. 11, 2023, 8:51 p.m. UTC | #5
On Mon, 11 Sep 2023 20:36:42 +0000
SeongJae Park <sj@kernel.org> wrote:

> > Then tracing is fully enabled here, and now we enter:
> > 
> >  	if (trace_damos_before_apply_enabled()) {
> > 		trace_damos_before_apply(cidx, sidx, tidx, r,
> >  				damon_nr_regions(t));
> >  	}
> > 
> > Now the trace event is hit with sidx and tidx zero when they should not be.
> > This could confuse you when looking at the report.  
> 
> Thank you so much for enlightening me with this kind explanation, Steve!  And
> this all make sense.  I will follow your suggestion in the next spin.
> 
> > 
> > What I suggested was to initialize sidx to zero,  
> 
> Nit.  Initialize to not zero but -1, right?

Yeah, but I was also thinking of the reset of it too :-p

	sidx = -1;

	if (trace_damos_before_apply_enabled()) {
		sidx = 0;

-- Steve


> 
> > set it in the first trace_*_enabled() check, and ignore calling the
> > tracepoint if it's not >= 0.
> >
diff mbox series

Patch

diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 0b8d13bde17a..9e7b39495b05 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,6 +9,43 @@ 
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(damos_before_apply,
+
+	TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+		unsigned int target_idx, struct damon_region *r,
+		unsigned int nr_regions),
+
+	TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, context_idx)
+		__field(unsigned int, scheme_idx)
+		__field(unsigned long, target_idx)
+		__field(unsigned long, start)
+		__field(unsigned long, end)
+		__field(unsigned int, nr_accesses)
+		__field(unsigned int, age)
+		__field(unsigned int, nr_regions)
+	),
+
+	TP_fast_assign(
+		__entry->context_idx = context_idx;
+		__entry->scheme_idx = scheme_idx;
+		__entry->target_idx = target_idx;
+		__entry->start = r->ar.start;
+		__entry->end = r->ar.end;
+		__entry->nr_accesses = r->nr_accesses;
+		__entry->age = r->age;
+		__entry->nr_regions = nr_regions;
+	),
+
+	TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u",
+			__entry->context_idx, __entry->scheme_idx,
+			__entry->target_idx, __entry->nr_regions,
+			__entry->start, __entry->end,
+			__entry->nr_accesses, __entry->age)
+);
+
 TRACE_EVENT(damon_aggregated,
 
 	TP_PROTO(unsigned int target_id, struct damon_region *r,
diff --git a/mm/damon/core.c b/mm/damon/core.c
index ca631dd88b33..aa7fbcdf7310 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -950,6 +950,28 @@  static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 	struct timespec64 begin, end;
 	unsigned long sz_applied = 0;
 	int err = 0;
+	/*
+	 * We plan to support multiple context per kdamond, as DAMON sysfs
+	 * implies with 'nr_contexts' file.  Nevertheless, only single context
+	 * per kdamond is supported for now.  So, we can simply use '0' context
+	 * index here.
+	 */
+	unsigned int cidx = 0;
+	struct damos *siter;		/* schemes iterator */
+	unsigned int sidx = 0;
+	struct damon_target *titer;	/* targets iterator */
+	unsigned int tidx = 0;
+
+	damon_for_each_scheme(siter, c) {
+		if (siter == s)
+			break;
+		sidx++;
+	}
+	damon_for_each_target(titer, c) {
+		if (titer == t)
+			break;
+		tidx++;
+	}
 
 	if (c->ops.apply_scheme) {
 		if (quota->esz && quota->charged_sz + sz > quota->esz) {
@@ -964,8 +986,11 @@  static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 		ktime_get_coarse_ts64(&begin);
 		if (c->callback.before_damos_apply)
 			err = c->callback.before_damos_apply(c, t, r, s);
-		if (!err)
+		if (!err) {
+			trace_damos_before_apply(cidx, sidx, tidx, r,
+					damon_nr_regions(t));
 			sz_applied = c->ops.apply_scheme(c, t, r, s);
+		}
 		ktime_get_coarse_ts64(&end);
 		quota->total_charged_ns += timespec64_to_ns(&end) -
 			timespec64_to_ns(&begin);