Message ID | 20240820163512.1096301-4-qyousef@layalina.io (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | sched/fair/schedutil: Better manage system response time | expand |
On Tue, Aug 20, 2024 at 05:34:59PM +0100, Qais Yousef wrote: > It is basically the ramp-up time from 0 to a given value. Will be used > later to implement new tunable to control response time for schedutil. > > Signed-off-by: Qais Yousef <qyousef@layalina.io> > --- > kernel/sched/pelt.c | 21 +++++++++++++++++++++ > kernel/sched/sched.h | 1 + > 2 files changed, 22 insertions(+) > > diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c > index 2ce83e880bd5..06cb881ba582 100644 > --- a/kernel/sched/pelt.c > +++ b/kernel/sched/pelt.c > @@ -487,3 +487,24 @@ unsigned long approximate_util_avg(unsigned long util, u64 delta) > > return sa.util_avg; > } > + > +/* > + * Approximate the required amount of runtime in ms required to reach @util. > + */ > +u64 approximate_runtime(unsigned long util) > +{ > + struct sched_avg sa = {}; > + u64 delta = 1024; // period = 1024 = ~1ms > + u64 runtime = 0; > + > + if (unlikely(!util)) > + return runtime; Seems like this check can be removed since it's covered by the loop condition. > + > + while (sa.util_avg < util) { > + accumulate_sum(delta, &sa, 1, 0, 1); > + ___update_load_avg(&sa, 0); > + runtime++; > + } I think this could be a lookup table (probably 1024 * u8), for constant-time runtime approximation. > + > + return runtime; > +} > diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h > index 294c6769e330..47f158b2cdc2 100644 > --- a/kernel/sched/sched.h > +++ b/kernel/sched/sched.h > @@ -3065,6 +3065,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, > unsigned long max); > > unsigned long approximate_util_avg(unsigned long util, u64 delta); > +u64 approximate_runtime(unsigned long util); > > /* > * Verify the fitness of task @p to run on @cpu taking into account the > -- > 2.34.1 > Cheers, Sultan
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 2ce83e880bd5..06cb881ba582 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -487,3 +487,24 @@ unsigned long approximate_util_avg(unsigned long util, u64 delta) return sa.util_avg; } + +/* + * Approximate the required amount of runtime in ms required to reach @util. + */ +u64 approximate_runtime(unsigned long util) +{ + struct sched_avg sa = {}; + u64 delta = 1024; // period = 1024 = ~1ms + u64 runtime = 0; + + if (unlikely(!util)) + return runtime; + + while (sa.util_avg < util) { + accumulate_sum(delta, &sa, 1, 0, 1); + ___update_load_avg(&sa, 0); + runtime++; + } + + return runtime; +} diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 294c6769e330..47f158b2cdc2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3065,6 +3065,7 @@ unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, unsigned long max); unsigned long approximate_util_avg(unsigned long util, u64 delta); +u64 approximate_runtime(unsigned long util); /* * Verify the fitness of task @p to run on @cpu taking into account the
It is basically the ramp-up time from 0 to a given value. It will be used later to implement a new tunable to control the response time for schedutil. Signed-off-by: Qais Yousef <qyousef@layalina.io> --- kernel/sched/pelt.c | 21 +++++++++++++++++++++ kernel/sched/sched.h | 1 + 2 files changed, 22 insertions(+)