
cpufreq: schedutil: Call sugov_update_next_freq() before check to fast_switch_enabled

Message ID 20210224054232.1222-1-zbestahu@gmail.com (mailing list archive)
State Superseded, archived
Series cpufreq: schedutil: Call sugov_update_next_freq() before check to fast_switch_enabled

Commit Message

Yue Hu Feb. 24, 2021, 5:42 a.m. UTC
From: Yue Hu <huyue2@yulong.com>

Note that sugov_update_next_freq() may return false, which means the
caller sugov_fast_switch() will do nothing except the fast switch check.

Similarly, the raw_spin_{lock,unlock} operations taken around
sugov_deferred_update() in sugov_update_single_freq() are unnecessary
in that case.

So, let's call sugov_update_next_freq() before the fast switch check
to avoid the unnecessary work above, and update the related function
signatures accordingly.

Signed-off-by: Yue Hu <huyue2@yulong.com>
---
 kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

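For context, the "may return false" case comes from sugov_update_next_freq()
bailing out early when nothing needs to change. A simplified sketch of that
helper, paraphrased from kernel/sched/cpufreq_schedutil.c around v5.12 (the
need_freq_update handling may differ between versions, so treat this as
illustrative rather than authoritative):

/*
 * Simplified sketch of the helper whose result this patch hoists into
 * the callers; paraphrased from around v5.12, not part of the patch.
 */
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (!sg_policy->need_freq_update) {
		/* Nothing to do: the requested frequency is unchanged. */
		if (sg_policy->next_freq == next_freq)
			return false;
	} else {
		sg_policy->need_freq_update =
			cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	}

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

When this returns false, the pre-patch code had already entered
sugov_fast_switch() or taken update_lock for sugov_deferred_update() for
nothing; that is the waste the patch removes.
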
Comments

Viresh Kumar Feb. 24, 2021, 6:02 a.m. UTC | #1
On 24-02-21, 13:42, Yue Hu wrote:
> From: Yue Hu <huyue2@yulong.com>
> 
> Note that sugov_update_next_freq() may return false, which means the
> caller sugov_fast_switch() will do nothing except the fast switch check.
> 
> Similarly, the raw_spin_{lock,unlock} operations taken around
> sugov_deferred_update() in sugov_update_single_freq() are unnecessary
> in that case.
> 
> So, let's call sugov_update_next_freq() before the fast switch check
> to avoid the unnecessary work above, and update the related function
> signatures accordingly.
> 
> Signed-off-by: Yue Hu <huyue2@yulong.com>
> ---
>  kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index 41e498b..d23e5be 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -114,19 +114,13 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
>  	return true;
>  }
>  
> -static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
> -			      unsigned int next_freq)
> +static void sugov_fast_switch(struct sugov_policy *sg_policy, unsigned int next_freq)
>  {
> -	if (sugov_update_next_freq(sg_policy, time, next_freq))
> -		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
> +	cpufreq_driver_fast_switch(sg_policy->policy, next_freq);

I would call this directly instead; no need for the wrapper anymore.

>  }
>  
> -static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
> -				  unsigned int next_freq)
> +static void sugov_deferred_update(struct sugov_policy *sg_policy)
>  {
> -	if (!sugov_update_next_freq(sg_policy, time, next_freq))
> -		return;
> -
>  	if (!sg_policy->work_in_progress) {
>  		sg_policy->work_in_progress = true;
>  		irq_work_queue(&sg_policy->irq_work);
> @@ -368,16 +362,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
>  		sg_policy->cached_raw_freq = cached_freq;
>  	}
>  
> +	if (!sugov_update_next_freq(sg_policy, time, next_f))
> +		return;
> +
>  	/*
>  	 * This code runs under rq->lock for the target CPU, so it won't run
>  	 * concurrently on two different CPUs for the same target and it is not
>  	 * necessary to acquire the lock in the fast switch case.
>  	 */
>  	if (sg_policy->policy->fast_switch_enabled) {
> -		sugov_fast_switch(sg_policy, time, next_f);
> +		sugov_fast_switch(sg_policy, next_f);
>  	} else {
>  		raw_spin_lock(&sg_policy->update_lock);
> -		sugov_deferred_update(sg_policy, time, next_f);
> +		sugov_deferred_update(sg_policy);
>  		raw_spin_unlock(&sg_policy->update_lock);
>  	}
>  }
> @@ -456,12 +453,15 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
>  	if (sugov_should_update_freq(sg_policy, time)) {
>  		next_f = sugov_next_freq_shared(sg_cpu, time);
>  
> +		if (!sugov_update_next_freq(sg_policy, time, next_f))
> +			goto unlock;
> +
>  		if (sg_policy->policy->fast_switch_enabled)
> -			sugov_fast_switch(sg_policy, time, next_f);
> +			sugov_fast_switch(sg_policy, next_f);
>  		else
> -			sugov_deferred_update(sg_policy, time, next_f);
> +			sugov_deferred_update(sg_policy);
>  	}
> -
> +unlock:
>  	raw_spin_unlock(&sg_policy->update_lock);
>  }
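
Viresh's suggestion above follows from the hoisting: once the check moves
into the callers, sugov_fast_switch() is a one-line wrapper around
cpufreq_driver_fast_switch(). A sketch of what the fast-switch branch in
sugov_update_single_freq() might look like with the wrapper dropped (a
hypothetical follow-up on top of this patch, not part of it):

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * Hypothetical follow-up: call the driver directly instead of
	 * going through the now-trivial sugov_fast_switch() wrapper.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}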
Yue Hu Feb. 24, 2021, 6:07 a.m. UTC | #2
On Wed, 24 Feb 2021 11:32:36 +0530
Viresh Kumar <viresh.kumar@linaro.org> wrote:

> On 24-02-21, 13:42, Yue Hu wrote:
> > From: Yue Hu <huyue2@yulong.com>
> > 
> > Note that sugov_update_next_freq() may return false, which means the
> > caller sugov_fast_switch() will do nothing except the fast switch check.
> > 
> > Similarly, the raw_spin_{lock,unlock} operations taken around
> > sugov_deferred_update() in sugov_update_single_freq() are unnecessary
> > in that case.
> > 
> > So, let's call sugov_update_next_freq() before the fast switch check
> > to avoid the unnecessary work above, and update the related function
> > signatures accordingly.
> > 
> > Signed-off-by: Yue Hu <huyue2@yulong.com>
> > ---
> >  kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++--------------
> >  1 file changed, 14 insertions(+), 14 deletions(-)
> > 
> > diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> > index 41e498b..d23e5be 100644
> > --- a/kernel/sched/cpufreq_schedutil.c
> > +++ b/kernel/sched/cpufreq_schedutil.c
> > @@ -114,19 +114,13 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
> >  	return true;
> >  }
> >  
> > -static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
> > -			      unsigned int next_freq)
> > +static void sugov_fast_switch(struct sugov_policy *sg_policy, unsigned int next_freq)
> >  {
> > -	if (sugov_update_next_freq(sg_policy, time, next_freq))
> > -		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
> > +	cpufreq_driver_fast_switch(sg_policy->policy, next_freq);  
> 
> I would call this directly instead; no need for the wrapper anymore.

Will fix it in the next version.

Thank you.

> 
> >  }
> >  
> > -static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
> > -				  unsigned int next_freq)
> > +static void sugov_deferred_update(struct sugov_policy *sg_policy)
> >  {
> > -	if (!sugov_update_next_freq(sg_policy, time, next_freq))
> > -		return;
> > -
> >  	if (!sg_policy->work_in_progress) {
> >  		sg_policy->work_in_progress = true;
> >  		irq_work_queue(&sg_policy->irq_work);
> > @@ -368,16 +362,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
> >  		sg_policy->cached_raw_freq = cached_freq;
> >  	}
> >  
> > +	if (!sugov_update_next_freq(sg_policy, time, next_f))
> > +		return;
> > +
> >  	/*
> >  	 * This code runs under rq->lock for the target CPU, so it won't run
> >  	 * concurrently on two different CPUs for the same target and it is not
> >  	 * necessary to acquire the lock in the fast switch case.
> >  	 */
> >  	if (sg_policy->policy->fast_switch_enabled) {
> > -		sugov_fast_switch(sg_policy, time, next_f);
> > +		sugov_fast_switch(sg_policy, next_f);
> >  	} else {
> >  		raw_spin_lock(&sg_policy->update_lock);
> > -		sugov_deferred_update(sg_policy, time, next_f);
> > +		sugov_deferred_update(sg_policy);
> >  		raw_spin_unlock(&sg_policy->update_lock);
> >  	}
> >  }
> > @@ -456,12 +453,15 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
> >  	if (sugov_should_update_freq(sg_policy, time)) {
> >  		next_f = sugov_next_freq_shared(sg_cpu, time);
> >  
> > +		if (!sugov_update_next_freq(sg_policy, time, next_f))
> > +			goto unlock;
> > +
> >  		if (sg_policy->policy->fast_switch_enabled)
> > -			sugov_fast_switch(sg_policy, time, next_f);
> > +			sugov_fast_switch(sg_policy, next_f);
> >  		else
> > -			sugov_deferred_update(sg_policy, time, next_f);
> > +			sugov_deferred_update(sg_policy);
> >  	}
> > -
> > +unlock:
> >  	raw_spin_unlock(&sg_policy->update_lock);
> >  }  
>
Rafael J. Wysocki March 18, 2021, 6:50 p.m. UTC | #3
On Wed, Feb 24, 2021 at 6:44 AM Yue Hu <zbestahu@gmail.com> wrote:
>
> From: Yue Hu <huyue2@yulong.com>
>
> Note that sugov_update_next_freq() may return false, which means the
> caller sugov_fast_switch() will do nothing except the fast switch check.
>
> Similarly, the raw_spin_{lock,unlock} operations taken around
> sugov_deferred_update() in sugov_update_single_freq() are unnecessary
> in that case.
>
> So, let's call sugov_update_next_freq() before the fast switch check
> to avoid the unnecessary work above, and update the related function
> signatures accordingly.
>
> Signed-off-by: Yue Hu <huyue2@yulong.com>
> ---
>  kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index 41e498b..d23e5be 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -114,19 +114,13 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
>         return true;
>  }
>
> -static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
> -                             unsigned int next_freq)
> +static void sugov_fast_switch(struct sugov_policy *sg_policy, unsigned int next_freq)
>  {
> -       if (sugov_update_next_freq(sg_policy, time, next_freq))
> -               cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
> +       cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
>  }
>
> -static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
> -                                 unsigned int next_freq)
> +static void sugov_deferred_update(struct sugov_policy *sg_policy)
>  {
> -       if (!sugov_update_next_freq(sg_policy, time, next_freq))
> -               return;
> -
>         if (!sg_policy->work_in_progress) {
>                 sg_policy->work_in_progress = true;
>                 irq_work_queue(&sg_policy->irq_work);
> @@ -368,16 +362,19 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
>                 sg_policy->cached_raw_freq = cached_freq;
>         }
>
> +       if (!sugov_update_next_freq(sg_policy, time, next_f))
> +               return;
> +
>         /*
>          * This code runs under rq->lock for the target CPU, so it won't run
>          * concurrently on two different CPUs for the same target and it is not
>          * necessary to acquire the lock in the fast switch case.
>          */
>         if (sg_policy->policy->fast_switch_enabled) {
> -               sugov_fast_switch(sg_policy, time, next_f);
> +               sugov_fast_switch(sg_policy, next_f);
>         } else {
>                 raw_spin_lock(&sg_policy->update_lock);
> -               sugov_deferred_update(sg_policy, time, next_f);
> +               sugov_deferred_update(sg_policy);
>                 raw_spin_unlock(&sg_policy->update_lock);
>         }
>  }
> @@ -456,12 +453,15 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
>         if (sugov_should_update_freq(sg_policy, time)) {
>                 next_f = sugov_next_freq_shared(sg_cpu, time);
>
> +               if (!sugov_update_next_freq(sg_policy, time, next_f))
> +                       goto unlock;
> +
>                 if (sg_policy->policy->fast_switch_enabled)
> -                       sugov_fast_switch(sg_policy, time, next_f);
> +                       sugov_fast_switch(sg_policy, next_f);
>                 else
> -                       sugov_deferred_update(sg_policy, time, next_f);
> +                       sugov_deferred_update(sg_policy);
>         }
> -
> +unlock:
>         raw_spin_unlock(&sg_policy->update_lock);
>  }
>
> --

Applied as 5.13 material, thanks!

Patch

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 41e498b..d23e5be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -114,19 +114,13 @@  static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 	return true;
 }
 
-static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
-			      unsigned int next_freq)
+static void sugov_fast_switch(struct sugov_policy *sg_policy, unsigned int next_freq)
 {
-	if (sugov_update_next_freq(sg_policy, time, next_freq))
-		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
+	cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
 }
 
-static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
-				  unsigned int next_freq)
+static void sugov_deferred_update(struct sugov_policy *sg_policy)
 {
-	if (!sugov_update_next_freq(sg_policy, time, next_freq))
-		return;
-
 	if (!sg_policy->work_in_progress) {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
@@ -368,16 +362,19 @@  static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 		sg_policy->cached_raw_freq = cached_freq;
 	}
 
+	if (!sugov_update_next_freq(sg_policy, time, next_f))
+		return;
+
 	/*
 	 * This code runs under rq->lock for the target CPU, so it won't run
 	 * concurrently on two different CPUs for the same target and it is not
 	 * necessary to acquire the lock in the fast switch case.
 	 */
 	if (sg_policy->policy->fast_switch_enabled) {
-		sugov_fast_switch(sg_policy, time, next_f);
+		sugov_fast_switch(sg_policy, next_f);
 	} else {
 		raw_spin_lock(&sg_policy->update_lock);
-		sugov_deferred_update(sg_policy, time, next_f);
+		sugov_deferred_update(sg_policy);
 		raw_spin_unlock(&sg_policy->update_lock);
 	}
 }
@@ -456,12 +453,15 @@  static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
 
+		if (!sugov_update_next_freq(sg_policy, time, next_f))
+			goto unlock;
+
 		if (sg_policy->policy->fast_switch_enabled)
-			sugov_fast_switch(sg_policy, time, next_f);
+			sugov_fast_switch(sg_policy, next_f);
 		else
-			sugov_deferred_update(sg_policy, time, next_f);
+			sugov_deferred_update(sg_policy);
 	}
-
+unlock:
 	raw_spin_unlock(&sg_policy->update_lock);
 }
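
Reconstructed from the hunks above (surrounding code elided), the tail of
sugov_update_shared() ends up looking roughly like this; the new unlock
label matters because update_lock is taken at the top of the function, so
the early bail-out still has to drop it:

	raw_spin_lock(&sg_policy->update_lock);

	/* ... utilization bookkeeping elided ... */

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		/* Nothing changed: skip both update paths, but still unlock. */
		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);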