
[2/5] cpufreq: schedutil: support scheduler cpufreq callbacks on remote CPUs

Message ID: 1462828814-32530-3-git-send-email-smuckle@linaro.org (mailing list archive)
State: Changes Requested, archived

Commit Message

Steve Muckle May 9, 2016, 9:20 p.m. UTC
In preparation for the scheduler cpufreq callback happening on remote
CPUs, add support for this in schedutil.

Schedutil currently requires that the callback occur on the CPU being
updated in order to support fast frequency switches. Remove this
limitation by checking whether the current CPU is outside the target
CPU's cpufreq policy and, if so, enqueuing an irq_work on
the target CPU. The irq_work for schedutil is modified to carry out a
fast frequency switch if that is enabled for the policy.

If the callback occurs on a CPU within the target CPU's policy, the
transition is carried out on the local CPU.

Signed-off-by: Steve Muckle <smuckle@linaro.org>
---
 kernel/sched/cpufreq_schedutil.c | 86 ++++++++++++++++++++++++++++++----------
 1 file changed, 65 insertions(+), 21 deletions(-)
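
In outline, the commit path after this patch chooses between a local and a
remote transition. The following is a condensed sketch of that flow,
paraphrased from the diff below (tracing and the CONFIG_SMP stub elided),
not a literal excerpt:

	static void sugov_update_commit(struct sugov_cpu *sg_cpu, int cpu,
					u64 time, unsigned int next_freq)
	{
		struct sugov_policy *sg_policy = sg_cpu->sg_policy;

		sg_policy->last_freq_update_time = time;
		if (sg_policy->next_freq == next_freq)
			return;
		sg_policy->next_freq = next_freq;

		/* Caller is outside the target policy: defer to the target CPU. */
		if (!cpumask_test_cpu(smp_processor_id(), sg_policy->policy->cpus)) {
			sg_policy->work_in_progress = true;
			irq_work_queue_on(&sg_policy->irq_work, cpu);
			return;
		}

		/* Local to the policy: switch immediately if the driver allows it. */
		if (sg_policy->policy->fast_switch_enabled) {
			sugov_fast_switch(sg_policy, cpu, next_freq);
		} else {
			sg_policy->work_in_progress = true;
			irq_work_queue(&sg_policy->irq_work);
		}
	}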

Comments

Rafael J. Wysocki May 18, 2016, 11:24 p.m. UTC | #1
On Mon, May 9, 2016 at 11:20 PM, Steve Muckle <steve.muckle@linaro.org> wrote:
> In preparation for the scheduler cpufreq callback happening on remote
> CPUs, add support for this in schedutil.
>
> Schedutil currently requires that the callback occur on the CPU being
> updated in order to support fast frequency switches. Remove this
> limitation by checking whether the current CPU is outside the target
> CPU's cpufreq policy and, if so, enqueuing an irq_work on
> the target CPU. The irq_work for schedutil is modified to carry out a
> fast frequency switch if that is enabled for the policy.
>
> If the callback occurs on a CPU within the target CPU's policy, the
> transition is carried out on the local CPU.
>
> Signed-off-by: Steve Muckle <smuckle@linaro.org>
> ---
>  kernel/sched/cpufreq_schedutil.c | 86 ++++++++++++++++++++++++++++++----------
>  1 file changed, 65 insertions(+), 21 deletions(-)
>
> diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> index 154ae3a51e86..c81f9432f520 100644
> --- a/kernel/sched/cpufreq_schedutil.c
> +++ b/kernel/sched/cpufreq_schedutil.c
> @@ -76,27 +76,61 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
>         return delta_ns >= sg_policy->freq_update_delay_ns;
>  }
>
> -static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
> +static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
> +                             unsigned int next_freq)
> +{
> +       struct cpufreq_policy *policy = sg_policy->policy;
> +
> +       next_freq = cpufreq_driver_fast_switch(policy, next_freq);
> +       if (next_freq == CPUFREQ_ENTRY_INVALID)
> +               return;
> +
> +       policy->cur = next_freq;
> +       trace_cpu_frequency(next_freq, cpu);
> +}
> +
> +#ifdef CONFIG_SMP

schedutil depends on CONFIG_SMP now, so that's not necessary at least
for the time being.

> +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
> +                                        int cpu)
> +{
> +       struct cpufreq_policy *policy = sg_policy->policy;
> +
> +       if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {

This check is overkill for policies that aren't shared (and we have a
special case for them already).

> +               sg_policy->work_in_progress = true;
> +               irq_work_queue_on(&sg_policy->irq_work, cpu);
> +               return true;
> +       }
> +
> +       return false;
> +}
> +#else
> +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
> +                                        int cpu)
> +{
> +       return false;
> +}
> +#endif
> +
> +static void sugov_update_commit(struct sugov_cpu *sg_cpu, int cpu, u64 time,

It looks like you might pass hook here instead of the sg_cpu, cpu pair.

>                                 unsigned int next_freq)
>  {
> +       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
>         struct cpufreq_policy *policy = sg_policy->policy;
>
>         sg_policy->last_freq_update_time = time;
>
> +       if (sg_policy->next_freq == next_freq) {
> +               trace_cpu_frequency(policy->cur, cpu);
> +               return;
> +       }

There was a reason why I put the above under the fast_switch_enabled
branch and it was because this check/trace is not necessary otherwise.

> +       sg_policy->next_freq = next_freq;
> +
> +       if (sugov_queue_remote_callback(sg_policy, cpu))
> +               return;
> +
>         if (policy->fast_switch_enabled) {
> -               if (sg_policy->next_freq == next_freq) {
> -                       trace_cpu_frequency(policy->cur, smp_processor_id());
> -                       return;
> -               }
> -               sg_policy->next_freq = next_freq;
> -               next_freq = cpufreq_driver_fast_switch(policy, next_freq);
> -               if (next_freq == CPUFREQ_ENTRY_INVALID)
> -                       return;
> -
> -               policy->cur = next_freq;
> -               trace_cpu_frequency(next_freq, smp_processor_id());
> -       } else if (sg_policy->next_freq != next_freq) {
> -               sg_policy->next_freq = next_freq;
> +               sugov_fast_switch(sg_policy, cpu, next_freq);
> +       } else {
>                 sg_policy->work_in_progress = true;
>                 irq_work_queue(&sg_policy->irq_work);
>         }
> @@ -142,12 +176,13 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
>
>         next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
>                         get_next_freq(policy, util, max);
> -       sugov_update_commit(sg_policy, time, next_f);
> +       sugov_update_commit(sg_cpu, hook->cpu, time, next_f);
>  }
>
> -static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
> +static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
>                                            unsigned long util, unsigned long max)
>  {
> +       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
>         struct cpufreq_policy *policy = sg_policy->policy;
>         unsigned int max_f = policy->cpuinfo.max_freq;
>         u64 last_freq_update_time = sg_policy->last_freq_update_time;
> @@ -161,10 +196,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
>                 unsigned long j_util, j_max;
>                 s64 delta_ns;
>
> -               if (j == smp_processor_id())
> +               j_sg_cpu = &per_cpu(sugov_cpu, j);
> +               if (j_sg_cpu == sg_cpu)
>                         continue;
>
> -               j_sg_cpu = &per_cpu(sugov_cpu, j);
>                 /*
>                  * If the CPU utilization was last updated before the previous
>                  * frequency update and the time elapsed between the last update
> @@ -204,8 +239,8 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
>         sg_cpu->last_update = time;
>
>         if (sugov_should_update_freq(sg_policy, time)) {
> -               next_f = sugov_next_freq_shared(sg_policy, util, max);
> -               sugov_update_commit(sg_policy, time, next_f);
> +               next_f = sugov_next_freq_shared(sg_cpu, util, max);
> +               sugov_update_commit(sg_cpu, hook->cpu, time, next_f);
>         }
>
>         raw_spin_unlock(&sg_policy->update_lock);
> @@ -226,9 +261,18 @@ static void sugov_work(struct work_struct *work)
>  static void sugov_irq_work(struct irq_work *irq_work)
>  {
>         struct sugov_policy *sg_policy;
> +       struct cpufreq_policy *policy;
>
>         sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
> -       schedule_work_on(smp_processor_id(), &sg_policy->work);
> +       policy = sg_policy->policy;
> +
> +       if (policy->fast_switch_enabled) {
> +               sugov_fast_switch(sg_policy, smp_processor_id(),
> +                                 sg_policy->next_freq);
> +               sg_policy->work_in_progress = false;
> +       } else {
> +               schedule_work_on(smp_processor_id(), &sg_policy->work);
> +       }
>  }
>
>  /************************** sysfs interface ************************/
> --
> 2.4.10
>
Steve Muckle May 19, 2016, 6:40 p.m. UTC | #2
On Thu, May 19, 2016 at 01:24:41AM +0200, Rafael J. Wysocki wrote:
> On Mon, May 9, 2016 at 11:20 PM, Steve Muckle <steve.muckle@linaro.org> wrote:
> > In preparation for the scheduler cpufreq callback happening on remote
> > CPUs, add support for this in schedutil.
> >
> > Schedutil currently requires that the callback occur on the CPU being
> > updated in order to support fast frequency switches. Remove this
> > limitation by checking whether the current CPU is outside the target
> > CPU's cpufreq policy and, if so, enqueuing an irq_work on
> > the target CPU. The irq_work for schedutil is modified to carry out a
> > fast frequency switch if that is enabled for the policy.
> >
> > If the callback occurs on a CPU within the target CPU's policy, the
> > transition is carried out on the local CPU.
> >
> > Signed-off-by: Steve Muckle <smuckle@linaro.org>
> > ---
> >  kernel/sched/cpufreq_schedutil.c | 86 ++++++++++++++++++++++++++++++----------
> >  1 file changed, 65 insertions(+), 21 deletions(-)
> >
> > diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
> > index 154ae3a51e86..c81f9432f520 100644
> > --- a/kernel/sched/cpufreq_schedutil.c
> > +++ b/kernel/sched/cpufreq_schedutil.c
> > @@ -76,27 +76,61 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
> >         return delta_ns >= sg_policy->freq_update_delay_ns;
> >  }
> >
> > -static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
> > +static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
> > +                             unsigned int next_freq)
> > +{
> > +       struct cpufreq_policy *policy = sg_policy->policy;
> > +
> > +       next_freq = cpufreq_driver_fast_switch(policy, next_freq);
> > +       if (next_freq == CPUFREQ_ENTRY_INVALID)
> > +               return;
> > +
> > +       policy->cur = next_freq;
> > +       trace_cpu_frequency(next_freq, cpu);
> > +}
> > +
> > +#ifdef CONFIG_SMP
> 
> schedutil depends on CONFIG_SMP now, so that's not necessary at least
> for the time being.

Will remove.

> 
> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
> > +                                        int cpu)
> > +{
> > +       struct cpufreq_policy *policy = sg_policy->policy;
> > +
> > +       if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {
> 
> This check is overkill for policies that aren't shared (and we have a
> special case for them already).

I don't see why it is overkill - regardless of whether the policy is
shared, we need to determine whether or not we are running on one of the
CPUs (or in the case of a non-shared policy, the single CPU) within that
policy to know whether we can immediately change the frequency in this
context or whether a remote call is required.

> > +               sg_policy->work_in_progress = true;
> > +               irq_work_queue_on(&sg_policy->irq_work, cpu);
> > +               return true;
> > +       }
> > +
> > +       return false;
> > +}
> > +#else
> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
> > +                                        int cpu)
> > +{
> > +       return false;
> > +}
> > +#endif
> > +
> > +static void sugov_update_commit(struct sugov_cpu *sg_cpu, int cpu, u64 time,
> 
> It looks like you might pass hook here instead of the sg_cpu, cpu pair.

I can do that but it means having to do the container_of operation
again. Strictly speaking this seems slightly less efficient than passing
the above values which are already available in the callers.

> 
> >                                 unsigned int next_freq)
> >  {
> > +       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
> >         struct cpufreq_policy *policy = sg_policy->policy;
> >
> >         sg_policy->last_freq_update_time = time;
> >
> > +       if (sg_policy->next_freq == next_freq) {
> > +               trace_cpu_frequency(policy->cur, cpu);
> > +               return;
> > +       }
> 
> There was a reason why I put the above under the fast_switch_enabled
> branch and it was because this check/trace is not necessary otherwise.

I remember asking about this tracepoint earlier. You had said it was
required because powertop would not work without it (reporting the CPU
as idle in certain situations).

I'm not sure why that is only true for the fast switch enabled case but
it seems like an odd inconsistency for the governor to trace unchanged
frequencies when fast switches are enabled but not otherwise. It'd be
useful I think for profiling and tuning if the tracing was consistent.

This behavioral change is admittedly not part of the purpose of the
patch and could be split out if need be.

thanks,
Steve
Rafael J. Wysocki May 19, 2016, 8:55 p.m. UTC | #3
On Thu, May 19, 2016 at 8:40 PM, Steve Muckle <steve.muckle@linaro.org> wrote:
> On Thu, May 19, 2016 at 01:24:41AM +0200, Rafael J. Wysocki wrote:
>> On Mon, May 9, 2016 at 11:20 PM, Steve Muckle <steve.muckle@linaro.org> wrote:
>> > In preparation for the scheduler cpufreq callback happening on remote
>> > CPUs, add support for this in schedutil.
>> >
>> > Schedutil currently requires that the callback occur on the CPU being
>> > updated in order to support fast frequency switches. Remove this
>> > limitation by checking whether the current CPU is outside the target
>> > CPU's cpufreq policy and, if so, enqueuing an irq_work on
>> > the target CPU. The irq_work for schedutil is modified to carry out a
>> > fast frequency switch if that is enabled for the policy.
>> >
>> > If the callback occurs on a CPU within the target CPU's policy, the
>> > transition is carried out on the local CPU.
>> >
>> > Signed-off-by: Steve Muckle <smuckle@linaro.org>
>> > ---
>> >  kernel/sched/cpufreq_schedutil.c | 86 ++++++++++++++++++++++++++++++----------
>> >  1 file changed, 65 insertions(+), 21 deletions(-)
>> >
>> > diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
>> > index 154ae3a51e86..c81f9432f520 100644
>> > --- a/kernel/sched/cpufreq_schedutil.c
>> > +++ b/kernel/sched/cpufreq_schedutil.c
>> > @@ -76,27 +76,61 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
>> >         return delta_ns >= sg_policy->freq_update_delay_ns;
>> >  }
>> >
>> > -static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
>> > +static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
>> > +                             unsigned int next_freq)
>> > +{
>> > +       struct cpufreq_policy *policy = sg_policy->policy;
>> > +
>> > +       next_freq = cpufreq_driver_fast_switch(policy, next_freq);
>> > +       if (next_freq == CPUFREQ_ENTRY_INVALID)
>> > +               return;
>> > +
>> > +       policy->cur = next_freq;
>> > +       trace_cpu_frequency(next_freq, cpu);
>> > +}
>> > +
>> > +#ifdef CONFIG_SMP
>>
>> schedutil depends on CONFIG_SMP now, so that's not necessary at least
>> for the time being.
>
> Will remove.
>
>>
>> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
>> > +                                        int cpu)
>> > +{
>> > +       struct cpufreq_policy *policy = sg_policy->policy;
>> > +
>> > +       if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {
>>
>> This check is overkill for policies that aren't shared (and we have a
>> special case for them already).
>
> I don't see why it is overkill -

Because it requires more computation, memory accesses, etc. than simply
comparing smp_processor_id() with cpu.
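
A minimal sketch of the cheaper test for a non-shared policy (the helper
name and the policy_is_shared flag are hypothetical, not from the patch):

	static inline bool sugov_this_cpu_can_update(struct sugov_policy *sg_policy,
						     int cpu, bool policy_is_shared)
	{
		/* Non-shared policy: one CPU, so a plain ID comparison suffices. */
		if (!policy_is_shared)
			return smp_processor_id() == cpu;

		/* Shared policy: fall back to the cpumask membership test. */
		return cpumask_test_cpu(smp_processor_id(),
					sg_policy->policy->cpus);
	}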

> regardless of whether the policy is
> shared, we need to determine whether or not we are running on one of the
> CPUs (or in the case of a non-shared policy, the single CPU) within that
> policy to know whether we can immediately change the frequency in this
> context or whether a remote call is required.
>
>> > +               sg_policy->work_in_progress = true;
>> > +               irq_work_queue_on(&sg_policy->irq_work, cpu);
>> > +               return true;
>> > +       }
>> > +
>> > +       return false;
>> > +}
>> > +#else
>> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
>> > +                                        int cpu)
>> > +{
>> > +       return false;
>> > +}
>> > +#endif
>> > +
>> > +static void sugov_update_commit(struct sugov_cpu *sg_cpu, int cpu, u64 time,
>>
>> It looks like you might pass hook here instead of the sg_cpu, cpu pair.
>
> I can do that but it means having to do the container_of operation
> again. Strictly speaking this seems slightly less efficient than passing
> the above values which are already available in the callers.

Well, it seems a bit odd to pass two things referring to the same CPU,
but then I don't care that much.
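
For reference, the hook-based variant under discussion would look roughly
like this (a sketch; it relies on sugov_cpu embedding the update_util_data
hook, as the existing code does):

	static void sugov_update_commit(struct update_util_data *hook, u64 time,
					unsigned int next_freq)
	{
		/* Recover sg_cpu from the hook; one extra container_of here. */
		struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu,
							update_util);
		int cpu = hook->cpu;
		struct sugov_policy *sg_policy = sg_cpu->sg_policy;

		/* ... body as in the patch ... */
	}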

>> >                                 unsigned int next_freq)
>> >  {
>> > +       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
>> >         struct cpufreq_policy *policy = sg_policy->policy;
>> >
>> >         sg_policy->last_freq_update_time = time;
>> >
>> > +       if (sg_policy->next_freq == next_freq) {
>> > +               trace_cpu_frequency(policy->cur, cpu);
>> > +               return;
>> > +       }
>>
>> There was a reason why I put the above under the fast_switch_enabled
>> branch and it was because this check/trace is not necessary otherwise.
>
> I remember asking about this tracepoint earlier. You had said it was
> required because powertop would not work without it (reporting the CPU
> as idle in certain situations).
>
> I'm not sure why that is only true for the fast switch enabled case

Because in the other case cpufreq stats are used by powertop, so this
problem is not visible.

> but it seems like an odd inconsistency for the governor to trace unchanged
> frequencies when fast switches are enabled but not otherwise. It'd be
> useful I think for profiling and tuning if the tracing was consistent.

Well, fair enough.

> This behavioral change is admittedly not part of the purpose of the
> patch and could be split out if needbe.

No need to split IMO, but it might be prudent to mention that change
in behavior in the changelog.
Steve Muckle May 19, 2016, 10:59 p.m. UTC | #4
On Thu, May 19, 2016 at 10:55:23PM +0200, Rafael J. Wysocki wrote:
> >> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
> >> > +                                        int cpu)
> >> > +{
> >> > +       struct cpufreq_policy *policy = sg_policy->policy;
> >> > +
> >> > +       if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {
> >>
> >> This check is overkill for policies that aren't shared (and we have a
> >> special case for them already).
> >
> > I don't see why it is overkill -
> 
> Because it requires more computation, memory accesses, etc. than simply
> comparing smp_processor_id() with cpu.

Do you have a preference on how to restructure this? Otherwise I'll
create a second version of sugov_update_commit, factoring out as much of
it as I can into two inline sub-functions. 

...
> 
> > but it seems like an odd inconsistency for the governor to trace unchanged
> > frequencies when fast switches are enabled but not otherwise. It'd be
> > useful I think for profiling and tuning if the tracing was consistent.
> 
> Well, fair enough.
> 
> > This behavioral change is admittedly not part of the purpose of the
> > patch and could be split out if need be.
> 
> No need to split IMO, but it might be prudent to mention that change
> in behavior in the changelog.

Will do.

thanks,
Steve
Rafael J. Wysocki May 19, 2016, 11:14 p.m. UTC | #5
On Fri, May 20, 2016 at 12:59 AM, Steve Muckle <steve.muckle@linaro.org> wrote:
> On Thu, May 19, 2016 at 10:55:23PM +0200, Rafael J. Wysocki wrote:
>> >> > +static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
>> >> > +                                        int cpu)
>> >> > +{
>> >> > +       struct cpufreq_policy *policy = sg_policy->policy;
>> >> > +
>> >> > +       if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {
>> >>
>> >> This check is overkill for policies that aren't shared (and we have a
>> >> special case for them already).
>> >
>> > I don't see why it is overkill -
>>
>> Because it requires more computation, memory accesses, etc. than simply
>> comparing smp_processor_id() with cpu.
>
> Do you have a preference on how to restructure this?

Not really.

> Otherwise I'll create a second version of sugov_update_commit, factoring out as much of
> it as I can into two inline sub-functions.

I guess in that case it might be better to fold the
sugov_update_commit() code into its callers and then factor out common
stuff into sub-functions called from there.
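
A rough sketch of that shape for the single-CPU policy case (hypothetical;
it reuses the patch's helpers, folds the commit logic into the caller, and
reduces the policy-membership check to a CPU-ID comparison):

	static void sugov_update_single(struct update_util_data *hook, u64 time,
					unsigned long util, unsigned long max)
	{
		struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu,
							update_util);
		struct sugov_policy *sg_policy = sg_cpu->sg_policy;
		struct cpufreq_policy *policy = sg_policy->policy;
		unsigned int next_f;

		if (!sugov_should_update_freq(sg_policy, time))
			return;

		next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
				get_next_freq(policy, util, max);

		sg_policy->last_freq_update_time = time;
		if (sg_policy->next_freq == next_f)
			return;
		sg_policy->next_freq = next_f;

		/* Non-shared policy: a plain CPU-ID check replaces the cpumask test. */
		if (smp_processor_id() != hook->cpu) {
			sg_policy->work_in_progress = true;
			irq_work_queue_on(&sg_policy->irq_work, hook->cpu);
		} else if (policy->fast_switch_enabled) {
			sugov_fast_switch(sg_policy, hook->cpu, next_f);
		} else {
			sg_policy->work_in_progress = true;
			irq_work_queue(&sg_policy->irq_work);
		}
	}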

Patch

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 154ae3a51e86..c81f9432f520 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -76,27 +76,61 @@  static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
-static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+static void sugov_fast_switch(struct sugov_policy *sg_policy, int cpu,
+			      unsigned int next_freq)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+
+	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+	if (next_freq == CPUFREQ_ENTRY_INVALID)
+		return;
+
+	policy->cur = next_freq;
+	trace_cpu_frequency(next_freq, cpu);
+}
+
+#ifdef CONFIG_SMP
+static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
+					 int cpu)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+
+	if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) {
+		sg_policy->work_in_progress = true;
+		irq_work_queue_on(&sg_policy->irq_work, cpu);
+		return true;
+	}
+
+	return false;
+}
+#else
+static inline bool sugov_queue_remote_callback(struct sugov_policy *sg_policy,
+					 int cpu)
+{
+	return false;
+}
+#endif
+
+static void sugov_update_commit(struct sugov_cpu *sg_cpu, int cpu, u64 time,
 				unsigned int next_freq)
 {
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
 
 	sg_policy->last_freq_update_time = time;
 
+	if (sg_policy->next_freq == next_freq) {
+		trace_cpu_frequency(policy->cur, cpu);
+		return;
+	}
+	sg_policy->next_freq = next_freq;
+
+	if (sugov_queue_remote_callback(sg_policy, cpu))
+		return;
+
 	if (policy->fast_switch_enabled) {
-		if (sg_policy->next_freq == next_freq) {
-			trace_cpu_frequency(policy->cur, smp_processor_id());
-			return;
-		}
-		sg_policy->next_freq = next_freq;
-		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (next_freq == CPUFREQ_ENTRY_INVALID)
-			return;
-
-		policy->cur = next_freq;
-		trace_cpu_frequency(next_freq, smp_processor_id());
-	} else if (sg_policy->next_freq != next_freq) {
-		sg_policy->next_freq = next_freq;
+		sugov_fast_switch(sg_policy, cpu, next_freq);
+	} else {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
 	}
@@ -142,12 +176,13 @@  static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
 			get_next_freq(policy, util, max);
-	sugov_update_commit(sg_policy, time, next_f);
+	sugov_update_commit(sg_cpu, hook->cpu, time, next_f);
 }
 
-static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 					   unsigned long util, unsigned long max)
 {
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned int max_f = policy->cpuinfo.max_freq;
 	u64 last_freq_update_time = sg_policy->last_freq_update_time;
@@ -161,10 +196,10 @@  static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
 		unsigned long j_util, j_max;
 		s64 delta_ns;
 
-		if (j == smp_processor_id())
+		j_sg_cpu = &per_cpu(sugov_cpu, j);
+		if (j_sg_cpu == sg_cpu)
 			continue;
 
-		j_sg_cpu = &per_cpu(sugov_cpu, j);
 		/*
 		 * If the CPU utilization was last updated before the previous
 		 * frequency update and the time elapsed between the last update
@@ -204,8 +239,8 @@  static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		next_f = sugov_next_freq_shared(sg_policy, util, max);
-		sugov_update_commit(sg_policy, time, next_f);
+		next_f = sugov_next_freq_shared(sg_cpu, util, max);
+		sugov_update_commit(sg_cpu, hook->cpu, time, next_f);
 	}
 
 	raw_spin_unlock(&sg_policy->update_lock);
@@ -226,9 +261,18 @@  static void sugov_work(struct work_struct *work)
 static void sugov_irq_work(struct irq_work *irq_work)
 {
 	struct sugov_policy *sg_policy;
+	struct cpufreq_policy *policy;
 
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
-	schedule_work_on(smp_processor_id(), &sg_policy->work);
+	policy = sg_policy->policy;
+
+	if (policy->fast_switch_enabled) {
+		sugov_fast_switch(sg_policy, smp_processor_id(),
+				  sg_policy->next_freq);
+		sg_policy->work_in_progress = false;
+	} else {
+		schedule_work_on(smp_processor_id(), &sg_policy->work);
+	}
 }
 
 /************************** sysfs interface ************************/