cpuidle: riscv-sbi: Add cluster_pm_enter()/exit()

Message ID 20240226065113.1690534-1-nick.hu@sifive.com (mailing list archive)
State Accepted
Commit 3fd665f2854dc0c54dcbfee4557dae67d95b3d76
Series cpuidle: riscv-sbi: Add cluster_pm_enter()/exit()

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/patch-1-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-1-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-1-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-1-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-1-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-1-test-6 success .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-1-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-1-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-1-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-1-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-1-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-1-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Nick Hu Feb. 26, 2024, 6:51 a.m. UTC
When the CPUs in the same cluster are all in the idle state, the kernel
might put the cluster into a deeper low-power state. Call
cluster_pm_enter() before entering the low-power state and call
cluster_pm_exit() after the cluster has woken up.

Signed-off-by: Nick Hu <nick.hu@sifive.com>
---
 drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)
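
For context, a minimal sketch of a consumer of these notifications: once the
platform code calls cpu_cluster_pm_enter()/exit(), any driver registered via
cpu_pm_register_notifier() receives the corresponding CPU_CLUSTER_PM_* events.
The save/restore helpers here are hypothetical placeholders.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical platform-specific helpers. */
void my_save_cluster_context(void);
void my_restore_cluster_context(void);

static int my_cluster_pm_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	switch (action) {
	case CPU_CLUSTER_PM_ENTER:
		/* Last CPU going idle; the cluster may lose power. */
		my_save_cluster_context();
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* Cluster is back up (or never went down). */
		my_restore_cluster_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cluster_pm_nb = {
	.notifier_call = my_cluster_pm_notify,
};

static int __init my_cluster_pm_init(void)
{
	return cpu_pm_register_notifier(&my_cluster_pm_nb);
}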

Comments

patchwork-bot+linux-riscv@kernel.org April 28, 2024, 10 p.m. UTC | #1
Hello:

This patch was applied to riscv/linux.git (for-next)
by Palmer Dabbelt <palmer@rivosinc.com>:

On Mon, 26 Feb 2024 14:51:13 +0800 you wrote:
> When the CPUs in the same cluster are all in the idle state, the kernel
> might put the cluster into a deeper low-power state. Call
> cluster_pm_enter() before entering the low-power state and call
> cluster_pm_exit() after the cluster has woken up.
> 
> Signed-off-by: Nick Hu <nick.hu@sifive.com>
> 
> [...]

Here is the summary with links:
  - cpuidle: riscv-sbi: Add cluster_pm_enter()/exit()
    https://git.kernel.org/riscv/c/3fd665f2854d

You are awesome, thank you!
Ulf Hansson April 29, 2024, 2:32 p.m. UTC | #2
On Mon, 26 Feb 2024 at 07:51, Nick Hu <nick.hu@sifive.com> wrote:
>
> When the CPUs in the same cluster are all in the idle state, the kernel
> might put the cluster into a deeper low-power state. Call
> cluster_pm_enter() before entering the low-power state and call
> cluster_pm_exit() after the cluster has woken up.
>
> Signed-off-by: Nick Hu <nick.hu@sifive.com>

I was not cc'ed on this patch, but noticed that it got queued up
recently. Sorry for not noticing it earlier.

If not too late, can you please drop/revert it? We should really move
away from the CPU cluster notifiers. See more information below.

> ---
>  drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
>  1 file changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
> index e8094fc92491..298dc76a00cf 100644
> --- a/drivers/cpuidle/cpuidle-riscv-sbi.c
> +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
> @@ -394,6 +394,7 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
>  {
>         struct genpd_power_state *state = &pd->states[pd->state_idx];
>         u32 *pd_state;
> +       int ret;
>
>         if (!state->data)
>                 return 0;
> @@ -401,6 +402,10 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
>         if (!sbi_cpuidle_pd_allow_domain_state)
>                 return -EBUSY;
>
> +       ret = cpu_cluster_pm_enter();
> +       if (ret)
> +               return ret;

Rather than using the CPU cluster notifiers, consumers of the genpd
can register themselves to receive genpd on/off notifiers.

In other words, none of this should be needed, right?

[...]

Kind regards
Uffe
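
A minimal sketch of the genpd on/off notifier registration Ulf refers to,
assuming a hypothetical consumer device that is already attached to the
cluster genpd (for example via a "power-domains" devicetree property):

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int my_genpd_notify(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	/* May be called in atomic context for IRQ-safe CPU domains. */
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* The domain is about to be powered off. */
		break;
	case GENPD_NOTIFY_ON:
		/* The domain has been powered on again. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_genpd_nb = {
	.notifier_call = my_genpd_notify,
};

static int my_consumer_probe(struct device *dev)
{
	/* dev must be in the cluster power domain for this to work. */
	return dev_pm_genpd_add_notifier(dev, &my_genpd_nb);
}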
Nick Hu April 29, 2024, 4:22 p.m. UTC | #3
Hi Ulf

On Mon, Apr 29, 2024 at 10:32 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
>
> On Mon, 26 Feb 2024 at 07:51, Nick Hu <nick.hu@sifive.com> wrote:
> >
> > When the CPUs in the same cluster are all in the idle state, the kernel
> > might put the cluster into a deeper low-power state. Call
> > cluster_pm_enter() before entering the low-power state and call
> > cluster_pm_exit() after the cluster has woken up.
> >
> > Signed-off-by: Nick Hu <nick.hu@sifive.com>
>
> I was not cc'ed on this patch, but noticed that it got queued up
> recently. Sorry for not noticing it earlier.
>
> If not too late, can you please drop/revert it? We should really move
> away from the CPU cluster notifiers. See more information below.
>
> > ---
> >  drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
> >  1 file changed, 22 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > index e8094fc92491..298dc76a00cf 100644
> > --- a/drivers/cpuidle/cpuidle-riscv-sbi.c
> > +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > @@ -394,6 +394,7 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> >  {
> >         struct genpd_power_state *state = &pd->states[pd->state_idx];
> >         u32 *pd_state;
> > +       int ret;
> >
> >         if (!state->data)
> >                 return 0;
> > @@ -401,6 +402,10 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> >         if (!sbi_cpuidle_pd_allow_domain_state)
> >                 return -EBUSY;
> >
> > +       ret = cpu_cluster_pm_enter();
> > +       if (ret)
> > +               return ret;
>
> Rather than using the CPU cluster notifiers, consumers of the genpd
> can register themselves to receive genpd on/off notifiers.
>
> In other words, none of this should be needed, right?
>
Thanks for the feedback!
Maybe I'm missing something, but I'm wondering about a case like the
one below:
If we have a shared L2 cache controller inside the CPU cluster power
domain and we add this controller as a consumer of the power domain,
shouldn't the genpd invoke the domain idle state only after the shared
L2 cache controller is suspended?
Is there a way to put the L2 cache down while all CPUs in the same
cluster are idle?
> [...]
>
> Kind regards
> Uffe
Nick Hu April 29, 2024, 4:26 p.m. UTC | #4
On Tue, Apr 30, 2024 at 12:22 AM Nick Hu <nick.hu@sifive.com> wrote:
>
> Hi Ulf
>
> On Mon, Apr 29, 2024 at 10:32 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
> >
> > On Mon, 26 Feb 2024 at 07:51, Nick Hu <nick.hu@sifive.com> wrote:
> > >
> > > When the CPUs in the same cluster are all in the idle state, the kernel
> > > might put the cluster into a deeper low-power state. Call
> > > cluster_pm_enter() before entering the low-power state and call
> > > cluster_pm_exit() after the cluster has woken up.
> > >
> > > Signed-off-by: Nick Hu <nick.hu@sifive.com>
> >
> > I was not cc'ed on this patch, but noticed that it got queued up
> > recently. Sorry for not noticing it earlier.
> >
> > If not too late, can you please drop/revert it? We should really move
> > away from the CPU cluster notifiers. See more information below.
> >
> > > ---
> > >  drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
> > >  1 file changed, 22 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > index e8094fc92491..298dc76a00cf 100644
> > > --- a/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > @@ -394,6 +394,7 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> > >  {
> > >         struct genpd_power_state *state = &pd->states[pd->state_idx];
> > >         u32 *pd_state;
> > > +       int ret;
> > >
> > >         if (!state->data)
> > >                 return 0;
> > > @@ -401,6 +402,10 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> > >         if (!sbi_cpuidle_pd_allow_domain_state)
> > >                 return -EBUSY;
> > >
> > > +       ret = cpu_cluster_pm_enter();
> > > +       if (ret)
> > > +               return ret;
> >
> > Rather than using the CPU cluster notifiers, consumers of the genpd
> > can register themselves to receive genpd on/off notifiers.
> >
> > In other words, none of this should be needed, right?
> >
> Thanks for the feedback!
> Maybe I'm missing something, but I'm wondering about a case like the
> one below:
> If we have a shared L2 cache controller inside the CPU cluster power
> domain and we add this controller as a consumer of the power domain,
> shouldn't the genpd invoke the domain idle state only after the shared
> L2 cache controller is suspended?
> Is there a way to put the L2 cache down while all CPUs in the same
> cluster are idle?
> > [...]
Sorry, I made a mistake in my second question.
Updated question:
Is there a way to save the L2 cache state while all CPUs in the same
cluster are idle and the cluster could be powered down?
> >
> > Kind regards
> > Uffe
Palmer Dabbelt April 29, 2024, 8:59 p.m. UTC | #5
On Mon, 29 Apr 2024 07:32:12 PDT (-0700), ulf.hansson@linaro.org wrote:
> On Mon, 26 Feb 2024 at 07:51, Nick Hu <nick.hu@sifive.com> wrote:
>>
>> When the CPUs in the same cluster are all in the idle state, the kernel
>> might put the cluster into a deeper low-power state. Call
>> cluster_pm_enter() before entering the low-power state and call
>> cluster_pm_exit() after the cluster has woken up.
>>
>> Signed-off-by: Nick Hu <nick.hu@sifive.com>
>
> I was not cc'ed on this patch, but noticed that it got queued up
> recently. Sorry for not noticing it earlier.
>
> If not too late, can you please drop/revert it? We should really move
> away from the CPU cluster notifiers. See more information below.

Sorry about that, I'll toss it.  I'm testing some other stuff right now 
so it might miss today's linux-next.

>> ---
>>  drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
>>  1 file changed, 22 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
>> index e8094fc92491..298dc76a00cf 100644
>> --- a/drivers/cpuidle/cpuidle-riscv-sbi.c
>> +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
>> @@ -394,6 +394,7 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
>>  {
>>         struct genpd_power_state *state = &pd->states[pd->state_idx];
>>         u32 *pd_state;
>> +       int ret;
>>
>>         if (!state->data)
>>                 return 0;
>> @@ -401,6 +402,10 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
>>         if (!sbi_cpuidle_pd_allow_domain_state)
>>                 return -EBUSY;
>>
>> +       ret = cpu_cluster_pm_enter();
>> +       if (ret)
>> +               return ret;
>
> Rather than using the CPU cluster notifiers, consumers of the genpd
> can register themselves to receive genpd on/off notifiers.
>
> In other words, none of this should be needed, right?
>
> [...]
>
> Kind regards
> Uffe
Ulf Hansson April 30, 2024, 8:12 a.m. UTC | #6
On Mon, 29 Apr 2024 at 18:26, Nick Hu <nick.hu@sifive.com> wrote:
>
> On Tue, Apr 30, 2024 at 12:22 AM Nick Hu <nick.hu@sifive.com> wrote:
> >
> > Hi Ulf
> >
> > On Mon, Apr 29, 2024 at 10:32 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
> > >
> > > On Mon, 26 Feb 2024 at 07:51, Nick Hu <nick.hu@sifive.com> wrote:
> > > >
> > > > When the CPUs in the same cluster are all in the idle state, the kernel
> > > > might put the cluster into a deeper low-power state. Call
> > > > cluster_pm_enter() before entering the low-power state and call
> > > > cluster_pm_exit() after the cluster has woken up.
> > > >
> > > > Signed-off-by: Nick Hu <nick.hu@sifive.com>
> > >
> > > I was not cc'ed on this patch, but noticed that it got queued up
> > > recently. Sorry for not noticing it earlier.
> > >
> > > If not too late, can you please drop/revert it? We should really move
> > > away from the CPU cluster notifiers. See more information below.
> > >
> > > > ---
> > > >  drivers/cpuidle/cpuidle-riscv-sbi.c | 24 ++++++++++++++++++++++--
> > > >  1 file changed, 22 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > > index e8094fc92491..298dc76a00cf 100644
> > > > --- a/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > > +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
> > > > @@ -394,6 +394,7 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> > > >  {
> > > >         struct genpd_power_state *state = &pd->states[pd->state_idx];
> > > >         u32 *pd_state;
> > > > +       int ret;
> > > >
> > > >         if (!state->data)
> > > >                 return 0;
> > > > @@ -401,6 +402,10 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
> > > >         if (!sbi_cpuidle_pd_allow_domain_state)
> > > >                 return -EBUSY;
> > > >
> > > > +       ret = cpu_cluster_pm_enter();
> > > > +       if (ret)
> > > > +               return ret;
> > >
> > > Rather than using the CPU cluster notifiers, consumers of the genpd
> > > can register themselves to receive genpd on/off notifiers.
> > >
> > > In other words, none of this should be needed, right?
> > >
> > Thanks for the feedback!
> > Maybe I'm missing something, but I'm wondering about a case like the
> > one below:
> > If we have a shared L2 cache controller inside the CPU cluster power
> > domain and we add this controller as a consumer of the power domain,
> > shouldn't the genpd invoke the domain idle state only after the shared
> > L2 cache controller is suspended?
> > Is there a way to put the L2 cache down while all CPUs in the same
> > cluster are idle?
> > > [...]
> Sorry, I made a mistake in my second question.
> Updated question:
> Is there a way to save the L2 cache state while all CPUs in the same
> cluster are idle and the cluster could be powered down?

If the L2 cache is a consumer of the cluster, the consumer driver for
the L2 cache should register for genpd on/off notifiers.

The device representing the L2 cache needs to be enabled for runtime
PM, to be taken into account correctly by the cluster genpd. In this
case, the device should most likely remain runtime suspended, but
instead rely on the genpd on/off notifiers to understand when
save/restore of the cache states should be done.

Kind regards
Uffe
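
To make that concrete, a minimal sketch of the scheme Ulf describes,
assuming a hypothetical L2 cache platform driver whose device sits in the
cluster power domain; the l2_save_state()/l2_restore_state() helpers are
placeholders for the platform-specific save/restore:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/* Hypothetical platform-specific helpers. */
void l2_save_state(void);
void l2_restore_state(void);

static int l2_genpd_notify(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Cluster domain is about to power off: save cache state. */
		l2_save_state();
		break;
	case GENPD_NOTIFY_ON:
		/* Cluster domain is powered on again: restore cache state. */
		l2_restore_state();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block l2_genpd_nb = {
	.notifier_call = l2_genpd_notify,
};

static int l2_probe(struct platform_device *pdev)
{
	/*
	 * Enable runtime PM so the cluster genpd accounts for this device,
	 * but leave it runtime-suspended so the domain can still power off;
	 * the notifier above does the actual work around domain off/on.
	 */
	int ret = devm_pm_runtime_enable(&pdev->dev);

	if (ret)
		return ret;

	return dev_pm_genpd_add_notifier(&pdev->dev, &l2_genpd_nb);
}

static struct platform_driver l2_cache_driver = {
	.probe = l2_probe,
	.driver = {
		.name = "my-l2-cache",	/* hypothetical */
	},
};
module_platform_driver(l2_cache_driver);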
Patch

diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index e8094fc92491..298dc76a00cf 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -394,6 +394,7 @@  static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
 {
 	struct genpd_power_state *state = &pd->states[pd->state_idx];
 	u32 *pd_state;
+	int ret;
 
 	if (!state->data)
 		return 0;
@@ -401,6 +402,10 @@  static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
 	if (!sbi_cpuidle_pd_allow_domain_state)
 		return -EBUSY;
 
+	ret = cpu_cluster_pm_enter();
+	if (ret)
+		return ret;
+
 	/* OSI mode is enabled, set the corresponding domain state. */
 	pd_state = state->data;
 	sbi_set_domain_state(*pd_state);
@@ -408,6 +413,19 @@  static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
 	return 0;
 }
 
+static int sbi_cpuidle_pd_power_on(struct generic_pm_domain *pd)
+{
+	struct genpd_power_state *state = &pd->states[pd->state_idx];
+
+	if (!state->data)
+		return 0;
+
+	if (!sbi_cpuidle_pd_allow_domain_state)
+		return -EBUSY;
+
+	return cpu_cluster_pm_exit();
+}
+
 struct sbi_pd_provider {
 	struct list_head link;
 	struct device_node *node;
@@ -433,10 +451,12 @@  static int sbi_pd_init(struct device_node *np)
 	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
 
 	/* Allow power off when OSI is available. */
-	if (sbi_cpuidle_use_osi)
+	if (sbi_cpuidle_use_osi) {
 		pd->power_off = sbi_cpuidle_pd_power_off;
-	else
+		pd->power_on = sbi_cpuidle_pd_power_on;
+	} else {
 		pd->flags |= GENPD_FLAG_ALWAYS_ON;
+	}
 
 	/* Use governor for CPU PM domains if it has some states to manage. */
 	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;