[v2,5/7] drm/panfrost: Add support for multiple power domains

Message ID 20200108052337.65916-6-drinkcat@chromium.org (mailing list archive)
State New, archived
Series Add dts for mt8183 GPU (and misc panfrost patches)

Commit Message

Nicolas Boichat Jan. 8, 2020, 5:23 a.m. UTC
When there is a single power domain per device, the core will
ensure the power domain is switched on.

However, when there are multiple ones, as in the MT8183 Bifrost GPU,
we need to handle them in driver code.

Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
---

The downstream driver we use on chromeos-4.19 currently uses 2
additional devices in the device tree to accommodate this [1], but
I believe this solution is cleaner.

[1] https://chromium.googlesource.com/chromiumos/third_party/kernel/+/refs/heads/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#31

drivers/gpu/drm/panfrost/panfrost_device.c | 87 ++++++++++++++++++++--
 drivers/gpu/drm/panfrost/panfrost_device.h |  4 +
 2 files changed, 83 insertions(+), 8 deletions(-)

Comments

Steven Price Jan. 9, 2020, 2:08 p.m. UTC | #1
On 08/01/2020 05:23, Nicolas Boichat wrote:
> When there is a single power domain per device, the core will
> ensure the power domain is switched on.
> 
> However, when there are multiple ones, as in the MT8183 Bifrost GPU,
> we need to handle them in driver code.
> 
> Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
> ---
> 
> The downstream driver we use on chromeos-4.19 currently uses 2
> additional devices in the device tree to accommodate this [1], but
> I believe this solution is cleaner.

I'm not sure what is best, but it seems odd to encode this into the Panfrost driver itself - it doesn't have any knowledge of what to do with these power domains. The naming of the domains looks suspiciously like someone thought that e.g. only half of the cores could be powered, but it doesn't look like that was implemented in the linked chromeos driver, and anyway that is *meant* to be automatic in the hardware! (I.e. if you only power up one core in one core stack then the PDC should only enable the power domain for that set of cores.)

Steve

> 
> [1] https://chromium.googlesource.com/chromiumos/third_party/kernel/+/refs/heads/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#31
> 
> [...]
Nicolas Boichat Jan. 10, 2020, 1:53 a.m. UTC | #2
+Ulf to keep me honest on the power domains

On Thu, Jan 9, 2020 at 10:08 PM Steven Price <steven.price@arm.com> wrote:
>
> On 08/01/2020 05:23, Nicolas Boichat wrote:
> > When there is a single power domain per device, the core will
> > ensure the power domain is switched on.
> >
> > However, when there are multiple ones, as in the MT8183 Bifrost GPU,
> > we need to handle them in driver code.
> >
> > Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
> > ---
> >
> > The downstream driver we use on chromeos-4.19 currently uses 2
> > additional devices in the device tree to accommodate this [1], but
> > I believe this solution is cleaner.
>
> I'm not sure what is best, but it seems odd to encode this into the Panfrost driver itself - it doesn't have any knowledge of what to do with these power domains. The naming of the domains looks suspiciously like someone thought that e.g. only half of the cores could be powered, but it doesn't look like that was implemented in the linked chromeos driver, and anyway that is *meant* to be automatic in the hardware! (I.e. if you only power up one core in one core stack then the PDC should only enable the power domain for that set of cores.)

This is actually implemented in the Chrome OS driver [1]. IMHO power
domains are a bit confusing [2]:
 i. If there's only 1 power domain for the device, then the core takes
care of powering on the domain (based on pm_runtime).
 ii. If there's more than 1 power domain, then the driver needs to
link the domains manually.

So the Chrome OS [1] driver takes approach (i), by creating 3 devices,
each with 1 power domain that is switched on/off automatically using
pm_runtime.

This patch takes approach (ii) with device links to handle the extra domains.

I believe the latter is more upstream-friendly, but, as always,
suggestions welcome.

[2] https://elixir.bootlin.com/linux/latest/source/drivers/base/power/domain.c#L2466
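
For reference, here is a simplified paraphrase of the core logic that [2]
points at, genpd_dev_pm_attach() in drivers/base/power/domain.c. The
internal helper at the end varies across kernel versions, so treat this as
a sketch rather than exact upstream code:

/*
 * Sketch of genpd_dev_pm_attach(): the core only auto-attaches a PM
 * domain when the device lists exactly one. With two or more domains
 * it backs off, which is why this patch attaches them by hand.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/* Devices with multiple PM domains must be attached separately. */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	/* Internal helper; its exact signature varies across versions. */
	return __genpd_dev_pm_attach(dev, dev, 0, true);
}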

> Steve
>
> >
> > [1] https://chromium.googlesource.com/chromiumos/third_party/kernel/+/refs/heads/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#31
> >
> > [...]
Ulf Hansson Jan. 27, 2020, 7:55 a.m. UTC | #3
On Fri, 10 Jan 2020 at 02:53, Nicolas Boichat <drinkcat@chromium.org> wrote:
>
> +Ulf to keep me honest on the power domains
>
> On Thu, Jan 9, 2020 at 10:08 PM Steven Price <steven.price@arm.com> wrote:
> >
> > [...]
>
> This is actually implemented in the Chrome OS driver [1]. IMHO power
> domains are a bit confusing [2]:
>  i. If there's only 1 power domain for the device, then the core takes
> care of powering on the domain (based on pm_runtime).
>  ii. If there's more than 1 power domain, then the driver needs to
> link the domains manually.
>
> So the Chrome OS [1] driver takes approach (i), by creating 3 devices,
> each with 1 power domain that is switched on/off automatically using
> pm_runtime.
>
> This patch takes approach (ii) with device links to handle the extra domains.
>
> I believe the latter is more upstream-friendly, but, as always,
> suggestions welcome.

Apologies for the late reply. A few comments below.

If the device is partitioned across multiple PM domains (it may need
several power rails), then that should be described with the "multi PM
domain" approach in the DTS. As in (ii).

Using "device links" is however optional, as it may depend on the use
case. If all the PM domains need to be powered on/off together,
then it's certainly recommended to use device links.

However, if the PM domains can be powered on/off independently (one
can be on while another is off), then it's probably easier to operate
directly with runtime PM, on the returned struct device * from
dev_pm_domain_attach_by_id().

Also note, there is dev_pm_domain_attach_by_name(), which allows us to
specify a name for the PM domain in the DTS, rather than using an
index. This may be more future-proof to use.

[...]

Hope this helps.

Kind regards
Uffe
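
For illustration, here is a minimal sketch of the alternative Ulf
describes: driving runtime PM directly on the virtual devices returned by
dev_pm_domain_attach_by_id(), for domains that can be powered on/off
independently. It reuses the pm_domain_devs array from this patch, but the
helper names are hypothetical and not part of the patch:

#include <linux/pm_runtime.h>

/*
 * Hypothetical helpers (not in the patch): power a single domain on or
 * off by taking/dropping a runtime PM reference on the virtual device
 * that dev_pm_domain_attach_by_id() returned, instead of tying every
 * domain to the GPU with device links.
 */
static int panfrost_pm_domain_power_on(struct panfrost_device *pfdev, int i)
{
	/* Resumes (powers on) only this domain's virtual device. */
	return pm_runtime_get_sync(pfdev->pm_domain_devs[i]);
}

static void panfrost_pm_domain_power_off(struct panfrost_device *pfdev, int i)
{
	/* Drops the reference; the domain may power off when unused. */
	pm_runtime_put_sync(pfdev->pm_domain_devs[i]);
}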
Nicolas Boichat Feb. 7, 2020, 2:04 a.m. UTC | #4
Hi Ulf,

On Mon, Jan 27, 2020 at 3:55 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
>
> [...]
>
> Apologies for the late reply. A few comments below.

No worries, than for the helpful reply!

> If the device is partitioned across multiple PM domains (it may need
> several power rails), then that should be described with the "multi PM
> domain" approach in the DTS. As in (ii).
>
> Using "device links" is however optional, as it may depend on the use
> case. If all the PM domains need to be powered on/off together,
> then it's certainly recommended to use device links.

That's the case here: there's no support for turning the domains
on/off individually.

> However, if the PM domains can be powered on/off independently (one
> can be on while another is off), then it's probably easier to operate
> directly with runtime PM, on the returned struct device * from
> dev_pm_domain_attach_by_id().
>
> Also note, there is dev_pm_domain_attach_by_name(), which allows us to
> specify a name for the PM domain in the DTS, rather than using an
> index. This may be more future-proof to use.

Agreed, it's probably better to have actual names than just "counting"
the number of domains like I do, especially as we have a compatible
struct anyway. I'll update the patch.

> [...]
>
> Hope this helps.
>
> Kind regards
> Uffe
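
For reference, a rough sketch of what the dev_pm_domain_attach_by_name()
variant could look like. The domain names below are invented placeholders,
not taken from the patch or from the MT8183 binding:

/*
 * Hypothetical by-name variant of panfrost_pm_domain_init(). The names
 * are illustrative assumptions; the real ones would come from the
 * binding's power-domain-names property.
 */
static const char * const pm_domain_names[] = { "core0", "core1", "core2" };

static int panfrost_pm_domain_init_by_name(struct panfrost_device *pfdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(pm_domain_names); i++) {
		struct device *d;

		d = dev_pm_domain_attach_by_name(pfdev->dev,
						 pm_domain_names[i]);
		/* May return NULL (name absent) or an ERR_PTR on failure. */
		if (IS_ERR_OR_NULL(d)) {
			err = d ? PTR_ERR(d) : -ENODATA;
			goto err_detach;
		}
		pfdev->pm_domain_devs[i] = d;
	}

	return 0;

err_detach:
	panfrost_pm_domain_fini(pfdev);
	return err;
}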
Nicolas Boichat Feb. 7, 2020, 2:04 a.m. UTC | #5
On Fri, Feb 7, 2020 at 10:04 AM Nicolas Boichat <drinkcat@chromium.org> wrote:
>
> Hi Ulf,
>
> On Mon, Jan 27, 2020 at 3:55 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
> >
> > [...]
> > Apologies for the late reply. A few comments below.
>
> No worries, than for the helpful reply!

(s/than/thanks/... ,-P)

> [...]

Patch

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index a0b0a6fef8b4e63..c6e9e059de94a4d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@ 
 #include <linux/clk.h>
 #include <linux/reset.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/regulator/consumer.h>
 
 #include "panfrost_device.h"
@@ -131,6 +132,67 @@  static void panfrost_regulator_fini(struct panfrost_device *pfdev)
 	regulator_disable(pfdev->regulator_sram);
 }
 
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+		if (!pfdev->pm_domain_devs[i])
+			break;
+
+		if (pfdev->pm_domain_links[i])
+			device_link_del(pfdev->pm_domain_links[i]);
+
+		dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+	}
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+	int err;
+	int i, num_domains;
+
+	num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+						 "power-domains",
+						 "#power-domain-cells");
+	/* Single domains are handled by the core. */
+	if (num_domains < 2)
+		return 0;
+
+	if (num_domains > ARRAY_SIZE(pfdev->pm_domain_devs)) {
+		dev_err(pfdev->dev, "Too many pm-domains: %d\n", num_domains);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_domains; i++) {
+		pfdev->pm_domain_devs[i] =
+			dev_pm_domain_attach_by_id(pfdev->dev, i);
+		if (IS_ERR(pfdev->pm_domain_devs[i])) {
+			err = PTR_ERR(pfdev->pm_domain_devs[i]);
+			pfdev->pm_domain_devs[i] = NULL;
+			dev_err(pfdev->dev,
+				"failed to get pm-domain %d: %d\n", i, err);
+			goto err;
+		}
+
+		pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+				pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+				DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+		if (!pfdev->pm_domain_links[i]) {
+			dev_err(pfdev->pm_domain_devs[i],
+				"adding device link failed!\n");
+			err = -ENODEV;
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	panfrost_pm_domain_fini(pfdev);
+	return err;
+}
+
 int panfrost_device_init(struct panfrost_device *pfdev)
 {
 	int err;
@@ -161,37 +223,45 @@  int panfrost_device_init(struct panfrost_device *pfdev)
 		goto err_out1;
 	}
 
+	err = panfrost_pm_domain_init(pfdev);
+	if (err) {
+		dev_err(pfdev->dev, "pm_domain init failed %d\n", err);
+		goto err_out2;
+	}
+
 	res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
 	pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
 	if (IS_ERR(pfdev->iomem)) {
 		dev_err(pfdev->dev, "failed to ioremap iomem\n");
 		err = PTR_ERR(pfdev->iomem);
-		goto err_out2;
+		goto err_out3;
 	}
 
 	err = panfrost_gpu_init(pfdev);
 	if (err)
-		goto err_out2;
+		goto err_out3;
 
 	err = panfrost_mmu_init(pfdev);
 	if (err)
-		goto err_out3;
+		goto err_out4;
 
 	err = panfrost_job_init(pfdev);
 	if (err)
-		goto err_out4;
+		goto err_out5;
 
 	err = panfrost_perfcnt_init(pfdev);
 	if (err)
-		goto err_out5;
+		goto err_out6;
 
 	return 0;
-err_out5:
+err_out6:
 	panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
 	panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
 	panfrost_gpu_fini(pfdev);
+err_out3:
+	panfrost_pm_domain_fini(pfdev);
 err_out2:
 	panfrost_reset_fini(pfdev);
 err_out1:
@@ -208,6 +278,7 @@  void panfrost_device_fini(struct panfrost_device *pfdev)
 	panfrost_mmu_fini(pfdev);
 	panfrost_gpu_fini(pfdev);
 	panfrost_reset_fini(pfdev);
+	panfrost_pm_domain_fini(pfdev);
 	panfrost_regulator_fini(pfdev);
 	panfrost_clk_fini(pfdev);
 }
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index a124334d69e7e93..92d471676fc7823 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -19,6 +19,7 @@  struct panfrost_job;
 struct panfrost_perfcnt;
 
 #define NUM_JOB_SLOTS 3
+#define MAX_PM_DOMAINS 3
 
 struct panfrost_features {
 	u16 id;
@@ -62,6 +63,9 @@  struct panfrost_device {
 	struct regulator *regulator;
 	struct regulator *regulator_sram;
 	struct reset_control *rstc;
+	/* pm_domains for devices with more than one. */
+	struct device *pm_domain_devs[MAX_PM_DOMAINS];
+	struct device_link *pm_domain_links[MAX_PM_DOMAINS];
 
 	struct panfrost_features features;