drm/tidss: Power up attached PM domains on probe

Message ID 20231009075018.2836020-1-devarsht@ti.com (mailing list archive)
State New, archived
Series drm/tidss: Power up attached PM domains on probe

Commit Message

Devarsh Thakkar Oct. 9, 2023, 7:50 a.m. UTC
Some SoCs such as AM62P have dedicated power domains
for OLDI which need to be powered on separately along
with the display controller.

So, during driver probe, power up all attached PM domains
enumerated in the devicetree node for DSS.

This also prepares the base for adding display support for AM62P.

Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
---
 drivers/gpu/drm/tidss/tidss_drv.c | 76 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/tidss/tidss_drv.h |  5 ++
 2 files changed, 81 insertions(+)

Comments

Devarsh Thakkar Oct. 9, 2023, 11:10 a.m. UTC | #1
Hi Maxime,

Thanks for the review.

On 09/10/23 14:53, Maxime Ripard wrote:
> Hi Devarsh,
> 
> On Mon, Oct 09, 2023 at 01:20:18PM +0530, Devarsh Thakkar wrote:
>> Some SoCs such as AM62P have dedicated power domains
>> for OLDI which need to be powered on separately along
>> with display controller.
>>
>> So during driver probe, power up all attached PM domains
>> enumerated in devicetree node for DSS.
>>
>> This also prepares base to add display support for AM62P.
>>
>> Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
>> ---
>>  drivers/gpu/drm/tidss/tidss_drv.c | 76 +++++++++++++++++++++++++++++++
>>  drivers/gpu/drm/tidss/tidss_drv.h |  5 ++
>>  2 files changed, 81 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
>> index 4d063eb9cd0b..a703a27d17bf 100644
>> --- a/drivers/gpu/drm/tidss/tidss_drv.c
>> +++ b/drivers/gpu/drm/tidss/tidss_drv.c
>> @@ -8,6 +8,7 @@
>>  #include <linux/of.h>
>>  #include <linux/module.h>
>>  #include <linux/pm_runtime.h>
>> +#include <linux/pm_domain.h>
>>  
>>  #include <drm/drm_atomic.h>
>>  #include <drm/drm_atomic_helper.h>
>> @@ -114,6 +115,72 @@ static const struct drm_driver tidss_driver = {
>>  	.minor			= 0,
>>  };
>>  
>> +static int tidss_detach_pm_domains(struct tidss_device *tidss)
>> +{
>> +	int i;
>> +
>> +	if (tidss->num_domains <= 1)
>> +		return 0;
>> +
>> +	for (i = 0; i < tidss->num_domains; i++) {
>> +		if (tidss->pd_link[i] && !IS_ERR(tidss->pd_link[i]))
>> +			device_link_del(tidss->pd_link[i]);
>> +		if (tidss->pd_dev[i] && !IS_ERR(tidss->pd_dev[i]))
>> +			dev_pm_domain_detach(tidss->pd_dev[i], true);
>> +		tidss->pd_dev[i] = NULL;
>> +		tidss->pd_link[i] = NULL;
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static int tidss_attach_pm_domains(struct tidss_device *tidss)
>> +{
>> +	struct device *dev = tidss->dev;
>> +	int i;
>> +	int ret;
>> +	struct platform_device *pdev = to_platform_device(dev);
>> +	struct device_node *np = pdev->dev.of_node;
>> +
>> +	tidss->num_domains = of_count_phandle_with_args(np, "power-domains",
>> +							"#power-domain-cells");
>> +	if (tidss->num_domains <= 1) {
>> +		dev_dbg(dev, "One or less power domains, no need to do attach domains\n");
>> +		return 0;
>> +	}
>> +
>> +	tidss->pd_dev = devm_kmalloc_array(dev, tidss->num_domains,
>> +					   sizeof(*tidss->pd_dev), GFP_KERNEL);
>> +	if (!tidss->pd_dev)
>> +		return -ENOMEM;
>> +
>> +	tidss->pd_link = devm_kmalloc_array(dev, tidss->num_domains,
>> +					    sizeof(*tidss->pd_link), GFP_KERNEL);
>> +	if (!tidss->pd_link)
>> +		return -ENOMEM;
>> +
>> +	for (i = 0; i < tidss->num_domains; i++) {
>> +		tidss->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
>> +		if (IS_ERR(tidss->pd_dev[i])) {
>> +			ret = PTR_ERR(tidss->pd_dev[i]);
>> +			goto fail;
>> +		}
>> +
>> +		tidss->pd_link[i] = device_link_add(dev, tidss->pd_dev[i],
>> +						    DL_FLAG_STATELESS |
>> +						    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
>> +		if (!tidss->pd_link[i]) {
>> +			ret = -EINVAL;
>> +			goto fail;
>> +		}
>> +	}
>> +
>> +	return 0;
>> +fail:
>> +	tidss_detach_pm_domains(tidss);
>> +	return ret;
>> +}
> 
> My understanding is that this will be done automatically at probe time.
> Why do we need to roll our own there? A comment on top of the function
> and the commit log would help.

By default, the TI SCI power domain controller driver only powers up the one
power domain associated with the device. With AM62P, we now have separate
power domains for the OLDI TX ports (for more efficient power-saving control),
which are distinct from the core DSS device power domain, so this patch powers
on those associated power domains too if they are enumerated in the device tree.

Regards
Devarsh

> 
> Thanks!
> Maxime
Devarsh Thakkar Oct. 12, 2023, 8:45 a.m. UTC | #2
Hi Maxime,

On 09/10/23 16:40, Devarsh Thakkar wrote:
> Hi Maxime,
> 
> Thanks for the review.
> 
> On 09/10/23 14:53, Maxime Ripard wrote:
>> Hi Devarsh,
>>
>> On Mon, Oct 09, 2023 at 01:20:18PM +0530, Devarsh Thakkar wrote:
>>> Some SoCs such as AM62P have dedicated power domains
>>> for OLDI which need to be powered on separately along
>>> with display controller.
>>>
>>> So during driver probe, power up all attached PM domains
>>> enumerated in devicetree node for DSS.
>>>
>>> This also prepares base to add display support for AM62P.
>>>
>>> Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
>>> ---
>>>   drivers/gpu/drm/tidss/tidss_drv.c | 76 +++++++++++++++++++++++++++++++
>>>   drivers/gpu/drm/tidss/tidss_drv.h |  5 ++
>>>   2 files changed, 81 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
>>> index 4d063eb9cd0b..a703a27d17bf 100644
>>> --- a/drivers/gpu/drm/tidss/tidss_drv.c
>>> +++ b/drivers/gpu/drm/tidss/tidss_drv.c
>>> @@ -8,6 +8,7 @@
>>>   #include <linux/of.h>
>>>   #include <linux/module.h>
>>>   #include <linux/pm_runtime.h>
>>> +#include <linux/pm_domain.h>
>>>   
>>>   #include <drm/drm_atomic.h>
>>>   #include <drm/drm_atomic_helper.h>
>>> @@ -114,6 +115,72 @@ static const struct drm_driver tidss_driver = {
>>>   	.minor			= 0,
>>>   };
>>>   
>>> +static int tidss_detach_pm_domains(struct tidss_device *tidss)
>>> +{
>>> +	int i;
>>> +
>>> +	if (tidss->num_domains <= 1)
>>> +		return 0;
>>> +
>>> +	for (i = 0; i < tidss->num_domains; i++) {
>>> +		if (tidss->pd_link[i] && !IS_ERR(tidss->pd_link[i]))
>>> +			device_link_del(tidss->pd_link[i]);
>>> +		if (tidss->pd_dev[i] && !IS_ERR(tidss->pd_dev[i]))
>>> +			dev_pm_domain_detach(tidss->pd_dev[i], true);
>>> +		tidss->pd_dev[i] = NULL;
>>> +		tidss->pd_link[i] = NULL;
>>> +	}
>>> +
>>> +	return 0;
>>> +}
>>> +
>>> +static int tidss_attach_pm_domains(struct tidss_device *tidss)
>>> +{
>>> +	struct device *dev = tidss->dev;
>>> +	int i;
>>> +	int ret;
>>> +	struct platform_device *pdev = to_platform_device(dev);
>>> +	struct device_node *np = pdev->dev.of_node;
>>> +
>>> +	tidss->num_domains = of_count_phandle_with_args(np, "power-domains",
>>> +							"#power-domain-cells");
>>> +	if (tidss->num_domains <= 1) {
>>> +		dev_dbg(dev, "One or less power domains, no need to do attach domains\n");
>>> +		return 0;
>>> +	}
>>> +
>>> +	tidss->pd_dev = devm_kmalloc_array(dev, tidss->num_domains,
>>> +					   sizeof(*tidss->pd_dev), GFP_KERNEL);
>>> +	if (!tidss->pd_dev)
>>> +		return -ENOMEM;
>>> +
>>> +	tidss->pd_link = devm_kmalloc_array(dev, tidss->num_domains,
>>> +					    sizeof(*tidss->pd_link), GFP_KERNEL);
>>> +	if (!tidss->pd_link)
>>> +		return -ENOMEM;
>>> +
>>> +	for (i = 0; i < tidss->num_domains; i++) {
>>> +		tidss->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
>>> +		if (IS_ERR(tidss->pd_dev[i])) {
>>> +			ret = PTR_ERR(tidss->pd_dev[i]);
>>> +			goto fail;
>>> +		}
>>> +
>>> +		tidss->pd_link[i] = device_link_add(dev, tidss->pd_dev[i],
>>> +						    DL_FLAG_STATELESS |
>>> +						    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
>>> +		if (!tidss->pd_link[i]) {
>>> +			ret = -EINVAL;
>>> +			goto fail;
>>> +		}
>>> +	}
>>> +
>>> +	return 0;
>>> +fail:
>>> +	tidss_detach_pm_domains(tidss);
>>> +	return ret;
>>> +}
>>
>> My understanding is that this will be done automatically at probe time.
>> Why do we need to roll our own there? A comment on top of the function
>> and the commit log would help.
> 
> By default, the TI SCI power domain controller driver only powers up one power
> domain associated with the device. With AM62P, we now have separate power domains
> for OLDI Tx ports (for more efficient power-saving control) which is different
> from core DSS device power domain, so this patch powers on the associated
> power domains too if enumerated in device-tree.
> 

My bad, I think it is not the TI SCI power domain controller driver but
the kernel core itself which has a check that only allows one power
domain to be auto-attached per device (thanks to Vignesh for pointing this out):

	/*
	 * Devices with multiple PM domains must be attached separately,
	 * as we can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/tree/drivers/base/power/domain.c?h=next-20231012#n2828

But anyway, I talked to the team internally and there are plans to
have a separate OLDI bridge driver which would inherit these new power
domains, so I guess we may not need this patch at all.
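
Roughly, the idea is that the OLDI block gets modelled as its own device
with a single "power-domains" entry, so the driver core attaches that
domain automatically and plain runtime PM is enough. A minimal sketch of
what that could look like (driver and function names below are only
placeholders, nothing from an actual series yet):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/*
 * Placeholder OLDI bridge probe: the single OLDI power domain listed in
 * the bridge's own DT node is attached by the driver core, so no manual
 * dev_pm_domain_attach_by_id() loop is needed here.
 */
static int tidss_oldi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	/* Power the OLDI domain up while setting up the bridge. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... read the OLDI mode from DT, register the drm_bridge, etc. ... */

	pm_runtime_put_sync(dev);

	return 0;
}

With that split, the DSS node would keep only its own power domain and the
extra OLDI domains would move to the OLDI node, so neither driver needs a
manual attach loop.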

Regards
Devarsh


> Regards
> Devarsh
> 
>>
>> Thanks!
>> Maxime
Devarsh Thakkar Oct. 12, 2023, 11:40 a.m. UTC | #3
On 09/10/23 13:20, Devarsh Thakkar wrote:
> Some SoCs such as AM62P have dedicated power domains
> for OLDI which need to be powered on separately along
> with display controller.
> 
> So during driver probe, power up all attached PM domains
> enumerated in devicetree node for DSS.
> 
> This also prepares base to add display support for AM62P.
> 

NAK for this patch: as discussed with the team, there are already plans
to have a separate OLDI bridge driver which should eventually handle
the additional power domains.

Sorry for the noise.

Regards
Devarsh
> Signed-off-by: Devarsh Thakkar <devarsht@ti.com>
> ---
>   drivers/gpu/drm/tidss/tidss_drv.c | 76 +++++++++++++++++++++++++++++++
>   drivers/gpu/drm/tidss/tidss_drv.h |  5 ++
>   2 files changed, 81 insertions(+)
> 
> diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
> index 4d063eb9cd0b..a703a27d17bf 100644
> --- a/drivers/gpu/drm/tidss/tidss_drv.c
> +++ b/drivers/gpu/drm/tidss/tidss_drv.c
> @@ -8,6 +8,7 @@
>   #include <linux/of.h>
>   #include <linux/module.h>
>   #include <linux/pm_runtime.h>
> +#include <linux/pm_domain.h>
>   
>   #include <drm/drm_atomic.h>
>   #include <drm/drm_atomic_helper.h>
> @@ -114,6 +115,72 @@ static const struct drm_driver tidss_driver = {
>   	.minor			= 0,
>   };
>   
> +static int tidss_detach_pm_domains(struct tidss_device *tidss)
> +{
> +	int i;
> +
> +	if (tidss->num_domains <= 1)
> +		return 0;
> +
> +	for (i = 0; i < tidss->num_domains; i++) {
> +		if (tidss->pd_link[i] && !IS_ERR(tidss->pd_link[i]))
> +			device_link_del(tidss->pd_link[i]);
> +		if (tidss->pd_dev[i] && !IS_ERR(tidss->pd_dev[i]))
> +			dev_pm_domain_detach(tidss->pd_dev[i], true);
> +		tidss->pd_dev[i] = NULL;
> +		tidss->pd_link[i] = NULL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int tidss_attach_pm_domains(struct tidss_device *tidss)
> +{
> +	struct device *dev = tidss->dev;
> +	int i;
> +	int ret;
> +	struct platform_device *pdev = to_platform_device(dev);
> +	struct device_node *np = pdev->dev.of_node;
> +
> +	tidss->num_domains = of_count_phandle_with_args(np, "power-domains",
> +							"#power-domain-cells");
> +	if (tidss->num_domains <= 1) {
> +		dev_dbg(dev, "One or less power domains, no need to do attach domains\n");
> +		return 0;
> +	}
> +
> +	tidss->pd_dev = devm_kmalloc_array(dev, tidss->num_domains,
> +					   sizeof(*tidss->pd_dev), GFP_KERNEL);
> +	if (!tidss->pd_dev)
> +		return -ENOMEM;
> +
> +	tidss->pd_link = devm_kmalloc_array(dev, tidss->num_domains,
> +					    sizeof(*tidss->pd_link), GFP_KERNEL);
> +	if (!tidss->pd_link)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < tidss->num_domains; i++) {
> +		tidss->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
> +		if (IS_ERR(tidss->pd_dev[i])) {
> +			ret = PTR_ERR(tidss->pd_dev[i]);
> +			goto fail;
> +		}
> +
> +		tidss->pd_link[i] = device_link_add(dev, tidss->pd_dev[i],
> +						    DL_FLAG_STATELESS |
> +						    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
> +		if (!tidss->pd_link[i]) {
> +			ret = -EINVAL;
> +			goto fail;
> +		}
> +	}
> +
> +	return 0;
> +fail:
> +	tidss_detach_pm_domains(tidss);
> +	return ret;
> +}
> +
>   static int tidss_probe(struct platform_device *pdev)
>   {
>   	struct device *dev = &pdev->dev;
> @@ -136,6 +203,13 @@ static int tidss_probe(struct platform_device *pdev)
>   
>   	platform_set_drvdata(pdev, tidss);
>   
> +	/* powering up associated OLDI domains */
> +	ret = tidss_attach_pm_domains(tidss);
> +	if (ret < 0) {
> +		dev_err(dev, "failed to attach power domains %d\n", ret);
> +		return ret;
> +	}
> +
>   	ret = dispc_init(tidss);
>   	if (ret) {
>   		dev_err(dev, "failed to initialize dispc: %d\n", ret);
> @@ -193,6 +267,7 @@ static int tidss_probe(struct platform_device *pdev)
>   	dispc_runtime_suspend(tidss->dispc);
>   #endif
>   	pm_runtime_disable(dev);
> +	tidss_detach_pm_domains(tidss);
>   
>   	return ret;
>   }
> @@ -220,6 +295,7 @@ static void tidss_remove(struct platform_device *pdev)
>   	/* devm allocated dispc goes away with the dev so mark it NULL */
>   	dispc_remove(tidss);
>   
> +	tidss_detach_pm_domains(tidss);
>   	dev_dbg(dev, "%s done\n", __func__);
>   }
>   
> diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
> index d7f27b0b0315..3c8b37b3aba6 100644
> --- a/drivers/gpu/drm/tidss/tidss_drv.h
> +++ b/drivers/gpu/drm/tidss/tidss_drv.h
> @@ -31,6 +31,11 @@ struct tidss_device {
>   
>   	spinlock_t wait_lock;	/* protects the irq masks */
>   	dispc_irq_t irq_mask;	/* enabled irqs in addition to wait_list */
> +
> +	int num_domains; /* Handle attached PM domains */
> +	struct device **pd_dev;
> +	struct device_link **pd_link;
> +
>   };
>   
>   #define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
Maxime Ripard Oct. 12, 2023, 12:40 p.m. UTC | #4
Hi,

On Thu, Oct 12, 2023 at 05:10:06PM +0530, Devarsh Thakkar wrote:
> On 09/10/23 13:20, Devarsh Thakkar wrote:
> > Some SoCs such as AM62P have dedicated power domains
> > for OLDI which need to be powered on separately along
> > with display controller.
> > 
> > So during driver probe, power up all attached PM domains
> > enumerated in devicetree node for DSS.
> > 
> > This also prepares base to add display support for AM62P.
> > 
> 
> NAK, for this patch, as discussed with team there are already plans
> to have separate OLDI bridge driver which should eventually handle
> the additional power domains.

Just for the record in case your current plan doesn't work out and we
need to revisit this patch: my point was that it's something that
deviates by a margin from what drivers are usually expected to do, so we
should document why that deviation is there.

The patch itself looks reasonable to me otherwise.

Maxime
Devarsh Thakkar Oct. 16, 2023, 7:37 a.m. UTC | #5
Hi Maxime,

On 12/10/23 18:10, Maxime Ripard wrote:
> Hi,
> 
> On Thu, Oct 12, 2023 at 05:10:06PM +0530, Devarsh Thakkar wrote:
>> On 09/10/23 13:20, Devarsh Thakkar wrote:
>>> Some SoC's such as AM62P have dedicated power domains
>>> for OLDI which need to be powered on separetely along
>>> with display controller.
>>>
>>> So during driver probe, power up all attached PM domains
>>> enumerated in devicetree node for DSS.
>>>
>>> This also prepares base to add display support for AM62P.
>>>
>>
>> NAK, for this patch, as discussed with team there are already plans
>> to have separate OLDI bridge driver which should eventually handle
>> the additional power domains.
> 
> Just for the record in case your current plan doesn't work out and we
> need to revisit this patch: my point was that it's something that
> deviates by a margin from what drivers are usually expected to do, so we
> should document why that deviation is there.
> 

Sure, thanks for the suggestion. I agree that if we go with this logic, we
should definitely put a comment in the driver explaining why it is needed.
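
For illustration only, something along these lines above
tidss_attach_pm_domains() could capture the reasoning (wording is just a
rough sketch, to be refined):

/*
 * The driver core only auto-attaches a genpd when the device lists
 * exactly one "power-domains" entry (see genpd_dev_pm_attach()). On
 * AM62P the DSS node also lists the OLDI TX power domains, so attach
 * and device-link every listed domain here to keep them powered
 * together with the DSS core domain.
 */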

> The patch itself looks reasonable to me otherwise.

Yes, it's just that we are planning a separate driver for OLDI. We will see if
we need similar logic in that driver too.

Regards
Devarsh

> 
> Maxime

Patch

diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4d063eb9cd0b..a703a27d17bf 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -8,6 +8,7 @@ 
 #include <linux/of.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -114,6 +115,72 @@  static const struct drm_driver tidss_driver = {
 	.minor			= 0,
 };
 
+static int tidss_detach_pm_domains(struct tidss_device *tidss)
+{
+	int i;
+
+	if (tidss->num_domains <= 1)
+		return 0;
+
+	for (i = 0; i < tidss->num_domains; i++) {
+		if (tidss->pd_link[i] && !IS_ERR(tidss->pd_link[i]))
+			device_link_del(tidss->pd_link[i]);
+		if (tidss->pd_dev[i] && !IS_ERR(tidss->pd_dev[i]))
+			dev_pm_domain_detach(tidss->pd_dev[i], true);
+		tidss->pd_dev[i] = NULL;
+		tidss->pd_link[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int tidss_attach_pm_domains(struct tidss_device *tidss)
+{
+	struct device *dev = tidss->dev;
+	int i;
+	int ret;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *np = pdev->dev.of_node;
+
+	tidss->num_domains = of_count_phandle_with_args(np, "power-domains",
+							"#power-domain-cells");
+	if (tidss->num_domains <= 1) {
+		dev_dbg(dev, "One or less power domains, no need to do attach domains\n");
+		return 0;
+	}
+
+	tidss->pd_dev = devm_kmalloc_array(dev, tidss->num_domains,
+					   sizeof(*tidss->pd_dev), GFP_KERNEL);
+	if (!tidss->pd_dev)
+		return -ENOMEM;
+
+	tidss->pd_link = devm_kmalloc_array(dev, tidss->num_domains,
+					    sizeof(*tidss->pd_link), GFP_KERNEL);
+	if (!tidss->pd_link)
+		return -ENOMEM;
+
+	for (i = 0; i < tidss->num_domains; i++) {
+		tidss->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+		if (IS_ERR(tidss->pd_dev[i])) {
+			ret = PTR_ERR(tidss->pd_dev[i]);
+			goto fail;
+		}
+
+		tidss->pd_link[i] = device_link_add(dev, tidss->pd_dev[i],
+						    DL_FLAG_STATELESS |
+						    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+		if (!tidss->pd_link[i]) {
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+
+	return 0;
+fail:
+	tidss_detach_pm_domains(tidss);
+	return ret;
+}
+
 static int tidss_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -136,6 +203,13 @@  static int tidss_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, tidss);
 
+	/* powering up associated OLDI domains */
+	ret = tidss_attach_pm_domains(tidss);
+	if (ret < 0) {
+		dev_err(dev, "failed to attach power domains %d\n", ret);
+		return ret;
+	}
+
 	ret = dispc_init(tidss);
 	if (ret) {
 		dev_err(dev, "failed to initialize dispc: %d\n", ret);
@@ -193,6 +267,7 @@  static int tidss_probe(struct platform_device *pdev)
 	dispc_runtime_suspend(tidss->dispc);
 #endif
 	pm_runtime_disable(dev);
+	tidss_detach_pm_domains(tidss);
 
 	return ret;
 }
@@ -220,6 +295,7 @@  static void tidss_remove(struct platform_device *pdev)
 	/* devm allocated dispc goes away with the dev so mark it NULL */
 	dispc_remove(tidss);
 
+	tidss_detach_pm_domains(tidss);
 	dev_dbg(dev, "%s done\n", __func__);
 }
 
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d7f27b0b0315..3c8b37b3aba6 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -31,6 +31,11 @@  struct tidss_device {
 
 	spinlock_t wait_lock;	/* protects the irq masks */
 	dispc_irq_t irq_mask;	/* enabled irqs in addition to wait_list */
+
+	int num_domains; /* Handle attached PM domains */
+	struct device **pd_dev;
+	struct device_link **pd_link;
+
 };
 
 #define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)