diff mbox

[v1,2/2] rockchip: power-domain: support qos save and restore

Message ID 1458285444-31129-3-git-send-email-zhangqing@rock-chips.com (mailing list archive)
State New, archived
Headers show

Commit Message

zhangqing March 18, 2016, 7:17 a.m. UTC
support qos save and restore when power domain on/off.

Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
---
 drivers/soc/rockchip/pm_domains.c | 87 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 84 insertions(+), 3 deletions(-)

Comments

Heiko Stuebner March 31, 2016, 4:31 p.m. UTC | #1
Hi Elaine,

Am Freitag, 18. März 2016, 15:17:24 schrieb Elaine Zhang:
> support qos save and restore when power domain on/off.
> 
> Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>

overall looks nice already ... some implementation-specific comments below.

> ---
>  drivers/soc/rockchip/pm_domains.c | 87
> +++++++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+),
> 3 deletions(-)
> 
> diff --git a/drivers/soc/rockchip/pm_domains.c
> b/drivers/soc/rockchip/pm_domains.c index 18aee6b..c5f4be6 100644
> --- a/drivers/soc/rockchip/pm_domains.c
> +++ b/drivers/soc/rockchip/pm_domains.c
> @@ -45,10 +45,21 @@ struct rockchip_pmu_info {
>  	const struct rockchip_domain_info *domain_info;
>  };
> 
> +#define MAX_QOS_NODE_NUM	20
> +#define MAX_QOS_REGS_NUM	5
> +#define QOS_PRIORITY		0x08
> +#define QOS_MODE		0x0c
> +#define QOS_BANDWIDTH		0x10
> +#define QOS_SATURATION		0x14
> +#define QOS_EXTCONTROL		0x18
> +
>  struct rockchip_pm_domain {
>  	struct generic_pm_domain genpd;
>  	const struct rockchip_domain_info *info;
>  	struct rockchip_pmu *pmu;
> +	int num_qos;
> +	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
> +	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];

struct regmap **qos_regmap;
u32 *qos_save_regs;


>  	int num_clks;
>  	struct clk *clks[];
>  };
> @@ -111,6 +122,55 @@ static int rockchip_pmu_set_idle_request(struct
> rockchip_pm_domain *pd, return 0;
>  }
> 
> +static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
> +{
> +	int i;
> +
> +	for (i = 0; i < pd->num_qos; i++) {
> +		regmap_read(pd->qos_regmap[i],
> +			    QOS_PRIORITY,
> +			    &pd->qos_save_regs[i][0]);
> +		regmap_read(pd->qos_regmap[i],
> +			    QOS_MODE,
> +			    &pd->qos_save_regs[i][1]);
> +		regmap_read(pd->qos_regmap[i],
> +			    QOS_BANDWIDTH,
> +			    &pd->qos_save_regs[i][2]);
> +		regmap_read(pd->qos_regmap[i],
> +			    QOS_SATURATION,
> +			    &pd->qos_save_regs[i][3]);
> +		regmap_read(pd->qos_regmap[i],
> +			    QOS_EXTCONTROL,
> +			    &pd->qos_save_regs[i][4]);
> +	}
> +	return 0;
> +}
> +
> +static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
> +{
> +	int i;
> +
> +	for (i = 0; i < pd->num_qos; i++) {
> +		regmap_write(pd->qos_regmap[i],
> +			     QOS_PRIORITY,
> +			     pd->qos_save_regs[i][0]);
> +		regmap_write(pd->qos_regmap[i],
> +			     QOS_MODE,
> +			     pd->qos_save_regs[i][1]);
> +		regmap_write(pd->qos_regmap[i],
> +			     QOS_BANDWIDTH,
> +			     pd->qos_save_regs[i][2]);
> +		regmap_write(pd->qos_regmap[i],
> +			     QOS_SATURATION,
> +			     pd->qos_save_regs[i][3]);
> +		regmap_write(pd->qos_regmap[i],
> +			     QOS_EXTCONTROL,
> +			     pd->qos_save_regs[i][4]);
> +	}
> +
> +	return 0;
> +}
> +
>  static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
>  {
>  	struct rockchip_pmu *pmu = pd->pmu;
> @@ -147,7 +207,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain
> *pd, bool power_on) clk_enable(pd->clks[i]);
> 
>  		if (!power_on) {
> -			/* FIXME: add code to save AXI_QOS */
> +			rockchip_pmu_save_qos(pd);
> 
>  			/* if powering down, idle request to NIU first */
>  			rockchip_pmu_set_idle_request(pd, true);
> @@ -159,7 +219,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain
> *pd, bool power_on) /* if powering up, leave idle mode */
>  			rockchip_pmu_set_idle_request(pd, false);
> 
> -			/* FIXME: add code to restore AXI_QOS */
> +			rockchip_pmu_restore_qos(pd);
>  		}
> 
>  		for (i = pd->num_clks - 1; i >= 0; i--)
> @@ -227,9 +287,10 @@ static int rockchip_pm_add_one_domain(struct
> rockchip_pmu *pmu, {
>  	const struct rockchip_domain_info *pd_info;
>  	struct rockchip_pm_domain *pd;
> +	struct device_node *qos_node;
>  	struct clk *clk;
>  	int clk_cnt;
> -	int i;
> +	int i, j;
>  	u32 id;
>  	int error;
> 
> @@ -289,6 +350,26 @@ static int rockchip_pm_add_one_domain(struct
> rockchip_pmu *pmu, clk, node->name);
>  	}
> 
> +	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
> +						 NULL);

missing error handling here:

if (pd->num_qos < 0) {
	error = pd->num_qos;
	goto err_out;
}

Right now, you always allocate MAX_QOS_NODE_NUM entries for regmaps and 
registers for each domain - a bit of a waste over all domains, so maybe 
like:

pd->qos_regmap = kcalloc(pd->num_qos, sizeof(*pd->qos_regmap), GFP_KERNEL);

pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM, sizeof(u32), 
GFP_KERNEL);

+ of course error handling for both + cleanup in rockchip_remove_one_domain

> +
> +	for (j = 0; j < pd->num_qos; j++) {
> +		qos_node = of_parse_phandle(node, "pm_qos", j);
> +		if (!qos_node) {
> +			error = -ENODEV;
> +			goto err_out;
> +		}
> +		pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);

missing
if (IS_ERR(pd->qos_regmap[j])) { ...}

> +		of_node_put(qos_node);
> +	}
> +
>  	error = rockchip_pd_power(pd, true);
>  	if (error) {
>  		dev_err(pmu->dev,
zhangqing April 1, 2016, 2:33 a.m. UTC | #2
hi, Heiko

I agree with most of your modifications.
Except, the u32 *qos_save_regs below

On 04/01/2016 12:31 AM, Heiko Stuebner wrote:
> Hi Elaine,
>
> Am Freitag, 18. März 2016, 15:17:24 schrieb Elaine Zhang:
>> support qos save and restore when power domain on/off.
>>
>> Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
>
> overall looks nice already ... some implementation-specific comments below.
>
>> ---
>>   drivers/soc/rockchip/pm_domains.c | 87
>> +++++++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+),
>> 3 deletions(-)
>>
>> diff --git a/drivers/soc/rockchip/pm_domains.c
>> b/drivers/soc/rockchip/pm_domains.c index 18aee6b..c5f4be6 100644
>> --- a/drivers/soc/rockchip/pm_domains.c
>> +++ b/drivers/soc/rockchip/pm_domains.c
>> @@ -45,10 +45,21 @@ struct rockchip_pmu_info {
>>   	const struct rockchip_domain_info *domain_info;
>>   };
>>
>> +#define MAX_QOS_NODE_NUM	20
>> +#define MAX_QOS_REGS_NUM	5
>> +#define QOS_PRIORITY		0x08
>> +#define QOS_MODE		0x0c
>> +#define QOS_BANDWIDTH		0x10
>> +#define QOS_SATURATION		0x14
>> +#define QOS_EXTCONTROL		0x18
>> +
>>   struct rockchip_pm_domain {
>>   	struct generic_pm_domain genpd;
>>   	const struct rockchip_domain_info *info;
>>   	struct rockchip_pmu *pmu;
>> +	int num_qos;
>> +	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
>> +	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];
>
> struct regmap **qos_regmap;
> u32 *qos_save_regs;
when we save and restore qos registers we need save five regs for every qos.
like this :
for (i = 0; i < pd->num_qos; i++) {
		regmap_read(pd->qos_regmap[i],
			    QOS_PRIORITY,
			    &pd->qos_save_regs[i][0]);
		regmap_read(pd->qos_regmap[i],
			    QOS_MODE,
			    &pd->qos_save_regs[i][1]);
		regmap_read(pd->qos_regmap[i],
			    QOS_BANDWIDTH,
			    &pd->qos_save_regs[i][2]);
		regmap_read(pd->qos_regmap[i],
			    QOS_SATURATION,
			    &pd->qos_save_regs[i][3]);
		regmap_read(pd->qos_regmap[i],
			    QOS_EXTCONTROL,
			    &pd->qos_save_regs[i][4]);
	}
so we can not define qos_save_regs like u32 *qos_save_regs;,
and apply buff like
pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM, sizeof(u32),
GFP_KERNEL);
>
>
>>   	int num_clks;
>>   	struct clk *clks[];
>>   };
>> @@ -111,6 +122,55 @@ static int rockchip_pmu_set_idle_request(struct
>> rockchip_pm_domain *pd, return 0;
>>   }
>>
>> +static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
>> +{
>> +	int i;
>> +
>> +	for (i = 0; i < pd->num_qos; i++) {
>> +		regmap_read(pd->qos_regmap[i],
>> +			    QOS_PRIORITY,
>> +			    &pd->qos_save_regs[i][0]);
>> +		regmap_read(pd->qos_regmap[i],
>> +			    QOS_MODE,
>> +			    &pd->qos_save_regs[i][1]);
>> +		regmap_read(pd->qos_regmap[i],
>> +			    QOS_BANDWIDTH,
>> +			    &pd->qos_save_regs[i][2]);
>> +		regmap_read(pd->qos_regmap[i],
>> +			    QOS_SATURATION,
>> +			    &pd->qos_save_regs[i][3]);
>> +		regmap_read(pd->qos_regmap[i],
>> +			    QOS_EXTCONTROL,
>> +			    &pd->qos_save_regs[i][4]);
>> +	}
>> +	return 0;
>> +}
>> +
>> +static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
>> +{
>> +	int i;
>> +
>> +	for (i = 0; i < pd->num_qos; i++) {
>> +		regmap_write(pd->qos_regmap[i],
>> +			     QOS_PRIORITY,
>> +			     pd->qos_save_regs[i][0]);
>> +		regmap_write(pd->qos_regmap[i],
>> +			     QOS_MODE,
>> +			     pd->qos_save_regs[i][1]);
>> +		regmap_write(pd->qos_regmap[i],
>> +			     QOS_BANDWIDTH,
>> +			     pd->qos_save_regs[i][2]);
>> +		regmap_write(pd->qos_regmap[i],
>> +			     QOS_SATURATION,
>> +			     pd->qos_save_regs[i][3]);
>> +		regmap_write(pd->qos_regmap[i],
>> +			     QOS_EXTCONTROL,
>> +			     pd->qos_save_regs[i][4]);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>>   static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
>>   {
>>   	struct rockchip_pmu *pmu = pd->pmu;
>> @@ -147,7 +207,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain
>> *pd, bool power_on) clk_enable(pd->clks[i]);
>>
>>   		if (!power_on) {
>> -			/* FIXME: add code to save AXI_QOS */
>> +			rockchip_pmu_save_qos(pd);
>>
>>   			/* if powering down, idle request to NIU first */
>>   			rockchip_pmu_set_idle_request(pd, true);
>> @@ -159,7 +219,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain
>> *pd, bool power_on) /* if powering up, leave idle mode */
>>   			rockchip_pmu_set_idle_request(pd, false);
>>
>> -			/* FIXME: add code to restore AXI_QOS */
>> +			rockchip_pmu_restore_qos(pd);
>>   		}
>>
>>   		for (i = pd->num_clks - 1; i >= 0; i--)
>> @@ -227,9 +287,10 @@ static int rockchip_pm_add_one_domain(struct
>> rockchip_pmu *pmu, {
>>   	const struct rockchip_domain_info *pd_info;
>>   	struct rockchip_pm_domain *pd;
>> +	struct device_node *qos_node;
>>   	struct clk *clk;
>>   	int clk_cnt;
>> -	int i;
>> +	int i, j;
>>   	u32 id;
>>   	int error;
>>
>> @@ -289,6 +350,26 @@ static int rockchip_pm_add_one_domain(struct
>> rockchip_pmu *pmu, clk, node->name);
>>   	}
>>
>> +	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
>> +						 NULL);
>
> missing error handling here:
>
> if (pd->num_qos < 0) {
> 	error = pd->num_qos;
> 	goto err_out;
> }
>
> Right now, you always allocate MAX_QOS_NODE_NUM entries for regmaps and
> registers for each domain - a bit of a waste over all domains, so maybe
> like:
>
> pd->qos_regmap = kcalloc(pd->num_qos, sizeof(*pd->qos_regmap), GFP_KERNEL);
>
> pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM, sizeof(u32),
> GFP_KERNEL);
>
> + of course error handling for both + cleanup in rockchip_remove_one_domain
>
>> +
>> +	for (j = 0; j < pd->num_qos; j++) {
>> +		qos_node = of_parse_phandle(node, "pm_qos", j);
>> +		if (!qos_node) {
>> +			error = -ENODEV;
>> +			goto err_out;
>> +		}
>> +		pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
>
> missing
> if (IS_ERR(pd->qos_regmap[j])) { ...}
>
>> +		of_node_put(qos_node);
>> +	}
>> +
>>   	error = rockchip_pd_power(pd, true);
>>   	if (error) {
>>   		dev_err(pmu->dev,
>
>
>
>
Heiko Stuebner April 1, 2016, 4:19 p.m. UTC | #3
Hi Elaine,

Am Freitag, 1. April 2016, 10:33:45 schrieb Elaine Zhang:
> I agree with most of your modifications.
> Except, the u32 *qos_save_regs below

you're right. I didn't take that into account when open-coding my idea.
A bit more below:

> On 04/01/2016 12:31 AM, Heiko Stuebner wrote:
> > Hi Elaine,
> > 
> > Am Freitag, 18. März 2016, 15:17:24 schrieb Elaine Zhang:
> >> support qos save and restore when power domain on/off.
> >> 
> >> Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
> > 
> > overall looks nice already ... some implementation-specific comments
> > below.> 
> >> ---
> >> 
> >>   drivers/soc/rockchip/pm_domains.c | 87
> >> 
> >> +++++++++++++++++++++++++++++++++++++-- 1 file changed, 84
> >> insertions(+),
> >> 3 deletions(-)
> >> 
> >> diff --git a/drivers/soc/rockchip/pm_domains.c
> >> b/drivers/soc/rockchip/pm_domains.c index 18aee6b..c5f4be6 100644
> >> --- a/drivers/soc/rockchip/pm_domains.c
> >> +++ b/drivers/soc/rockchip/pm_domains.c
> >> @@ -45,10 +45,21 @@ struct rockchip_pmu_info {
> >> 
> >>   	const struct rockchip_domain_info *domain_info;
> >>   
> >>   };
> >> 
> >> +#define MAX_QOS_NODE_NUM	20
> >> +#define MAX_QOS_REGS_NUM	5
> >> +#define QOS_PRIORITY		0x08
> >> +#define QOS_MODE		0x0c
> >> +#define QOS_BANDWIDTH		0x10
> >> +#define QOS_SATURATION		0x14
> >> +#define QOS_EXTCONTROL		0x18
> >> +
> >> 
> >>   struct rockchip_pm_domain {
> >>   
> >>   	struct generic_pm_domain genpd;
> >>   	const struct rockchip_domain_info *info;
> >>   	struct rockchip_pmu *pmu;
> >> 
> >> +	int num_qos;
> >> +	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
> >> +	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];
> > 
> > struct regmap **qos_regmap;
> > u32 *qos_save_regs;
> 
> when we save and restore qos registers we need save five regs for every
> qos. like this :
> for (i = 0; i < pd->num_qos; i++) {
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_PRIORITY,
> 			    &pd->qos_save_regs[i][0]);
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_MODE,
> 			    &pd->qos_save_regs[i][1]);
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_BANDWIDTH,
> 			    &pd->qos_save_regs[i][2]);
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_SATURATION,
> 			    &pd->qos_save_regs[i][3]);
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_EXTCONTROL,
> 			    &pd->qos_save_regs[i][4]);
> 	}
> so we can not define qos_save_regs like u32 *qos_save_regs;,
> and apply buff like
> pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM, sizeof(u32),
> GFP_KERNEL);

so how about simply swapping indices and doing it like

u32 *qos_save_regs[MAX_QOS_REGS_NUM];

for (i = 0; i < MAX_QOS_REGS_NUM; i++) {
	qos_save_regs[i] = kcalloc(pd->num_qos, sizeof(u32), GFP_KERNEL);
	/* error handling here */
}

...
		regmap_read(pd->qos_regmap[i],
			    QOS_SATURATION,
			    &pd->qos_save_regs[3][i]);
...


Asked the other way around, how did you measure to set MAX_QOS_NODE_NUM to 
20? From looking at the rk3399 TRM, it seems there are only 38 QoS 
generators on the SoC in general (24 on the rk3288 with PD_VIO having a 
maximum of 9 qos generators), so preparing for 20 seems a bit overkill ;-)


Heiko
zhangqing April 5, 2016, 1:57 a.m. UTC | #4
hi, Heiko:

Thanks for your reply.
For your questions, I also have the same concerns.

On 04/02/2016 12:19 AM, Heiko Stuebner wrote:
> Hi Elaine,
>
> Am Freitag, 1. April 2016, 10:33:45 schrieb Elaine Zhang:
>> I agree with most of your modifications.
>> Except, the u32 *qos_save_regs below
>
> you're right. I didn't take that into account when open-coding my idea.
> A bit more below:
>
>> On 04/01/2016 12:31 AM, Heiko Stuebner wrote:
>>> Hi Elaine,
>>>
>>> Am Freitag, 18. März 2016, 15:17:24 schrieb Elaine Zhang:
>>>> support qos save and restore when power domain on/off.
>>>>
>>>> Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
>>>
>>> overall looks nice already ... some implementation-specific comments
>>> below.>
>>>> ---
>>>>
>>>>    drivers/soc/rockchip/pm_domains.c | 87
>>>>
>>>> +++++++++++++++++++++++++++++++++++++-- 1 file changed, 84
>>>> insertions(+),
>>>> 3 deletions(-)
>>>>
>>>> diff --git a/drivers/soc/rockchip/pm_domains.c
>>>> b/drivers/soc/rockchip/pm_domains.c index 18aee6b..c5f4be6 100644
>>>> --- a/drivers/soc/rockchip/pm_domains.c
>>>> +++ b/drivers/soc/rockchip/pm_domains.c
>>>> @@ -45,10 +45,21 @@ struct rockchip_pmu_info {
>>>>
>>>>    	const struct rockchip_domain_info *domain_info;
>>>>
>>>>    };
>>>>
>>>> +#define MAX_QOS_NODE_NUM	20
>>>> +#define MAX_QOS_REGS_NUM	5
>>>> +#define QOS_PRIORITY		0x08
>>>> +#define QOS_MODE		0x0c
>>>> +#define QOS_BANDWIDTH		0x10
>>>> +#define QOS_SATURATION		0x14
>>>> +#define QOS_EXTCONTROL		0x18
>>>> +
>>>>
>>>>    struct rockchip_pm_domain {
>>>>
>>>>    	struct generic_pm_domain genpd;
>>>>    	const struct rockchip_domain_info *info;
>>>>    	struct rockchip_pmu *pmu;
>>>>
>>>> +	int num_qos;
>>>> +	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
>>>> +	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];
>>>
>>> struct regmap **qos_regmap;
>>> u32 *qos_save_regs;
>>
>> when we save and restore qos registers we need save five regs for every
>> qos. like this :
>> for (i = 0; i < pd->num_qos; i++) {
>> 		regmap_read(pd->qos_regmap[i],
>> 			    QOS_PRIORITY,
>> 			    &pd->qos_save_regs[i][0]);
>> 		regmap_read(pd->qos_regmap[i],
>> 			    QOS_MODE,
>> 			    &pd->qos_save_regs[i][1]);
>> 		regmap_read(pd->qos_regmap[i],
>> 			    QOS_BANDWIDTH,
>> 			    &pd->qos_save_regs[i][2]);
>> 		regmap_read(pd->qos_regmap[i],
>> 			    QOS_SATURATION,
>> 			    &pd->qos_save_regs[i][3]);
>> 		regmap_read(pd->qos_regmap[i],
>> 			    QOS_EXTCONTROL,
>> 			    &pd->qos_save_regs[i][4]);
>> 	}
>> so we can not define qos_save_regs like u32 *qos_save_regs;,
>> and apply buff like
>> pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM, sizeof(u32),
>> GFP_KERNEL);
>
> so how about simply swapping indices and doing it like
>
> u32 *qos_save_regs[MAX_QOS_REGS_NUM];
>
> for (i = 0; i < MAX_QOS_REGS_NUM; i++) {
> 	qos_save_regs[i] = kcalloc(pd->num_qos, sizeof(u32), GFP_KERNEL);
> 	/* error handling here */
> }
>
> ...
> 		regmap_read(pd->qos_regmap[i],
> 			    QOS_SATURATION,
> 			    &pd->qos_save_regs[3][i]);
> ...

I agree with you on this modification.

>
>
> Asked the other way around, how did you measure to set MAX_QOS_NODE_NUM to
> 20? From looking at the rk3399 TRM, it seems there are only 38 QoS
> generators on the SoC in general (24 on the rk3288 with PD_VIO having a
> maximum of 9 qos generators), so preparing for 20 seems a bit overkill ;-)
>
About the MAX_QOS_NODE_NUM I also have some uncertainty.
Although there are only 38 QoS generators on the RK3399 (24 on the rk3288), not 
all of the power domains need to power on/off, so not all QoS need save and restore.
So about the MAX_QOS_NODE_NUM, what do you suggest.

MAX_QOS_REGS_NUM is 5 because there are just 5 QoS registers that need to be 
saved and restored.
like :
#define QOS_PRIORITY		0x08
#define QOS_MODE		0x0c
#define QOS_BANDWIDTH		0x10
#define QOS_SATURATION		0x14
#define QOS_EXTCONTROL		0x18
>
> Heiko
>
>
>
Heiko Stuebner April 5, 2016, 5:26 p.m. UTC | #5
Hi Elaine,

Am Dienstag, 5. April 2016, 09:57:20 schrieb Elaine Zhang:
> Thanks for your reply.
> For your questions, I also have the same concerns.
> 
> On 04/02/2016 12:19 AM, Heiko Stuebner wrote:
> > Am Freitag, 1. April 2016, 10:33:45 schrieb Elaine Zhang:
> >> I agree with most of your modifications.
> >> Except, the u32 *qos_save_regs below
> > 
> > you're right. I didn't take that into account when open-coding my
> > idea.> 
> > A bit more below:
> >> On 04/01/2016 12:31 AM, Heiko Stuebner wrote:
> >>> Hi Elaine,
> >>> 
> >>> Am Freitag, 18. März 2016, 15:17:24 schrieb Elaine Zhang:
> >>>> support qos save and restore when power domain on/off.
> >>>> 
> >>>> Signed-off-by: Elaine Zhang <zhangqing@rock-chips.com>
> >>> 
> >>> overall looks nice already ... some implementation-specific comments
> >>> below.>
> >>> 
> >>>> ---
> >>>> 
> >>>>    drivers/soc/rockchip/pm_domains.c | 87
> >>>> 
> >>>> +++++++++++++++++++++++++++++++++++++-- 1 file changed, 84
> >>>> insertions(+),
> >>>> 3 deletions(-)
> >>>> 
> >>>> diff --git a/drivers/soc/rockchip/pm_domains.c
> >>>> b/drivers/soc/rockchip/pm_domains.c index 18aee6b..c5f4be6 100644
> >>>> --- a/drivers/soc/rockchip/pm_domains.c
> >>>> +++ b/drivers/soc/rockchip/pm_domains.c
> >>>> @@ -45,10 +45,21 @@ struct rockchip_pmu_info {
> >>>> 
> >>>>    	const struct rockchip_domain_info *domain_info;
> >>>>    
> >>>>    };
> >>>> 
> >>>> +#define MAX_QOS_NODE_NUM	20
> >>>> +#define MAX_QOS_REGS_NUM	5
> >>>> +#define QOS_PRIORITY		0x08
> >>>> +#define QOS_MODE		0x0c
> >>>> +#define QOS_BANDWIDTH		0x10
> >>>> +#define QOS_SATURATION		0x14
> >>>> +#define QOS_EXTCONTROL		0x18
> >>>> +
> >>>> 
> >>>>    struct rockchip_pm_domain {
> >>>>    
> >>>>    	struct generic_pm_domain genpd;
> >>>>    	const struct rockchip_domain_info *info;
> >>>>    	struct rockchip_pmu *pmu;
> >>>> 
> >>>> +	int num_qos;
> >>>> +	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
> >>>> +	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];
> >>> 
> >>> struct regmap **qos_regmap;
> >>> u32 *qos_save_regs;
> >> 
> >> when we save and restore qos registers we need save five regs for every
> >> qos. like this :
> >> for (i = 0; i < pd->num_qos; i++) {
> >> 
> >> 		regmap_read(pd->qos_regmap[i],
> >> 		
> >> 			    QOS_PRIORITY,
> >> 			    &pd->qos_save_regs[i][0]);
> >> 		
> >> 		regmap_read(pd->qos_regmap[i],
> >> 		
> >> 			    QOS_MODE,
> >> 			    &pd->qos_save_regs[i][1]);
> >> 		
> >> 		regmap_read(pd->qos_regmap[i],
> >> 		
> >> 			    QOS_BANDWIDTH,
> >> 			    &pd->qos_save_regs[i][2]);
> >> 		
> >> 		regmap_read(pd->qos_regmap[i],
> >> 		
> >> 			    QOS_SATURATION,
> >> 			    &pd->qos_save_regs[i][3]);
> >> 		
> >> 		regmap_read(pd->qos_regmap[i],
> >> 		
> >> 			    QOS_EXTCONTROL,
> >> 			    &pd->qos_save_regs[i][4]);
> >> 	
> >> 	}
> >> 
> >> so we can not define qos_save_regs like u32 *qos_save_regs;,
> >> and apply buff like
> >> pd->qos_save_regs = kcalloc(pd->num_qos * MAX_QOS_REGS_NUM,
> >> sizeof(u32),
> >> GFP_KERNEL);
> > 
> > so how about simply swapping indices and doing it like
> > 
> > u32 *qos_save_regs[MAX_QOS_REGS_NUM];
> > 
> > for (i = 0; i < MAX_QOS_REGS_NUM; i++) {
> > 
> > 	qos_save_regs[i] = kcalloc(pd->num_qos, sizeof(u32), GFP_KERNEL);
> > 	/* error handling here */
> > 
> > }
> > 
> > ...
> > 
> > 		regmap_read(pd->qos_regmap[i],
> > 		
> > 			    QOS_SATURATION,
> > 			    &pd->qos_save_regs[3][i]);
> > 
> > ...
> 
> I agree with you on this modification.
> 
> > Asked the other way around, how did you measure to set MAX_QOS_NODE_NUM
> > to 20? From looking at the rk3399 TRM, it seems there are only 38 QoS
> > generators on the SoC in general (24 on the rk3288 with PD_VIO having a
> > maximum of 9 qos generators), so preparing for 20 seems a bit overkill
> > ;-)
> About the MAX_QOS_NODE_NUM I also have some uncertainty.
> Although there are only 38 QoS generators on the RK3399 (24 on the rk3288), not
> all of the power domains need to power on/off, so not all QoS need save and restore.
> So about the MAX_QOS_NODE_NUM, what do you suggest.

if we go the way outlined above, we don't need MAX_QOS_NODE_NUM, as the 
driver will be counting the real number of qos nodes and allocate needed 
structs dynamically - problem solved :-D


> MAX_QOS_REGS_NUM is 5 because the QOS register is just 5 need save and
> restore.

yep, that is no problem and expected.


Heiko
diff mbox

Patch

diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 18aee6b..c5f4be6 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -45,10 +45,21 @@  struct rockchip_pmu_info {
 	const struct rockchip_domain_info *domain_info;
 };
 
+#define MAX_QOS_NODE_NUM	20
+#define MAX_QOS_REGS_NUM	5
+#define QOS_PRIORITY		0x08
+#define QOS_MODE		0x0c
+#define QOS_BANDWIDTH		0x10
+#define QOS_SATURATION		0x14
+#define QOS_EXTCONTROL		0x18
+
 struct rockchip_pm_domain {
 	struct generic_pm_domain genpd;
 	const struct rockchip_domain_info *info;
 	struct rockchip_pmu *pmu;
+	int num_qos;
+	struct regmap *qos_regmap[MAX_QOS_NODE_NUM];
+	u32 qos_save_regs[MAX_QOS_NODE_NUM][MAX_QOS_REGS_NUM];
 	int num_clks;
 	struct clk *clks[];
 };
@@ -111,6 +122,55 @@  static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
 	return 0;
 }
 
+static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
+{
+	int i;
+
+	for (i = 0; i < pd->num_qos; i++) {
+		regmap_read(pd->qos_regmap[i],
+			    QOS_PRIORITY,
+			    &pd->qos_save_regs[i][0]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_MODE,
+			    &pd->qos_save_regs[i][1]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_BANDWIDTH,
+			    &pd->qos_save_regs[i][2]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_SATURATION,
+			    &pd->qos_save_regs[i][3]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_EXTCONTROL,
+			    &pd->qos_save_regs[i][4]);
+	}
+	return 0;
+}
+
+static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
+{
+	int i;
+
+	for (i = 0; i < pd->num_qos; i++) {
+		regmap_write(pd->qos_regmap[i],
+			     QOS_PRIORITY,
+			     pd->qos_save_regs[i][0]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_MODE,
+			     pd->qos_save_regs[i][1]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_BANDWIDTH,
+			     pd->qos_save_regs[i][2]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_SATURATION,
+			     pd->qos_save_regs[i][3]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_EXTCONTROL,
+			     pd->qos_save_regs[i][4]);
+	}
+
+	return 0;
+}
+
 static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
 {
 	struct rockchip_pmu *pmu = pd->pmu;
@@ -147,7 +207,7 @@  static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
 			clk_enable(pd->clks[i]);
 
 		if (!power_on) {
-			/* FIXME: add code to save AXI_QOS */
+			rockchip_pmu_save_qos(pd);
 
 			/* if powering down, idle request to NIU first */
 			rockchip_pmu_set_idle_request(pd, true);
@@ -159,7 +219,7 @@  static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
 			/* if powering up, leave idle mode */
 			rockchip_pmu_set_idle_request(pd, false);
 
-			/* FIXME: add code to restore AXI_QOS */
+			rockchip_pmu_restore_qos(pd);
 		}
 
 		for (i = pd->num_clks - 1; i >= 0; i--)
@@ -227,9 +287,10 @@  static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 {
 	const struct rockchip_domain_info *pd_info;
 	struct rockchip_pm_domain *pd;
+	struct device_node *qos_node;
 	struct clk *clk;
 	int clk_cnt;
-	int i;
+	int i, j;
 	u32 id;
 	int error;
 
@@ -289,6 +350,26 @@  static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 			clk, node->name);
 	}
 
+	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
+						 NULL);
+	if (pd->num_qos > MAX_QOS_NODE_NUM) {
+		dev_err(pmu->dev,
+			"the qos node num is overflow qos_num = %d\n",
+			pd->num_qos);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	for (j = 0; j < pd->num_qos; j++) {
+		qos_node = of_parse_phandle(node, "pm_qos", j);
+		if (!qos_node) {
+			error = -ENODEV;
+			goto err_out;
+		}
+		pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
+		of_node_put(qos_node);
+	}
+
 	error = rockchip_pd_power(pd, true);
 	if (error) {
 		dev_err(pmu->dev,