
[v2,12/16] cpufreq/amd-pstate: Always write EPP value when updating perf

Message ID 20241208063031.3113-13-mario.limonciello@amd.com (mailing list archive)
State Superseded, archived
Series amd-pstate fixes and improvements for 6.14

Commit Message

Mario Limonciello Dec. 8, 2024, 6:30 a.m. UTC
For MSR systems the EPP value is in the same register as perf targets
and so dividing them into two separate MSR writes is wasteful.

In msr_update_perf(), update both EPP and perf values in one write to
MSR_AMD_CPPC_REQ, and cache them if successful.

To accomplish this, plumb the EPP value into the update_perf call and modify
all its callers to check the return value.

Reviewed-and-tested-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
 1 file changed, 43 insertions(+), 28 deletions(-)
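
For context, MSR_AMD_CPPC_REQ packs all four CPPC request fields into one
64-bit register, which is why a single write can cover both the perf limits
and EPP. A sketch of the layout (AMD_CPPC_MIN_PERF_MASK and
AMD_CPPC_EPP_PERF_MASK appear in the diff below; the MAX/DES names and the
exact bit positions are assumptions based on the usual AMD CPPC request
layout, so treat this as illustrative rather than authoritative):

#define AMD_CPPC_MAX_PERF_MASK  GENMASK_ULL(7, 0)    /* max_perf */
#define AMD_CPPC_MIN_PERF_MASK  GENMASK_ULL(15, 8)   /* min_perf */
#define AMD_CPPC_DES_PERF_MASK  GENMASK_ULL(23, 16)  /* des_perf */
#define AMD_CPPC_EPP_PERF_MASK  GENMASK_ULL(31, 24)  /* epp      */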

Comments

Gautham R. Shenoy Dec. 9, 2024, 8:42 a.m. UTC | #1
Hello Mario,

On Sun, Dec 08, 2024 at 12:30:27AM -0600, Mario Limonciello wrote:
> For MSR systems the EPP value is in the same register as perf targets
> and so dividing them into two separate MSR writes is wasteful.
> 
> In msr_update_perf(), update both EPP and perf values in one write to
> MSR_AMD_CPPC_REQ, and cache them if successful.
> 
> To accomplish this, plumb the EPP value into the update_perf call and modify
> all its callers to check the return value.
> 
> Reviewed-and-tested-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
>  drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
>  1 file changed, 43 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index d21acd961edcd..dd11ba6c00cc3 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -222,25 +222,36 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
>  }
>  
>  static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
> -			       u32 des_perf, u32 max_perf, bool fast_switch)
> +			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>  {
> +	u64 value;
> +
> +	value = READ_ONCE(cpudata->cppc_req_cached);


There seems to be a mismatch here between the parameters that the API
is passing and how this function is *not* using them, instead using
cpudata->cppc_req_cached.

The expectation seems to be that the max_perf, min_perf, des_perf and
epp fields in cpudata->cppc_req_cached would be the same as @des_perf,
@max_perf, @min_perf and @epp, no?

Or is it that for the MSR update, the value in
cpudata->cppc_req_cached takes precedence over the arguments passed?

Ideally, the "value" should be recomputed here using (@min_perf |
@max_perf | @des_perf | @epp) and that value should be cached as you
are doing below.
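
Something like the following sketch (AMD_CPPC_MAX_PERF_MASK and
AMD_CPPC_DES_PERF_MASK are assumed to exist alongside the MIN/EPP masks
used elsewhere in this diff):

        u64 value = READ_ONCE(cpudata->cppc_req_cached);

        /* rebuild every field from the arguments instead of
         * trusting the cached copy
         */
        value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
                   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
        value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
        value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
        value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
        value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);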


>  	if (fast_switch) {
>  		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
>  		return 0;
> +	} else {
> +		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
> +					READ_ONCE(cpudata->cppc_req_cached));
> +		if (ret)
> +			return ret;
>  	}
>  
> -	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
> -			     READ_ONCE(cpudata->cppc_req_cached));
> +	WRITE_ONCE(cpudata->cppc_req_cached, value);

Since cppc_req_cached is not changed, why write it again?

> +	WRITE_ONCE(cpudata->epp_cached, epp);
> +
> +	return 0;
>  }
>  
>  DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
>  
>  static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
>  					  u32 min_perf, u32 des_perf,
> -					  u32 max_perf, bool fast_switch)
> +					  u32 max_perf, u32 epp,
> +					  bool fast_switch)
>  {
>  	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
> -						   max_perf, fast_switch);
> +						   max_perf, epp, fast_switch);
>  }
>  
>  static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
> @@ -459,12 +470,19 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
>  	return static_call(amd_pstate_init_perf)(cpudata);
>  }
>  
> -static int shmem_update_perf(struct amd_cpudata *cpudata,
> -			     u32 min_perf, u32 des_perf,
> -			     u32 max_perf, bool fast_switch)
> +static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
> +			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>  {
>  	struct cppc_perf_ctrls perf_ctrls;
>  
> +	if (cppc_state == AMD_PSTATE_ACTIVE) {
> +		int ret = shmem_set_epp(cpudata, epp);
> +
> +		if (ret)
> +			return ret;
> +		WRITE_ONCE(cpudata->epp_cached, epp);
> +	}
> +
>  	perf_ctrls.max_perf = max_perf;
>  	perf_ctrls.min_perf = min_perf;
>  	perf_ctrls.desired_perf = des_perf;
> @@ -545,10 +563,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
>  
>  	WRITE_ONCE(cpudata->cppc_req_cached, value);
>  
> -	amd_pstate_update_perf(cpudata, min_perf, des_perf,
> -			       max_perf, fast_switch);
> +	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
>  
>  cpufreq_policy_put:
> +
>  	cpufreq_cpu_put(policy);
>  }
>  
> @@ -1545,6 +1563,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>  {
>  	struct amd_cpudata *cpudata = policy->driver_data;
>  	u64 value;
> +	u32 epp;
>  
>  	amd_pstate_update_min_max_limit(policy);
>  
> @@ -1557,23 +1576,19 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>  	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
>  
>  	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
> -		WRITE_ONCE(cpudata->epp_cached, 0);
> -	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, cpudata->epp_cached);
> -
> -	WRITE_ONCE(cpudata->cppc_req_cached, value);
> +		epp = 0;
> +	else
> +		epp = READ_ONCE(cpudata->epp_cached);
>  
>  	if (trace_amd_pstate_epp_perf_enabled()) {
> -		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
> -					  cpudata->epp_cached,
> +		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
>  					  cpudata->min_limit_perf,
>  					  cpudata->max_limit_perf,
>  					  policy->boost_enabled);
>  	}
>  
> -	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
> -			       cpudata->max_limit_perf, false);
> -
> -	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
> +	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
> +				      cpudata->max_limit_perf, epp, false);
>  }
>  
>  static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
> @@ -1602,7 +1617,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
>  	return 0;
>  }
>  
> -static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
> +static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>  {
>  	u64 max_perf;
>  	int ret;
> @@ -1620,17 +1635,19 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>  					  max_perf, cpudata->boost_state);
>  	}
>  
> -	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
> -	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
> +	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
                                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
					       
On an MSR-based system, none of the values passed here will be used;
instead the value in cpudata->cppc_req_cached will be used, no?

--
Thanks and Regards
gautham.
Mario Limonciello Dec. 9, 2024, 4:49 p.m. UTC | #2
On 12/9/2024 02:42, Gautham R. Shenoy wrote:
> Hello Mario,
> 
> On Sun, Dec 08, 2024 at 12:30:27AM -0600, Mario Limonciello wrote:
>> For MSR systems the EPP value is in the same register as perf targets
>> and so dividing them into two separate MSR writes is wasteful.
>>
>> In msr_update_perf(), update both EPP and perf values in one write to
>> MSR_AMD_CPPC_REQ, and cache them if successful.
>>
>> To accomplish this, plumb the EPP value into the update_perf call and modify
>> all its callers to check the return value.
>>
>> Reviewed-and-tested-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
>> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
>> ---
>>   drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
>>   1 file changed, 43 insertions(+), 28 deletions(-)
>>
>> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
>> index d21acd961edcd..dd11ba6c00cc3 100644
>> --- a/drivers/cpufreq/amd-pstate.c
>> +++ b/drivers/cpufreq/amd-pstate.c
>> @@ -222,25 +222,36 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
>>   }
>>   
>>   static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
>> -			       u32 des_perf, u32 max_perf, bool fast_switch)
>> +			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>>   {
>> +	u64 value;
>> +
>> +	value = READ_ONCE(cpudata->cppc_req_cached);
> 
> 
> There seems to be a mismatch here between the parameters that the API
> is passing and how this function is *not* using them, instead using
> cpudata->cppc_req_cached.
> 
> The expectation seems to be that the max_perf, min_perf, des_perf and
> epp fields in cpudata->cppc_req_cached would be the same as @des_perf,
> @max_perf, @min_perf and @epp, no?
> 
> Or is it that for the MSR update, the value in
> cpudata->cppc_req_cached takes precedence over the arguments passed?
> 
> Ideally, the "value" should be recomputed here using (@min_perf |
> @max_perf | @des_perf | @epp) and that value should be cached as you
> are doing below.
> 

Yeah - that's what the next patch does (which I think you probably saw 
after you reviewed it).

Do you think maybe I should just squash the two?  Or would you be 
happier if I re-ordered the two?

> 
>>   	if (fast_switch) {
>>   		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
>>   		return 0;
>> +	} else {
>> +		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
>> +					READ_ONCE(cpudata->cppc_req_cached));
>> +		if (ret)
>> +			return ret;
>>   	}
>>   
>> -	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
>> -			     READ_ONCE(cpudata->cppc_req_cached));
>> +	WRITE_ONCE(cpudata->cppc_req_cached, value);
> 
> Since cppc_req_cached is not changed, why write it again?

Because of the next patch.  It will look at cpudata->cppc_req_cached and 
determine if anything changed in it - including EPP.
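
In other words, with EPP folded into cppc_req_cached, a single compare
can detect any change (hypothetical sketch; the actual follow-up patch
may differ):

        u64 prev = READ_ONCE(cpudata->cppc_req_cached);

        /* value has been rebuilt from the arguments, EPP included */
        if (value == prev)
                return 0;       /* nothing changed, skip the MSR write */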


> 
>> +	WRITE_ONCE(cpudata->epp_cached, epp);
>> +
>> +	return 0;
>>   }
>>   
>>   DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
>>   
>>   static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
>>   					  u32 min_perf, u32 des_perf,
>> -					  u32 max_perf, bool fast_switch)
>> +					  u32 max_perf, u32 epp,
>> +					  bool fast_switch)
>>   {
>>   	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
>> -						   max_perf, fast_switch);
>> +						   max_perf, epp, fast_switch);
>>   }
>>   
>>   static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
>> @@ -459,12 +470,19 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
>>   	return static_call(amd_pstate_init_perf)(cpudata);
>>   }
>>   
>> -static int shmem_update_perf(struct amd_cpudata *cpudata,
>> -			     u32 min_perf, u32 des_perf,
>> -			     u32 max_perf, bool fast_switch)
>> +static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
>> +			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>>   {
>>   	struct cppc_perf_ctrls perf_ctrls;
>>   
>> +	if (cppc_state == AMD_PSTATE_ACTIVE) {
>> +		int ret = shmem_set_epp(cpudata, epp);
>> +
>> +		if (ret)
>> +			return ret;
>> +		WRITE_ONCE(cpudata->epp_cached, epp);
>> +	}
>> +
>>   	perf_ctrls.max_perf = max_perf;
>>   	perf_ctrls.min_perf = min_perf;
>>   	perf_ctrls.desired_perf = des_perf;
>> @@ -545,10 +563,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
>>   
>>   	WRITE_ONCE(cpudata->cppc_req_cached, value);
>>   
>> -	amd_pstate_update_perf(cpudata, min_perf, des_perf,
>> -			       max_perf, fast_switch);
>> +	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
>>   
>>   cpufreq_policy_put:
>> +
>>   	cpufreq_cpu_put(policy);
>>   }
>>   
>> @@ -1545,6 +1563,7 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>>   {
>>   	struct amd_cpudata *cpudata = policy->driver_data;
>>   	u64 value;
>> +	u32 epp;
>>   
>>   	amd_pstate_update_min_max_limit(policy);
>>   
>> @@ -1557,23 +1576,19 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>>   	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
>>   
>>   	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
>> -		WRITE_ONCE(cpudata->epp_cached, 0);
>> -	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, cpudata->epp_cached);
>> -
>> -	WRITE_ONCE(cpudata->cppc_req_cached, value);
>> +		epp = 0;
>> +	else
>> +		epp = READ_ONCE(cpudata->epp_cached);
>>   
>>   	if (trace_amd_pstate_epp_perf_enabled()) {
>> -		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
>> -					  cpudata->epp_cached,
>> +		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
>>   					  cpudata->min_limit_perf,
>>   					  cpudata->max_limit_perf,
>>   					  policy->boost_enabled);
>>   	}
>>   
>> -	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
>> -			       cpudata->max_limit_perf, false);
>> -
>> -	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
>> +	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
>> +				      cpudata->max_limit_perf, epp, false);
>>   }
>>   
>>   static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
>> @@ -1602,7 +1617,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
>>   	return 0;
>>   }
>>   
>> -static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>> +static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>>   {
>>   	u64 max_perf;
>>   	int ret;
>> @@ -1620,17 +1635,19 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>>   					  max_perf, cpudata->boost_state);
>>   	}
>>   
>> -	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
>> -	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
>> +	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
>                                                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> 					
>> On an MSR-based system, none of the values passed here will be used;
>> instead the value in cpudata->cppc_req_cached will be used, no?

Currently; yes.  After the next patch that changes.

Mario Limonciello Dec. 9, 2024, 5:15 p.m. UTC | #3
On 12/9/2024 10:49, Mario Limonciello wrote:
> On 12/9/2024 02:42, Gautham R. Shenoy wrote:
>> Hello Mario,
>>
>> On Sun, Dec 08, 2024 at 12:30:27AM -0600, Mario Limonciello wrote:
>>> For MSR systems the EPP value is in the same register as perf targets
>>> and so dividing them into two separate MSR writes is wasteful.
>>>
>>> In msr_update_perf(), update both EPP and perf values in one write to
>>> MSR_AMD_CPPC_REQ, and cache them if successful.
>>>
>>> To accomplish this, plumb the EPP value into the update_perf call and
>>> modify all its callers to check the return value.
>>>
>>> Reviewed-and-tested-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
>>> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
>>> ---
>>>   drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
>>>   1 file changed, 43 insertions(+), 28 deletions(-)
>>>
>>> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
>>> index d21acd961edcd..dd11ba6c00cc3 100644
>>> --- a/drivers/cpufreq/amd-pstate.c
>>> +++ b/drivers/cpufreq/amd-pstate.c
>>> @@ -222,25 +222,36 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
>>>   }
>>>   static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
>>> -                   u32 des_perf, u32 max_perf, bool fast_switch)
>>> +               u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
>>>   {
>>> +    u64 value;
>>> +
>>> +    value = READ_ONCE(cpudata->cppc_req_cached);
>>
>>
>> There seems to be a mismatch here between the parameters that the API
>> is passing and how this function is *not* using them, instead using
>> cpudata->cppc_req_cached.
>>
>> The expectation seems to be that the max_perf, min_perf, des_perf and
>> epp fields in cpudata->cppc_req_cached would be the same as @des_perf,
>> @max_perf, @min_perf and @epp, no?
>>
>> Or is it that for the MSR update, the value in
>> cpudata->cppc_req_cached takes precedence over the arguments passed?
>>
>> Ideally, the "value" should be recomputed here using (@min_perf |
>> @max_perf | @des_perf | @epp) and that value should be cached as you
>> are doing below.
>>
> 
> Yeah - that's what the next patch does (which I think you probably saw 
> after you reviewed it).
> 
> Do you think maybe I should just squash the two?  Or would you be 
> happier if I re-ordered the two?

FYI - I looked into re-ordering and it's not feasible because you need 
EPP plumbed in order to validate the result.

So I'm going to squash the two patches, and I'll do another one that 
adjusts tracing locations for your other feedback.

Gautham R. Shenoy Dec. 10, 2024, 11:10 a.m. UTC | #4
On Mon, Dec 09, 2024 at 11:15:49AM -0600, Mario Limonciello wrote:
> On 12/9/2024 10:49, Mario Limonciello wrote:
> > On 12/9/2024 02:42, Gautham R. Shenoy wrote:
> > > Hello Mario,
> > > 
> > > On Sun, Dec 08, 2024 at 12:30:27AM -0600, Mario Limonciello wrote:
> > > > [...]
> > > 
> > > 
> > > There seems to be a mismatch here between the parameters that the API
> > > is passing and how this function is *not* using them, instead using
> > > cpudata->cppc_req_cached.
> > > 
> > > The expectation seems to be that the max_perf, min_perf, des_perf and
> > > epp fields in cpudata->cppc_req_cached would be the same as @des_perf,
> > > @max_perf, @min_perf and @epp, no?
> > > 
> > > Or is it that for the MSR update, the value in
> > > cpudata->cppc_req_cached takes precedence over the arguments passed?
> > > 
> > > Ideally, the "value" should be recomputed here using (@min_perf |
> > > @max_perf | @des_perf | @epp) and that value should be cached as you
> > > are doing below.
> > > 
> > 
> > Yeah - that's what the next patch does (which I think you probably saw
> > after you reviewed it).
> > 
> > Do you think maybe I should just squash the two?  Or would you be
> > happier if I re-ordered the two?
> 
> FYI - I looked into re-ordering and it's not feasible because you need EPP
> plumbed in order to validate the result.
> 
> So I'm going to squash the two patches, and I'll do another one that adjusts
> tracing locations for your other feedback.

Yeah, it is not possible to reorder the two. I like the newer version
better.


--
Thanks and Regards
gautham.

Patch

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index d21acd961edcd..dd11ba6c00cc3 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -222,25 +222,36 @@  static s16 shmem_get_epp(struct amd_cpudata *cpudata)
 }
 
 static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			       u32 des_perf, u32 max_perf, bool fast_switch)
+			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
+	u64 value;
+
+	value = READ_ONCE(cpudata->cppc_req_cached);
 	if (fast_switch) {
 		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
 		return 0;
+	} else {
+		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+					READ_ONCE(cpudata->cppc_req_cached));
+		if (ret)
+			return ret;
 	}
 
-	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
-			     READ_ONCE(cpudata->cppc_req_cached));
+	WRITE_ONCE(cpudata->cppc_req_cached, value);
+	WRITE_ONCE(cpudata->epp_cached, epp);
+
+	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
 static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
 					  u32 min_perf, u32 des_perf,
-					  u32 max_perf, bool fast_switch)
+					  u32 max_perf, u32 epp,
+					  bool fast_switch)
 {
 	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
-						   max_perf, fast_switch);
+						   max_perf, epp, fast_switch);
 }
 
 static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -459,12 +470,19 @@  static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata,
-			     u32 min_perf, u32 des_perf,
-			     u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
 	struct cppc_perf_ctrls perf_ctrls;
 
+	if (cppc_state == AMD_PSTATE_ACTIVE) {
+		int ret = shmem_set_epp(cpudata, epp);
+
+		if (ret)
+			return ret;
+		WRITE_ONCE(cpudata->epp_cached, epp);
+	}
+
 	perf_ctrls.max_perf = max_perf;
 	perf_ctrls.min_perf = min_perf;
 	perf_ctrls.desired_perf = des_perf;
@@ -545,10 +563,10 @@  static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf,
-			       max_perf, fast_switch);
+	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
 
 cpufreq_policy_put:
+
 	cpufreq_cpu_put(policy);
 }
 
@@ -1545,6 +1563,7 @@  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value;
+	u32 epp;
 
 	amd_pstate_update_min_max_limit(policy);
 
@@ -1557,23 +1576,19 @@  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, cpudata->min_limit_perf);
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-		WRITE_ONCE(cpudata->epp_cached, 0);
-	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, cpudata->epp_cached);
-
-	WRITE_ONCE(cpudata->cppc_req_cached, value);
+		epp = 0;
+	else
+		epp = READ_ONCE(cpudata->epp_cached);
 
 	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
-					  cpudata->epp_cached,
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
 					  cpudata->min_limit_perf,
 					  cpudata->max_limit_perf,
 					  policy->boost_enabled);
 	}
 
-	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
-			       cpudata->max_limit_perf, false);
-
-	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
+	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+				      cpudata->max_limit_perf, epp, false);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1602,7 +1617,7 @@  static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 {
 	u64 max_perf;
 	int ret;
@@ -1620,17 +1635,19 @@  static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 					  max_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
-	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
 }
 
 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	int ret;
 
 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
 
-	amd_pstate_epp_reenable(cpudata);
+	ret = amd_pstate_epp_reenable(cpudata);
+	if (ret)
+		return ret;
 	cpudata->suspended = false;
 
 	return 0;
@@ -1654,10 +1671,8 @@  static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 					  min_perf, min_perf, policy->boost_enabled);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
-	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
-
-	return 0;
+	return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
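
Given the squash agreed on in the thread above, the combined helper could
plausibly end up shaped like the following sketch (assuming the
AMD_CPPC_*_MASK names from the diff; the actual v3 may differ):

static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
                           u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
        u64 value, prev;

        value = prev = READ_ONCE(cpudata->cppc_req_cached);

        /* rebuild the request word from the arguments, EPP included */
        value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
                   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
        value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
        value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
        value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
        value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

        /* nothing changed (including EPP), skip the MSR write */
        if (value == prev)
                return 0;

        if (fast_switch) {
                wrmsrl(MSR_AMD_CPPC_REQ, value);
        } else {
                int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);

                if (ret)
                        return ret;
        }

        /* cache only after the write succeeded */
        WRITE_ONCE(cpudata->cppc_req_cached, value);
        WRITE_ONCE(cpudata->epp_cached, epp);

        return 0;
}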