[06/12] cpufreq/amd-pstate: Convert all perf values to u8

Message ID 20250205112523.201101-7-dhananjay.ugwekar@amd.com (mailing list archive)
State New
Delegated to: Mario Limonciello
Series cpufreq/amd-pstate: Fixes and optimizations

Commit Message

Dhananjay Ugwekar Feb. 5, 2025, 11:25 a.m. UTC
All perf values are always within the 0-255 range, hence convert their
data type to u8 everywhere.

Signed-off-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>
---
 drivers/cpufreq/amd-pstate-trace.h | 46 +++++++++++------------
 drivers/cpufreq/amd-pstate.c       | 60 +++++++++++++++---------------
 drivers/cpufreq/amd-pstate.h       | 18 ++++-----
 3 files changed, 62 insertions(+), 62 deletions(-)
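
For reference, a minimal standalone sketch of why u8 is wide enough: each CPPC
perf value (and the EPP value) occupies an 8-bit field of a 64-bit register, so
extracting a field can never yield a value above 255. The mask names and field
layout below are illustrative assumptions for the sketch, not the driver's
actual defines (the driver itself extracts fields with FIELD_GET() and masks
such as AMD_CPPC_EPP_PERF_MASK).

/* Hypothetical layout: one 8-bit perf/EPP field per byte of a request value */
#include <stdint.h>
#include <stdio.h>

#define CPPC_MAX_PERF_MASK  0x00000000000000ffULL
#define CPPC_MIN_PERF_MASK  0x000000000000ff00ULL
#define CPPC_DES_PERF_MASK  0x0000000000ff0000ULL
#define CPPC_EPP_PERF_MASK  0x00000000ff000000ULL

/* FIELD_GET()-style helper: mask, then shift down by the mask's low bit */
static uint8_t field_get8(uint64_t mask, uint64_t reg)
{
	return (uint8_t)((reg & mask) >> __builtin_ctzll(mask));
}

int main(void)
{
	uint64_t cppc_req = 0x00000000c0a61effULL;	/* example cached value */

	/* Each extracted value fits in a u8 by construction */
	printf("max=%u min=%u des=%u epp=%u\n",
	       field_get8(CPPC_MAX_PERF_MASK, cppc_req),
	       field_get8(CPPC_MIN_PERF_MASK, cppc_req),
	       field_get8(CPPC_DES_PERF_MASK, cppc_req),
	       field_get8(CPPC_EPP_PERF_MASK, cppc_req));
	return 0;
}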

Comments

Mario Limonciello Feb. 5, 2025, 5:46 p.m. UTC | #1
On 2/5/2025 05:25, Dhananjay Ugwekar wrote:
> All perf values are always within the 0-255 range, hence convert their
> data type to u8 everywhere.
> 
> Signed-off-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>

Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>

I'll queue this for 6.15.

Patch

diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 8d692415d905..f457d4af2c62 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,9 +24,9 @@ 
 
 TRACE_EVENT(amd_pstate_perf,
 
-	TP_PROTO(unsigned long min_perf,
-		 unsigned long target_perf,
-		 unsigned long capacity,
+	TP_PROTO(u8 min_perf,
+		 u8 target_perf,
+		 u8 capacity,
 		 u64 freq,
 		 u64 mperf,
 		 u64 aperf,
@@ -47,9 +47,9 @@  TRACE_EVENT(amd_pstate_perf,
 		),
 
 	TP_STRUCT__entry(
-		__field(unsigned long, min_perf)
-		__field(unsigned long, target_perf)
-		__field(unsigned long, capacity)
+		__field(u8, min_perf)
+		__field(u8, target_perf)
+		__field(u8, capacity)
 		__field(unsigned long long, freq)
 		__field(unsigned long long, mperf)
 		__field(unsigned long long, aperf)
@@ -70,10 +70,10 @@  TRACE_EVENT(amd_pstate_perf,
 		__entry->fast_switch = fast_switch;
 		),
 
-	TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
-		  (unsigned long)__entry->min_perf,
-		  (unsigned long)__entry->target_perf,
-		  (unsigned long)__entry->capacity,
+	TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+		  (u8)__entry->min_perf,
+		  (u8)__entry->target_perf,
+		  (u8)__entry->capacity,
 		  (unsigned long long)__entry->freq,
 		  (unsigned long long)__entry->mperf,
 		  (unsigned long long)__entry->aperf,
@@ -86,10 +86,10 @@  TRACE_EVENT(amd_pstate_perf,
 TRACE_EVENT(amd_pstate_epp_perf,
 
 	TP_PROTO(unsigned int cpu_id,
-		 unsigned int highest_perf,
-		 unsigned int epp,
-		 unsigned int min_perf,
-		 unsigned int max_perf,
+		 u8 highest_perf,
+		 u8 epp,
+		 u8 min_perf,
+		 u8 max_perf,
 		 bool boost
 		 ),
 
@@ -102,10 +102,10 @@  TRACE_EVENT(amd_pstate_epp_perf,
 
 	TP_STRUCT__entry(
 		__field(unsigned int, cpu_id)
-		__field(unsigned int, highest_perf)
-		__field(unsigned int, epp)
-		__field(unsigned int, min_perf)
-		__field(unsigned int, max_perf)
+		__field(u8, highest_perf)
+		__field(u8, epp)
+		__field(u8, min_perf)
+		__field(u8, max_perf)
 		__field(bool, boost)
 		),
 
@@ -118,12 +118,12 @@  TRACE_EVENT(amd_pstate_epp_perf,
 		__entry->boost = boost;
 		),
 
-	TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
 		  (unsigned int)__entry->cpu_id,
-		  (unsigned int)__entry->min_perf,
-		  (unsigned int)__entry->max_perf,
-		  (unsigned int)__entry->highest_perf,
-		  (unsigned int)__entry->epp,
+		  (u8)__entry->min_perf,
+		  (u8)__entry->max_perf,
+		  (u8)__entry->highest_perf,
+		  (u8)__entry->epp,
 		  (bool)__entry->boost
 		 )
 );
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index e179e929b941..dd4f23fa2587 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -186,7 +186,7 @@  static inline int get_mode_idx_from_str(const char *str, size_t size)
 static DEFINE_MUTEX(amd_pstate_limits_lock);
 static DEFINE_MUTEX(amd_pstate_driver_lock);
 
-static s16 msr_get_epp(struct amd_cpudata *cpudata)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
 {
 	u64 value;
 	int ret;
@@ -207,7 +207,7 @@  static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_get_epp)(cpudata);
 }
 
-static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
 {
 	u64 epp;
 	int ret;
@@ -218,11 +218,11 @@  static s16 shmem_get_epp(struct amd_cpudata *cpudata)
 		return ret;
 	}
 
-	return (s16)(epp & 0xff);
+	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
 }
 
-static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
 	u64 value, prev;
 
@@ -257,15 +257,15 @@  static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
 static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
-					  u32 min_perf, u32 des_perf,
-					  u32 max_perf, u32 epp,
+					  u8 min_perf, u8 des_perf,
+					  u8 max_perf, u8 epp,
 					  bool fast_switch)
 {
 	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
 						   max_perf, epp, fast_switch);
 }
 
-static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 {
 	u64 value, prev;
 	int ret;
@@ -292,12 +292,12 @@  static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
 
 DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
 
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
 {
 	return static_call(amd_pstate_set_epp)(cpudata, epp);
 }
 
-static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
 {
 	int ret;
 	struct cppc_perf_ctrls perf_ctrls;
@@ -320,7 +320,7 @@  static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
 					    int pref_index)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	int epp;
+	u8 epp;
 
 	if (!pref_index)
 		epp = cpudata->epp_default;
@@ -479,8 +479,8 @@  static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
 	struct cppc_perf_ctrls perf_ctrls;
 
@@ -531,14 +531,14 @@  static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
 	return true;
 }
 
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
-			      u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+			      u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
 {
 	unsigned long max_freq;
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
-	u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+	u8 nominal_perf = READ_ONCE(cpudata->nominal_perf);
 
-	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+	des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
 
 	max_freq = READ_ONCE(cpudata->max_limit_freq);
 	policy->cur = div_u64(des_perf * max_freq, max_perf);
@@ -550,7 +550,7 @@  static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 
 	/* limit the max perf when core performance boost feature is disabled */
 	if (!cpudata->boost_supported)
-		max_perf = min_t(unsigned long, nominal_perf, max_perf);
+		max_perf = min_t(u8, nominal_perf, max_perf);
 
 	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
 		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -591,7 +591,8 @@  static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
 
 static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
 {
-	u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
+	u8 max_limit_perf, min_limit_perf, max_perf;
+	u32 max_freq;
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	max_perf = READ_ONCE(cpudata->highest_perf);
@@ -615,7 +616,7 @@  static int amd_pstate_update_freq(struct cpufreq_policy *policy,
 {
 	struct cpufreq_freqs freqs;
 	struct amd_cpudata *cpudata = policy->driver_data;
-	unsigned long des_perf, cap_perf;
+	u8 des_perf, cap_perf;
 
 	if (!cpudata->max_freq)
 		return -ENODEV;
@@ -670,8 +671,7 @@  static void amd_pstate_adjust_perf(unsigned int cpu,
 				   unsigned long target_perf,
 				   unsigned long capacity)
 {
-	unsigned long max_perf, min_perf, des_perf,
-		      cap_perf, min_limit_perf;
+	u8 max_perf, min_perf, des_perf, cap_perf, min_limit_perf;
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct amd_cpudata *cpudata;
 
@@ -904,8 +904,8 @@  static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
 {
 	int ret;
 	u32 min_freq, max_freq;
-	u32 highest_perf, nominal_perf, nominal_freq;
-	u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+	u8 highest_perf, nominal_perf, lowest_nonlinear_perf;
+	u32 nominal_freq, lowest_nonlinear_freq;
 	struct cppc_perf_caps cppc_perf;
 
 	ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -1112,7 +1112,7 @@  static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
 static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
 					    char *buf)
 {
-	u32 perf;
+	u8 perf;
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	perf = READ_ONCE(cpudata->highest_perf);
@@ -1123,7 +1123,7 @@  static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
 static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
 						char *buf)
 {
-	u32 perf;
+	u8 perf;
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	perf = READ_ONCE(cpudata->prefcore_ranking);
@@ -1186,7 +1186,7 @@  static ssize_t show_energy_performance_preference(
 				struct cpufreq_policy *policy, char *buf)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	int preference;
+	u8 preference;
 
 	switch (cpudata->epp_cached) {
 	case AMD_CPPC_EPP_PERFORMANCE:
@@ -1548,7 +1548,7 @@  static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	u32 epp;
+	u8 epp;
 
 	amd_pstate_update_min_max_limit(policy);
 
@@ -1597,7 +1597,7 @@  static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	u64 max_perf;
+	u8 max_perf;
 	int ret;
 
 	ret = amd_pstate_cppc_enable(true);
@@ -1634,7 +1634,7 @@  static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
 static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
-	int min_perf;
+	u8 min_perf;
 
 	if (cpudata->suspended)
 		return 0;
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index 9747e3be6cee..19d405c6d805 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -70,13 +70,13 @@  struct amd_cpudata {
 	struct	freq_qos_request req[2];
 	u64	cppc_req_cached;
 
-	u32	highest_perf;
-	u32	nominal_perf;
-	u32	lowest_nonlinear_perf;
-	u32	lowest_perf;
-	u32     prefcore_ranking;
-	u32     min_limit_perf;
-	u32     max_limit_perf;
+	u8	highest_perf;
+	u8	nominal_perf;
+	u8	lowest_nonlinear_perf;
+	u8	lowest_perf;
+	u8	prefcore_ranking;
+	u8	min_limit_perf;
+	u8	max_limit_perf;
 	u32     min_limit_freq;
 	u32     max_limit_freq;
 
@@ -93,11 +93,11 @@  struct amd_cpudata {
 	bool	hw_prefcore;
 
 	/* EPP feature related attributes*/
-	s16	epp_cached;
+	u8	epp_cached;
 	u32	policy;
 	u64	cppc_cap1_cached;
 	bool	suspended;
-	s16	epp_default;
+	u8	epp_default;
 };
 
 /*