Message ID | 20250206215659.3350066-10-superm1@kernel.org (mailing list archive)
---|---
State | Superseded, archived
Delegated to | Mario Limonciello
Series | amd-pstate cleanups
On 2/7/2025 3:26 AM, Mario Limonciello wrote:
> From: Mario Limonciello <mario.limonciello@amd.com>
>
> The EPP tracing is done by the caller today, but this precludes including
> information about whether the CPPC request has changed.
>
> Move it into the update_perf and set_epp functions and include information
> about whether the request has changed from the last one.

Looks good to me,

Reviewed-by: Dhananjay Ugwekar <dhananjay.ugwekar@amd.com>

Thanks,
Dhananjay

>
> Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
> ---
>  drivers/cpufreq/amd-pstate-trace.h |  13 +++-
>  drivers/cpufreq/amd-pstate.c       | 119 ++++++++++++++++++-----------
>  2 files changed, 83 insertions(+), 49 deletions(-)
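For reference, the event's TP_printk format gains the new field at the end. With the tracepoint enabled (typically via /sys/kernel/tracing/events/amd_cpu/amd_pstate_epp_perf/enable; the amd_cpu group name and tracefs path are assumptions, check your kernel), a line that previously read

    cpu0: [10<->166]/166, epp=128, boost=1

now reads (perf/epp values illustrative)

    cpu0: [10<->166]/166, epp=128, boost=1, changed=0

with changed=1 whenever the request being written differs from the cached one.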
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index f457d4af2c62e..32e1bdc588c52 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -90,7 +90,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		 u8 epp,
 		 u8 min_perf,
 		 u8 max_perf,
-		 bool boost
+		 bool boost,
+		 bool changed
 		 ),
 
 	TP_ARGS(cpu_id,
@@ -98,7 +99,8 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		epp,
 		min_perf,
 		max_perf,
-		boost),
+		boost,
+		changed),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, cpu_id)
@@ -107,6 +109,7 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		__field(u8, min_perf)
 		__field(u8, max_perf)
 		__field(bool, boost)
+		__field(bool, changed)
 		),
 
 	TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
 		__entry->min_perf = min_perf;
 		__entry->max_perf = max_perf;
 		__entry->boost = boost;
+		__entry->changed = changed;
 		),
 
-	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u",
+	TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
 		  (unsigned int)__entry->cpu_id,
 		  (u8)__entry->min_perf,
 		  (u8)__entry->max_perf,
 		  (u8)__entry->highest_perf,
 		  (u8)__entry->epp,
-		  (bool)__entry->boost
+		  (bool)__entry->boost,
+		  (bool)__entry->changed
 		  )
 );
 
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 2aa3d5be2efe5..e66ccfce5893f 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -228,9 +228,10 @@ static u8 shmem_get_epp(struct amd_cpudata *cpudata)
 	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
 }
 
-static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 
 	value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -242,6 +243,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -256,24 +269,26 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	}
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
-	WRITE_ONCE(cpudata->epp_cached, epp);
+	if (epp != cpudata->epp_cached)
+		WRITE_ONCE(cpudata->epp_cached, epp);
 
 	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
 					 u8 min_perf, u8 des_perf,
 					 u8 max_perf, u8 epp,
 					 bool fast_switch)
 {
-	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+	return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
 						   max_perf, epp, fast_switch);
 }
 
-static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value, prev;
 	int ret;
 
@@ -283,6 +298,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 	value &= ~AMD_CPPC_EPP_PERF_MASK;
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -301,18 +329,32 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u8 epp)
 
 DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
 
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	return static_call(amd_pstate_set_epp)(cpudata, epp);
+	return static_call(amd_pstate_set_epp)(policy, epp);
 }
 
-static int shmem_set_epp(struct amd_cpudata *cpudata, u8 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
 {
-	int ret;
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
+	int ret;
 
 	lockdep_assert_held(&cpudata->lock);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+					  epp,
+					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+						    cpudata->cppc_req_cached),
+					  policy->boost_enabled,
+					  epp != cpudata->epp_cached);
+	}
+
 	if (epp == cpudata->epp_cached)
 		return 0;
 
@@ -345,17 +387,7 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
 		return -EBUSY;
 	}
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		union perf_cached perf = cpudata->perf;
-
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  epp,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
-					  policy->boost_enabled);
-	}
-
-	return amd_pstate_set_epp(cpudata, epp);
+	return amd_pstate_set_epp(policy, epp);
 }
 
 static inline int msr_cppc_enable(bool enable)
@@ -498,15 +530,16 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
 			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
 {
+	struct amd_cpudata *cpudata = policy->driver_data;
 	struct cppc_perf_ctrls perf_ctrls;
 	u64 value, prev;
 	int ret;
 
 	if (cppc_state == AMD_PSTATE_ACTIVE) {
-		int ret = shmem_set_epp(cpudata, epp);
+		int ret = shmem_set_epp(policy, epp);
 
 		if (ret)
 			return ret;
@@ -521,6 +554,18 @@ static int shmem_update_perf(struct amd_cpudata *cpudata, u8 min_perf,
 	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
 	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
 
+	if (trace_amd_pstate_epp_perf_enabled()) {
+		union perf_cached perf = cpudata->perf;
+
+		trace_amd_pstate_epp_perf(cpudata->cpu,
+					  perf.highest_perf,
+					  epp,
+					  min_perf,
+					  max_perf,
+					  policy->boost_enabled,
+					  value != prev);
+	}
+
 	if (value == prev)
 		return 0;
 
@@ -598,7 +643,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 					    cpudata->cpu, fast_switch);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
+	amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
 }
 
 static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -1546,7 +1591,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 			return ret;
 		WRITE_ONCE(cpudata->cppc_req_cached, value);
 	}
-	ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
 	if (ret)
 		return ret;
 
@@ -1588,14 +1633,8 @@ static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 		epp = READ_ONCE(cpudata->epp_cached);
 
 	perf = READ_ONCE(cpudata->perf);
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, epp,
-					  perf.min_limit_perf,
-					  perf.max_limit_perf,
-					  policy->boost_enabled);
-	}
 
-	return amd_pstate_update_perf(cpudata, perf.min_limit_perf, 0U,
+	return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
 				      perf.max_limit_perf, epp, false);
 }
 
@@ -1635,14 +1674,9 @@ static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
 	if (ret)
 		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  cpudata->epp_cached,
-					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
-					  perf.highest_perf, policy->boost_enabled);
-	}
+	guard(mutex)(&cpudata->lock);
 
-	return amd_pstate_update_perf(cpudata, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
+	return amd_pstate_update_perf(policy, 0, 0, perf.highest_perf, cpudata->epp_cached, false);
 }
 
 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
@@ -1668,14 +1702,9 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 	if (cpudata->suspended)
 		return 0;
 
-	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
-					  AMD_CPPC_EPP_BALANCE_POWERSAVE,
-					  perf.lowest_perf, perf.lowest_perf,
-					  policy->boost_enabled);
-	}
+	guard(mutex)(&cpudata->lock);
 
-	return amd_pstate_update_perf(cpudata, perf.lowest_perf, 0, perf.lowest_perf,
+	return amd_pstate_update_perf(policy, perf.lowest_perf, 0, perf.lowest_perf,
 				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
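The changed flag reported above is just a comparison of the freshly packed request word against the cached one (value != prev in msr_update_perf()/msr_set_epp()/shmem_update_perf(), epp != cpudata->epp_cached in shmem_set_epp()). A minimal standalone C sketch of that pattern follows, assuming the usual MSR_AMD_CPPC_REQ field layout (max_perf bits 0-7, min_perf 8-15, des_perf 16-23, epp 24-31; treat the exact positions as illustrative) and a simplified stand-in for the kernel's FIELD_PREP():

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks mirroring the assumed MSR_AMD_CPPC_REQ layout. */
#define CPPC_MAX_PERF_MASK	(0xffULL << 0)
#define CPPC_MIN_PERF_MASK	(0xffULL << 8)
#define CPPC_DES_PERF_MASK	(0xffULL << 16)
#define CPPC_EPP_PERF_MASK	(0xffULL << 24)

/* Simplified FIELD_PREP(): shift the value into the mask's position. */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

/* Pack one CPPC request word, as the cppc_req_cached updates above do. */
static uint64_t build_req(uint8_t min, uint8_t des, uint8_t max, uint8_t epp)
{
	return field_prep(CPPC_MIN_PERF_MASK, min) |
	       field_prep(CPPC_DES_PERF_MASK, des) |
	       field_prep(CPPC_MAX_PERF_MASK, max) |
	       field_prep(CPPC_EPP_PERF_MASK, epp);
}

int main(void)
{
	uint64_t prev = build_req(10, 0, 166, 128);	/* cached request */
	uint64_t next = build_req(10, 0, 166, 128);	/* identical values */

	printf("changed=%d\n", next != prev);		/* changed=0 */

	next = build_req(10, 0, 166, 0);		/* only EPP differs */
	printf("changed=%d\n", next != prev);		/* changed=1 */
	return 0;
}

Note that the patch places the trace call before the early "if (value == prev) return 0;" exit, so the event now fires even for requests that end up being skipped; the new flag is what distinguishes the two cases.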