Message ID | 39ce335d80306e7e9ed9e169994348e1f65e1679.1715065568.git.perry.yuan@amd.com (mailing list archive)
---|---
State | Superseded, archived
Series | AMD Pstate Driver Fixes and Improvements
On 5/7/2024 02:15, Perry Yuan wrote:
> replace the usage of the deprecated boot_cpu_has() function with

One nit. Capitalize the "R" in replace.

> the modern cpu_feature_enabled() function. The switch to cpu_feature_enabled()
> ensures compatibility with the latest CPU feature detection mechanisms and
> improves code maintainability.
>
> Suggested-by: Borislav Petkov (AMD) <bp@alien8.de>
> Signed-off-by: Perry Yuan <perry.yuan@amd.com>

Acked-by: Mario Limonciello <mario.limonciello@amd.com>

> ---
>  drivers/cpufreq/amd-pstate.c | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index e94b55a7bb59..7145248b38ec 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -124,7 +124,7 @@ static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
>  	 * broken BIOS lack of nominal_freq and lowest_freq capabilities
>  	 * definition in ACPI tables
>  	 */
> -	if (boot_cpu_has(X86_FEATURE_ZEN2)) {
> +	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
>  		quirks = dmi->driver_data;
>  		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
>  		return 1;
> @@ -166,7 +166,7 @@ static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
>  	u64 epp;
>  	int ret;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		if (!cppc_req_cached) {
>  			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
>  					    &cppc_req_cached);
> @@ -219,7 +219,7 @@ static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
>  	int ret;
>  	struct cppc_perf_ctrls perf_ctrls;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		u64 value = READ_ONCE(cpudata->cppc_req_cached);
>
>  		value &= ~GENMASK_ULL(31, 24);
> @@ -705,7 +705,7 @@ static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
>  {
>  	int ret;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		u64 cap1;
>
>  		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
> @@ -941,7 +941,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
>  	/* It will be updated by governor */
>  	policy->cur = policy->cpuinfo.min_freq;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC))
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC))
>  		policy->fast_switch_possible = true;
>
>  	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
> @@ -1174,7 +1174,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
>
>  	cppc_state = mode;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
>  		return 0;
>
>  	for_each_present_cpu(cpu) {
> @@ -1404,7 +1404,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
>  	else
>  		policy->policy = CPUFREQ_POLICY_POWERSAVE;
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
>  		if (ret)
>  			return ret;
> @@ -1487,7 +1487,7 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
>  		epp = 0;
>
>  	/* Set initial EPP value */
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		value &= ~GENMASK_ULL(31, 24);
>  		value |= (u64)epp << 24;
>  	}
> @@ -1526,7 +1526,7 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
>  	value = READ_ONCE(cpudata->cppc_req_cached);
>  	max_perf = READ_ONCE(cpudata->highest_perf);
>
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
>  	} else {
>  		perf_ctrls.max_perf = max_perf;
> @@ -1560,7 +1560,7 @@ static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
>  	value = READ_ONCE(cpudata->cppc_req_cached);
>
>  	mutex_lock(&amd_pstate_limits_lock);
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
>
>  		/* Set max perf same as min perf */
> @@ -1748,7 +1748,7 @@ static int __init amd_pstate_init(void)
>  	 */
>  	if (amd_pstate_acpi_pm_profile_undefined() ||
>  	    amd_pstate_acpi_pm_profile_server() ||
> -	    !boot_cpu_has(X86_FEATURE_CPPC)) {
> +	    !cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		pr_info("driver load is disabled, boot with specific mode to enable this\n");
>  		return -ENODEV;
>  	}
> @@ -1767,7 +1767,7 @@ static int __init amd_pstate_init(void)
>  	}
>
>  	/* capability check */
> -	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
>  		pr_debug("AMD CPPC MSR based functionality is supported\n");
>  		if (cppc_state != AMD_PSTATE_ACTIVE)
>  			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
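For context on the distinction the commit message relies on: boot_cpu_has() is always a runtime test against the boot CPU's capability bitmap, while cpu_feature_enabled() additionally consults a compile-time mask of features disabled by Kconfig, so code guarded by it can be discarded entirely at build time. The following stand-alone C sketch illustrates the pattern only; boot_cpu_caps, DISABLED_MASK, and the bit number are hypothetical stand-ins, and the real definitions live in arch/x86/include/asm/cpufeature.h and cpufeatures.h.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the runtime capability bitmap filled in at early boot. */
static bool boot_cpu_caps[64];

/* Like the kernel's boot_cpu_has(): always a runtime lookup. */
static bool boot_cpu_has(int bit)
{
	return boot_cpu_caps[bit];
}

/*
 * Compile-time constant mask of features configured out; because it is
 * constant, the compiler can eliminate branches guarded by a disabled bit.
 */
#define DISABLED_MASK		0ULL	/* nothing disabled in this sketch */
#define DISABLED_FEATURE(bit)	((DISABLED_MASK >> (bit)) & 1ULL)

#define cpu_feature_enabled(bit) \
	(DISABLED_FEATURE(bit) ? false : boot_cpu_has(bit))

#define X86_FEATURE_CPPC	27	/* illustrative bit number only */

int main(void)
{
	boot_cpu_caps[X86_FEATURE_CPPC] = true;

	if (cpu_feature_enabled(X86_FEATURE_CPPC))
		printf("MSR-based CPPC path selected\n");
	else
		printf("shared-memory CPPC path selected\n");
	return 0;
}

In the real kernel the runtime half is also faster than a plain bitmap read: cpu_feature_enabled() falls back to static_cpu_has(), which patches the check into a direct branch via the alternatives mechanism.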