Message ID | 20211029130241.1984459-9-ray.huang@amd.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | cpufreq: introduce a new AMD CPU frequency control mechanism | expand |
On 10/29/2021 08:02, Huang Rui wrote: > In some old Zen based processors, they are using the shared memory that > exposed from ACPI SBIOS. I don't think this is only "old" processors. I think there are "new" processors that just don't happen to implement the MSR too. > > Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com> > Signed-off-by: Huang Rui <ray.huang@amd.com> > --- > drivers/cpufreq/amd-pstate.c | 58 ++++++++++++++++++++++++++++++++---- > 1 file changed, 53 insertions(+), 5 deletions(-) > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > index 55ff03f85608..d399938d6d85 100644 > --- a/drivers/cpufreq/amd-pstate.c > +++ b/drivers/cpufreq/amd-pstate.c > @@ -73,6 +73,19 @@ static inline int pstate_enable(bool enable) > return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable ? 1 : 0); > } > > +static int cppc_enable(bool enable) > +{ > + int cpu, ret = 0; > + > + for_each_online_cpu(cpu) { I wonder if this should also be changed to present CPU instead of offline CPU. Otherwise could this turn into a situation that the user starts with some CPU's offlined and enables them later but this doesn't end up applying to the CPUs that were started offlined and changed? > + ret = cppc_set_enable(cpu, enable ? 
1 : 0); > + if (ret) > + return ret; > + } > + > + return ret; > +} > + > DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); > > static inline int amd_pstate_enable(bool enable) > @@ -103,6 +116,24 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) > return 0; > } > > +static int cppc_init_perf(struct amd_cpudata *cpudata) > +{ > + struct cppc_perf_caps cppc_perf; > + > + int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); > + if (ret) > + return ret; > + > + WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); > + > + WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); > + WRITE_ONCE(cpudata->lowest_nonlinear_perf, > + cppc_perf.lowest_nonlinear_perf); > + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); > + > + return 0; > +} > + > DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); > > static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) > @@ -120,6 +151,19 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, > READ_ONCE(cpudata->cppc_req_cached)); > } > > +static void cppc_update_perf(struct amd_cpudata *cpudata, > + u32 min_perf, u32 des_perf, > + u32 max_perf, bool fast_switch) > +{ > + struct cppc_perf_ctrls perf_ctrls; > + > + perf_ctrls.max_perf = max_perf; > + perf_ctrls.min_perf = min_perf; > + perf_ctrls.desired_perf = des_perf; > + > + cppc_set_perf(cpudata->cpu, &perf_ctrls); > +} > + > DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); > > static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, > @@ -346,7 +390,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) > /* It will be updated by governor */ > policy->cur = policy->cpuinfo.min_freq; > > - policy->fast_switch_possible = true; > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) > + policy->fast_switch_possible = true; > > ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], > FREQ_QOS_MIN, policy->cpuinfo.min_freq); > @@ -397,7 +442,6 @@ static struct 
cpufreq_driver amd_pstate_driver = { > .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, > .verify = amd_pstate_verify, > .target = amd_pstate_target, > - .adjust_perf = amd_pstate_adjust_perf, > .init = amd_pstate_cpu_init, > .exit = amd_pstate_cpu_exit, > .name = "amd-pstate", > @@ -421,10 +465,14 @@ static int __init amd_pstate_init(void) > return -EEXIST; > > /* capability check */ > - if (!boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > - pr_debug("%s, AMD CPPC MSR based functionality is not supported\n", > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > + pr_debug("%s, AMD CPPC MSR based functionality is supported\n", > __func__); > - return -ENODEV; > + amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf; > + } else { > + static_call_update(amd_pstate_enable, cppc_enable); > + static_call_update(amd_pstate_init_perf, cppc_init_perf); > + static_call_update(amd_pstate_update_perf, cppc_update_perf); > } > > /* enable amd pstate feature */ >
On Fri, Oct 29, 2021 at 10:20:09PM +0800, Limonciello, Mario wrote: > On 10/29/2021 08:02, Huang Rui wrote: > > In some old Zen based processors, they are using the shared memory that > > exposed from ACPI SBIOS. > > I don't think this is only "old" processors. I think there are "new" > processors that just don't happen to implement the MSR too. > Yes, I will correct the description. > > > > Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com> > > Signed-off-by: Huang Rui <ray.huang@amd.com> > > --- > > drivers/cpufreq/amd-pstate.c | 58 ++++++++++++++++++++++++++++++++---- > > 1 file changed, 53 insertions(+), 5 deletions(-) > > > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > > index 55ff03f85608..d399938d6d85 100644 > > --- a/drivers/cpufreq/amd-pstate.c > > +++ b/drivers/cpufreq/amd-pstate.c > > @@ -73,6 +73,19 @@ static inline int pstate_enable(bool enable) > > return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable ? 1 : 0); > > } > > > > +static int cppc_enable(bool enable) > > +{ > > + int cpu, ret = 0; > > + > > + for_each_online_cpu(cpu) { > > I wonder if this should also be changed to present CPU instead of > offline CPU. Otherwise could this turn into a situation that the user > starts with some CPU's offlined and enables them later but this doesn't > end up applying to the CPUs that were started offlined and changed? > Yes, that makes sense. It is actually similar to the previous acpi_cpc_valid fix patch. I will update it in V4. Thanks, Ray
On 10/29/21 8:02 AM, Huang Rui wrote: > In some old Zen based processors, they are using the shared memory that > exposed from ACPI SBIOS. With this you present two different approaches for support in the driver, MSRs and shared memory. For processors using shared memory you use the shared memory defined in the ACPI tables but access the MSRs directly. Is there any concern that the MSR registers (defined in patch 2/21) can differ from what is defined in the ACPI tables? Should you use the drivers/acpi interfaces for MSRs also? -Nathan > > Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com> > Signed-off-by: Huang Rui <ray.huang@amd.com> > --- > drivers/cpufreq/amd-pstate.c | 58 ++++++++++++++++++++++++++++++++---- > 1 file changed, 53 insertions(+), 5 deletions(-) > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > index 55ff03f85608..d399938d6d85 100644 > --- a/drivers/cpufreq/amd-pstate.c > +++ b/drivers/cpufreq/amd-pstate.c > @@ -73,6 +73,19 @@ static inline int pstate_enable(bool enable) > return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable ? 1 : 0); > } > > +static int cppc_enable(bool enable) > +{ > + int cpu, ret = 0; > + > + for_each_online_cpu(cpu) { > + ret = cppc_set_enable(cpu, enable ? 
1 : 0); > + if (ret) > + return ret; > + } > + > + return ret; > +} > + > DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); > > static inline int amd_pstate_enable(bool enable) > @@ -103,6 +116,24 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) > return 0; > } > > +static int cppc_init_perf(struct amd_cpudata *cpudata) > +{ > + struct cppc_perf_caps cppc_perf; > + > + int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); > + if (ret) > + return ret; > + > + WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); > + > + WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); > + WRITE_ONCE(cpudata->lowest_nonlinear_perf, > + cppc_perf.lowest_nonlinear_perf); > + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); > + > + return 0; > +} > + > DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); > > static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) > @@ -120,6 +151,19 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, > READ_ONCE(cpudata->cppc_req_cached)); > } > > +static void cppc_update_perf(struct amd_cpudata *cpudata, > + u32 min_perf, u32 des_perf, > + u32 max_perf, bool fast_switch) > +{ > + struct cppc_perf_ctrls perf_ctrls; > + > + perf_ctrls.max_perf = max_perf; > + perf_ctrls.min_perf = min_perf; > + perf_ctrls.desired_perf = des_perf; > + > + cppc_set_perf(cpudata->cpu, &perf_ctrls); > +} > + > DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); > > static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, > @@ -346,7 +390,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) > /* It will be updated by governor */ > policy->cur = policy->cpuinfo.min_freq; > > - policy->fast_switch_possible = true; > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) > + policy->fast_switch_possible = true; > > ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], > FREQ_QOS_MIN, policy->cpuinfo.min_freq); > @@ -397,7 +442,6 @@ static struct 
cpufreq_driver amd_pstate_driver = { > .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, > .verify = amd_pstate_verify, > .target = amd_pstate_target, > - .adjust_perf = amd_pstate_adjust_perf, > .init = amd_pstate_cpu_init, > .exit = amd_pstate_cpu_exit, > .name = "amd-pstate", > @@ -421,10 +465,14 @@ static int __init amd_pstate_init(void) > return -EEXIST; > > /* capability check */ > - if (!boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > - pr_debug("%s, AMD CPPC MSR based functionality is not supported\n", > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > + pr_debug("%s, AMD CPPC MSR based functionality is supported\n", > __func__); > - return -ENODEV; > + amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf; > + } else { > + static_call_update(amd_pstate_enable, cppc_enable); > + static_call_update(amd_pstate_init_perf, cppc_init_perf); > + static_call_update(amd_pstate_update_perf, cppc_update_perf); > } > > /* enable amd pstate feature */ >
On Wed, Nov 03, 2021 at 02:46:37AM +0800, Fontenot, Nathan wrote: > On 10/29/21 8:02 AM, Huang Rui wrote: > > In some old Zen based processors, they are using the shared memory that > > exposed from ACPI SBIOS. > > With this you present two different approaches for support in the driver, > MSRs and shared memory. For processors using shared memory you use the > shared memory defined in the ACPI tables but access the MSRs directly. > > Is there any concern that the MSR registers (defined in patch 2/21) can > differ from what is defined in the ACPI tables? > > Should you use the drivers/acpi interfaces for MSRs also? That's a very good question. Thanks for raising this. I considered the reasons below: 1. We would like to support the fast switch function, which requires direct MSR register operations. And it will have better performance with the schedutil governor. 2. There are some differences between the MSR and shared memory definitions. E.g., the CPPCEnableRegister of the shared memory solution requires us to enable the field on each thread. However, the full-MSR one is per package, and we only program it once. 3. So far, I have received many issues reported from the community, and most of them are caused by SBIOS issues. E.g., Steven's SBIOS has an additional object that was modified by the motherboard OEM vendor. (Thanks to Steven for working with us to address the issue.) Using the MSR definitions directly is friendly for more platforms. 4. I would like to keep cppc_acpi common for the ACPI spec, because it's also used by ARM SOCs, and won't add x86/AMD-specific things in cppc_acpi. Using the MSRs directly can be more straightforward in the amd-pstate driver, like in intel_pstate, as well.
Thanks, Ray > > -Nathan > > > > > Signed-off-by: Jinzhou Su <Jinzhou.Su@amd.com> > > Signed-off-by: Huang Rui <ray.huang@amd.com> > > --- > > drivers/cpufreq/amd-pstate.c | 58 ++++++++++++++++++++++++++++++++---- > > 1 file changed, 53 insertions(+), 5 deletions(-) > > > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > > index 55ff03f85608..d399938d6d85 100644 > > --- a/drivers/cpufreq/amd-pstate.c > > +++ b/drivers/cpufreq/amd-pstate.c > > @@ -73,6 +73,19 @@ static inline int pstate_enable(bool enable) > > return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable ? 1 : 0); > > } > > > > +static int cppc_enable(bool enable) > > +{ > > + int cpu, ret = 0; > > + > > + for_each_online_cpu(cpu) { > > + ret = cppc_set_enable(cpu, enable ? 1 : 0); > > + if (ret) > > + return ret; > > + } > > + > > + return ret; > > +} > > + > > DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); > > > > static inline int amd_pstate_enable(bool enable) > > @@ -103,6 +116,24 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) > > return 0; > > } > > > > +static int cppc_init_perf(struct amd_cpudata *cpudata) > > +{ > > + struct cppc_perf_caps cppc_perf; > > + > > + int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); > > + if (ret) > > + return ret; > > + > > + WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); > > + > > + WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); > > + WRITE_ONCE(cpudata->lowest_nonlinear_perf, > > + cppc_perf.lowest_nonlinear_perf); > > + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); > > + > > + return 0; > > +} > > + > > DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); > > > > static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) > > @@ -120,6 +151,19 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, > > READ_ONCE(cpudata->cppc_req_cached)); > > } > > > > +static void cppc_update_perf(struct amd_cpudata *cpudata, > > + u32 min_perf, u32 des_perf, > > + 
u32 max_perf, bool fast_switch) > > +{ > > + struct cppc_perf_ctrls perf_ctrls; > > + > > + perf_ctrls.max_perf = max_perf; > > + perf_ctrls.min_perf = min_perf; > > + perf_ctrls.desired_perf = des_perf; > > + > > + cppc_set_perf(cpudata->cpu, &perf_ctrls); > > +} > > + > > DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); > > > > static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, > > @@ -346,7 +390,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) > > /* It will be updated by governor */ > > policy->cur = policy->cpuinfo.min_freq; > > > > - policy->fast_switch_possible = true; > > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) > > + policy->fast_switch_possible = true; > > > > ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], > > FREQ_QOS_MIN, policy->cpuinfo.min_freq); > > @@ -397,7 +442,6 @@ static struct cpufreq_driver amd_pstate_driver = { > > .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, > > .verify = amd_pstate_verify, > > .target = amd_pstate_target, > > - .adjust_perf = amd_pstate_adjust_perf, > > .init = amd_pstate_cpu_init, > > .exit = amd_pstate_cpu_exit, > > .name = "amd-pstate", > > @@ -421,10 +465,14 @@ static int __init amd_pstate_init(void) > > return -EEXIST; > > > > /* capability check */ > > - if (!boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > > - pr_debug("%s, AMD CPPC MSR based functionality is not supported\n", > > + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) { > > + pr_debug("%s, AMD CPPC MSR based functionality is supported\n", > > __func__); > > - return -ENODEV; > > + amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf; > > + } else { > > + static_call_update(amd_pstate_enable, cppc_enable); > > + static_call_update(amd_pstate_init_perf, cppc_init_perf); > > + static_call_update(amd_pstate_update_perf, cppc_update_perf); > > } > > > > /* enable amd pstate feature */ > >
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 55ff03f85608..d399938d6d85 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -73,6 +73,19 @@ static inline int pstate_enable(bool enable) return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable ? 1 : 0); } +static int cppc_enable(bool enable) +{ + int cpu, ret = 0; + + for_each_online_cpu(cpu) { + ret = cppc_set_enable(cpu, enable ? 1 : 0); + if (ret) + return ret; + } + + return ret; +} + DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); static inline int amd_pstate_enable(bool enable) @@ -103,6 +116,24 @@ static int pstate_init_perf(struct amd_cpudata *cpudata) return 0; } +static int cppc_init_perf(struct amd_cpudata *cpudata) +{ + struct cppc_perf_caps cppc_perf; + + int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); + if (ret) + return ret; + + WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf()); + + WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); + WRITE_ONCE(cpudata->lowest_nonlinear_perf, + cppc_perf.lowest_nonlinear_perf); + WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); + + return 0; +} + DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) @@ -120,6 +151,19 @@ static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, READ_ONCE(cpudata->cppc_req_cached)); } +static void cppc_update_perf(struct amd_cpudata *cpudata, + u32 min_perf, u32 des_perf, + u32 max_perf, bool fast_switch) +{ + struct cppc_perf_ctrls perf_ctrls; + + perf_ctrls.max_perf = max_perf; + perf_ctrls.min_perf = min_perf; + perf_ctrls.desired_perf = des_perf; + + cppc_set_perf(cpudata->cpu, &perf_ctrls); +} + DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, @@ -346,7 +390,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) /* It will be updated by governor */ 
policy->cur = policy->cpuinfo.min_freq; - policy->fast_switch_possible = true; + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) + policy->fast_switch_possible = true; ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], FREQ_QOS_MIN, policy->cpuinfo.min_freq); @@ -397,7 +442,6 @@ static struct cpufreq_driver amd_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, .verify = amd_pstate_verify, .target = amd_pstate_target, - .adjust_perf = amd_pstate_adjust_perf, .init = amd_pstate_cpu_init, .exit = amd_pstate_cpu_exit, .name = "amd-pstate", @@ -421,10 +465,14 @@ static int __init amd_pstate_init(void) return -EEXIST; /* capability check */ - if (!boot_cpu_has(X86_FEATURE_AMD_CPPC)) { - pr_debug("%s, AMD CPPC MSR based functionality is not supported\n", + if (boot_cpu_has(X86_FEATURE_AMD_CPPC)) { + pr_debug("%s, AMD CPPC MSR based functionality is supported\n", __func__); - return -ENODEV; + amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf; + } else { + static_call_update(amd_pstate_enable, cppc_enable); + static_call_update(amd_pstate_init_perf, cppc_init_perf); + static_call_update(amd_pstate_update_perf, cppc_update_perf); } /* enable amd pstate feature */