Message ID | 20211029130241.1984459-8-ray.huang@amd.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | cpufreq: introduce a new AMD CPU frequency control mechanism | expand |
On 10/29/2021 08:02, Huang Rui wrote: > Introduce the fast switch function for amd-pstate on the AMD processors > which support the full MSR register control. It's able to decrease the > lattency on interrupt context. s/lattency/latency/ > > Signed-off-by: Huang Rui <ray.huang@amd.com> > --- > drivers/cpufreq/amd-pstate.c | 38 ++++++++++++++++++++++++++++++++++++ > 1 file changed, 38 insertions(+) > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > index a400861c7fdc..55ff03f85608 100644 > --- a/drivers/cpufreq/amd-pstate.c > +++ b/drivers/cpufreq/amd-pstate.c > @@ -191,6 +191,41 @@ static int amd_pstate_target(struct cpufreq_policy *policy, > return 0; > } > > +static void amd_pstate_adjust_perf(unsigned int cpu, > + unsigned long min_perf, > + unsigned long target_perf, > + unsigned long capacity) > +{ > + unsigned long amd_max_perf, amd_min_perf, amd_des_perf, > + amd_cap_perf, lowest_nonlinear_perf; > + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); > + struct amd_cpudata *cpudata = policy->driver_data; > + > + amd_cap_perf = READ_ONCE(cpudata->highest_perf); > + lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); > + > + if (target_perf < capacity) > + amd_des_perf = DIV_ROUND_UP(amd_cap_perf * target_perf, > + capacity); > + > + amd_min_perf = READ_ONCE(cpudata->highest_perf); > + if (min_perf < capacity) > + amd_min_perf = DIV_ROUND_UP(amd_cap_perf * min_perf, capacity); > + > + if (amd_min_perf < lowest_nonlinear_perf) > + amd_min_perf = lowest_nonlinear_perf; > + > + amd_max_perf = amd_cap_perf; > + if (amd_max_perf < amd_min_perf) > + amd_max_perf = amd_min_perf; > + > + amd_des_perf = clamp_t(unsigned long, amd_des_perf, > + amd_min_perf, amd_max_perf); > + > + amd_pstate_update(cpudata, amd_min_perf, amd_des_perf, > + amd_max_perf, true); > +} > + > static int amd_get_min_freq(struct amd_cpudata *cpudata) > { > struct cppc_perf_caps cppc_perf; > @@ -311,6 +346,8 @@ static int amd_pstate_cpu_init(struct 
cpufreq_policy *policy) > /* It will be updated by governor */ > policy->cur = policy->cpuinfo.min_freq; > > + policy->fast_switch_possible = true; > + > ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], > FREQ_QOS_MIN, policy->cpuinfo.min_freq); > if (ret < 0) { > @@ -360,6 +397,7 @@ static struct cpufreq_driver amd_pstate_driver = { > .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, > .verify = amd_pstate_verify, > .target = amd_pstate_target, > + .adjust_perf = amd_pstate_adjust_perf, > .init = amd_pstate_cpu_init, > .exit = amd_pstate_cpu_exit, > .name = "amd-pstate", >
On 10/29/21 8:02 AM, Huang Rui wrote: > Introduce the fast switch function for amd-pstate on the AMD processors > which support the full MSR register control. It's able to decrease the > lattency on interrupt context. > > Signed-off-by: Huang Rui <ray.huang@amd.com> > --- > drivers/cpufreq/amd-pstate.c | 38 ++++++++++++++++++++++++++++++++++++ > 1 file changed, 38 insertions(+) > > diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c > index a400861c7fdc..55ff03f85608 100644 > --- a/drivers/cpufreq/amd-pstate.c > +++ b/drivers/cpufreq/amd-pstate.c > @@ -191,6 +191,41 @@ static int amd_pstate_target(struct cpufreq_policy *policy, > return 0; > } > > +static void amd_pstate_adjust_perf(unsigned int cpu, > + unsigned long min_perf, > + unsigned long target_perf, > + unsigned long capacity) > +{ > + unsigned long amd_max_perf, amd_min_perf, amd_des_perf, > + amd_cap_perf, lowest_nonlinear_perf; You could drop the amd_ prefix to these local variables. -Nathan > + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); > + struct amd_cpudata *cpudata = policy->driver_data; > + > + amd_cap_perf = READ_ONCE(cpudata->highest_perf); > + lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); > + > + if (target_perf < capacity) > + amd_des_perf = DIV_ROUND_UP(amd_cap_perf * target_perf, > + capacity); > + > + amd_min_perf = READ_ONCE(cpudata->highest_perf); > + if (min_perf < capacity) > + amd_min_perf = DIV_ROUND_UP(amd_cap_perf * min_perf, capacity); > + > + if (amd_min_perf < lowest_nonlinear_perf) > + amd_min_perf = lowest_nonlinear_perf; > + > + amd_max_perf = amd_cap_perf; > + if (amd_max_perf < amd_min_perf) > + amd_max_perf = amd_min_perf; > + > + amd_des_perf = clamp_t(unsigned long, amd_des_perf, > + amd_min_perf, amd_max_perf); > + > + amd_pstate_update(cpudata, amd_min_perf, amd_des_perf, > + amd_max_perf, true); > +} > + > static int amd_get_min_freq(struct amd_cpudata *cpudata) > { > struct cppc_perf_caps cppc_perf; > @@ -311,6 
+346,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) > /* It will be updated by governor */ > policy->cur = policy->cpuinfo.min_freq; > > + policy->fast_switch_possible = true; > + > ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], > FREQ_QOS_MIN, policy->cpuinfo.min_freq); > if (ret < 0) { > @@ -360,6 +397,7 @@ static struct cpufreq_driver amd_pstate_driver = { > .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, > .verify = amd_pstate_verify, > .target = amd_pstate_target, > + .adjust_perf = amd_pstate_adjust_perf, > .init = amd_pstate_cpu_init, > .exit = amd_pstate_cpu_exit, > .name = "amd-pstate", >
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index a400861c7fdc..55ff03f85608 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -191,6 +191,41 @@ static int amd_pstate_target(struct cpufreq_policy *policy, return 0; } +static void amd_pstate_adjust_perf(unsigned int cpu, + unsigned long min_perf, + unsigned long target_perf, + unsigned long capacity) +{ + unsigned long amd_max_perf, amd_min_perf, amd_des_perf, + amd_cap_perf, lowest_nonlinear_perf; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + struct amd_cpudata *cpudata = policy->driver_data; + + amd_cap_perf = READ_ONCE(cpudata->highest_perf); + lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); + + if (target_perf < capacity) + amd_des_perf = DIV_ROUND_UP(amd_cap_perf * target_perf, + capacity); + + amd_min_perf = READ_ONCE(cpudata->highest_perf); + if (min_perf < capacity) + amd_min_perf = DIV_ROUND_UP(amd_cap_perf * min_perf, capacity); + + if (amd_min_perf < lowest_nonlinear_perf) + amd_min_perf = lowest_nonlinear_perf; + + amd_max_perf = amd_cap_perf; + if (amd_max_perf < amd_min_perf) + amd_max_perf = amd_min_perf; + + amd_des_perf = clamp_t(unsigned long, amd_des_perf, + amd_min_perf, amd_max_perf); + + amd_pstate_update(cpudata, amd_min_perf, amd_des_perf, + amd_max_perf, true); +} + static int amd_get_min_freq(struct amd_cpudata *cpudata) { struct cppc_perf_caps cppc_perf; @@ -311,6 +346,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy) /* It will be updated by governor */ policy->cur = policy->cpuinfo.min_freq; + policy->fast_switch_possible = true; + ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], FREQ_QOS_MIN, policy->cpuinfo.min_freq); if (ret < 0) { @@ -360,6 +397,7 @@ static struct cpufreq_driver amd_pstate_driver = { .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, .verify = amd_pstate_verify, .target = amd_pstate_target, + .adjust_perf = amd_pstate_adjust_perf, .init 
= amd_pstate_cpu_init, .exit = amd_pstate_cpu_exit, .name = "amd-pstate",
Introduce the fast switch function for amd-pstate on the AMD processors which support the full MSR register control. It's able to decrease the latency in interrupt context. Signed-off-by: Huang Rui <ray.huang@amd.com> --- drivers/cpufreq/amd-pstate.c | 38 ++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+)