
[V10,3/7] cpufreq: amd-pstate: Enable amd-pstate preferred core supporting.

Message ID: 20231030063403.3502816-4-li.meng@amd.com
State: Superseded, archived
Series: amd-pstate preferred core

Commit Message

Meng, Li (Jassmine) Oct. 30, 2023, 6:33 a.m. UTC
The amd-pstate driver utilizes the functions and data structures
provided by the ITMT architecture to enable the scheduler to favor
scheduling on cores which can reach a higher frequency at lower
voltage. We call this amd-pstate preferred core.

Here sched_set_itmt_core_prio() is called to set priorities and
sched_set_itmt_support() is called to enable the ITMT feature.
The amd-pstate driver uses the highest performance value to indicate
the priority of a CPU; a higher value means a higher priority.

The initial core rankings are set up by amd-pstate when the
system boots.

Add a variable hw_prefcore to the cpudata structure. It indicates
whether the processor and power firmware support the preferred core
feature.

Add a new early parameter, `amd_prefcore=disable`, to allow the user
to disable preferred core support.

The amd-pstate driver enables the preferred core feature only when
the hardware supports it and the user has not disabled it via the
early parameter.
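
For reference, with this patch the feature can be controlled and
inspected roughly as follows (illustrative usage only; the sysfs paths
are the ones expected from the attributes added in this series, so
treat them as an assumption rather than verified output):

    # kernel command line: opt out of preferred core ranking
    amd_prefcore=disable

    # global state of the early parameter
    cat /sys/devices/system/cpu/amd_pstate/prefcore

    # per-policy indication of hardware/firmware support
    cat /sys/devices/system/cpu/cpufreq/policy0/amd_pstate_hw_prefcore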

Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Wyes Karny <wyes.karny@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
Co-developed-by: Perry Yuan <Perry.Yuan@amd.com>
Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
Signed-off-by: Meng Li <li.meng@amd.com>
---
 drivers/cpufreq/amd-pstate.c | 141 +++++++++++++++++++++++++++++++----
 include/linux/amd-pstate.h   |   4 +
 2 files changed, 129 insertions(+), 16 deletions(-)

Comments

Yuan, Perry Nov. 2, 2023, 9:05 a.m. UTC | #1

Hi Meng Li,


> -----Original Message-----
> From: Meng, Li (Jassmine) <Li.Meng@amd.com>
> Sent: Monday, October 30, 2023 2:34 PM
> To: Rafael J . Wysocki <rafael.j.wysocki@intel.com>; Huang, Ray
> <Ray.Huang@amd.com>
> Cc: linux-pm@vger.kernel.org; linux-kernel@vger.kernel.org; x86@kernel.org;
> linux-acpi@vger.kernel.org; Shuah Khan <skhan@linuxfoundation.org>; linux-
> kselftest@vger.kernel.org; Fontenot, Nathan <Nathan.Fontenot@amd.com>;
> Sharma, Deepak <Deepak.Sharma@amd.com>; Deucher, Alexander
> <Alexander.Deucher@amd.com>; Limonciello, Mario
> <Mario.Limonciello@amd.com>; Huang, Shimmer
> <Shimmer.Huang@amd.com>; Yuan, Perry <Perry.Yuan@amd.com>; Du,
> Xiaojian <Xiaojian.Du@amd.com>; Viresh Kumar <viresh.kumar@linaro.org>;
> Borislav Petkov <bp@alien8.de>; Oleksandr Natalenko
> <oleksandr@natalenko.name>; Meng, Li (Jassmine) <Li.Meng@amd.com>;
> Karny, Wyes <Wyes.Karny@amd.com>
> Subject: [PATCH V10 3/7] cpufreq: amd-pstate: Enable amd-pstate preferred
> core supporting.
>
> amd-pstate driver utilizes the functions and data structures provided by the
> ITMT architecture to enable the scheduler to favor scheduling on cores which
> can be get a higher frequency with lower voltage. We call it amd-pstate
> preferrred core.
>
> Here sched_set_itmt_core_prio() is called to set priorities and
> sched_set_itmt_support() is called to enable ITMT feature.
> amd-pstate driver uses the highest performance value to indicate the priority
> of CPU. The higher value has a higher priority.
>
> The initial core rankings are set up by amd-pstate when the system boots.
>
> Add a variable hw_prefcore in cpudata structure. It will check if the processor
> and power firmware support preferred core feature.
>
> Add one new early parameter `disable` to allow user to disable the preferred
> core.
>
> Only when hardware supports preferred core and user set `enabled` in early
> parameter, amd pstate driver supports preferred core featue.
>
> Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
> Reviewed-by: Huang Rui <ray.huang@amd.com>
> Reviewed-by: Wyes Karny <wyes.karny@amd.com>
> Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
> Co-developed-by: Perry Yuan <Perry.Yuan@amd.com>
> Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
> Signed-off-by: Meng Li <li.meng@amd.com>
> ---
>  drivers/cpufreq/amd-pstate.c | 141
> +++++++++++++++++++++++++++++++----
>  include/linux/amd-pstate.h   |   4 +
>  2 files changed, 129 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 9a1e194d5cf8..2033e5e70017 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -37,6 +37,7 @@
>  #include <linux/uaccess.h>
>  #include <linux/static_call.h>
>  #include <linux/amd-pstate.h>
> +#include <linux/topology.h>
>
>  #include <acpi/processor.h>
>  #include <acpi/cppc_acpi.h>
> @@ -49,6 +50,7 @@
>
>  #define AMD_PSTATE_TRANSITION_LATENCY        20000
>  #define AMD_PSTATE_TRANSITION_DELAY  1000
> +#define AMD_PSTATE_PREFCORE_THRESHOLD        166
>
>  /*
>   * TODO: We need more time to fine tune processors with shared memory
> solution @@ -64,6 +66,7 @@ static struct cpufreq_driver amd_pstate_driver;
> static struct cpufreq_driver amd_pstate_epp_driver;  static int cppc_state =
> AMD_PSTATE_UNDEFINED;  static bool cppc_enabled;
> +static bool amd_pstate_prefcore = true;
>
>  /*
>   * AMD Energy Preference Performance (EPP) @@ -290,23 +293,21 @@
> static inline int amd_pstate_enable(bool enable)  static int
> pstate_init_perf(struct amd_cpudata *cpudata)  {
>       u64 cap1;
> -     u32 highest_perf;
>
>       int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
>                                    &cap1);
>       if (ret)
>               return ret;
>
> -     /*
> -      * TODO: Introduce AMD specific power feature.
> -      *
> -      * CPPC entry doesn't indicate the highest performance in some ASICs.
> +     /* For platforms that do not support the preferred core feature, the
> +      * highest_pef may be configured with 166 or 255, to avoid max
> frequency
> +      * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1)
> value as
> +      * the default max perf.
>        */
> -     highest_perf = amd_get_highest_perf();
> -     if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
> -             highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
> -
> -     WRITE_ONCE(cpudata->highest_perf, highest_perf);
> +     if (cpudata->hw_prefcore)
> +             WRITE_ONCE(cpudata->highest_perf,
> AMD_PSTATE_PREFCORE_THRESHOLD);
> +     else
> +             WRITE_ONCE(cpudata->highest_perf,
> AMD_CPPC_HIGHEST_PERF(cap1));
>
>       WRITE_ONCE(cpudata->nominal_perf,
> AMD_CPPC_NOMINAL_PERF(cap1));
>       WRITE_ONCE(cpudata->lowest_nonlinear_perf,
> AMD_CPPC_LOWNONLIN_PERF(cap1)); @@ -318,17 +319,15 @@ static int
> pstate_init_perf(struct amd_cpudata *cpudata)  static int
> cppc_init_perf(struct amd_cpudata *cpudata)  {
>       struct cppc_perf_caps cppc_perf;
> -     u32 highest_perf;
>
>       int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
>       if (ret)
>               return ret;
>
> -     highest_perf = amd_get_highest_perf();
> -     if (highest_perf > cppc_perf.highest_perf)
> -             highest_perf = cppc_perf.highest_perf;
> -
> -     WRITE_ONCE(cpudata->highest_perf, highest_perf);
> +     if (cpudata->hw_prefcore)
> +             WRITE_ONCE(cpudata->highest_perf,
> AMD_PSTATE_PREFCORE_THRESHOLD);
> +     else
> +             WRITE_ONCE(cpudata->highest_perf,
> cppc_perf.highest_perf);
>
>       WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
>       WRITE_ONCE(cpudata->lowest_nonlinear_perf,
> @@ -676,6 +675,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
>       wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);  }
>
> +/*
> + * Set amd-pstate preferred core enable can't be done directly from
> +cpufreq callbacks
> + * due to locking, so queue the work for later.
> + */
> +static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
> +{
> +     sched_set_itmt_support();
> +}
> +static DECLARE_WORK(sched_prefcore_work,
> +amd_pstste_sched_prefcore_workfn);
> +
> +/*
> + * Get the highest performance register value.
> + * @cpu: CPU from which to get highest performance.
> + * @highest_perf: Return address.
> + *
> + * Return: 0 for success, -EIO otherwise.
> + */
> +static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) {
> +     int ret;
> +
> +     if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +             u64 cap1;
> +
> +             ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1,
> &cap1);
> +             if (ret)
> +                     return ret;
> +             WRITE_ONCE(*highest_perf,
> AMD_CPPC_HIGHEST_PERF(cap1));
> +     } else {
> +             u64 cppc_highest_perf;
> +
> +             ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
> +             if (ret)
> +                     return ret;
> +             WRITE_ONCE(*highest_perf, cppc_highest_perf);
> +     }
> +
> +     return (ret);
> +}
> +
> +#define CPPC_MAX_PERF        U8_MAX
> +
> +static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) {
> +     int ret, prio;
> +     u32 highest_perf;
> +
> +     ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
> +     if (ret)
> +             return;
> +
> +     cpudata->hw_prefcore = true;
> +     /* check if CPPC preferred core feature is enabled*/
> +     if (highest_perf < CPPC_MAX_PERF)
> +             prio = (int)highest_perf;
> +     else {
> +             pr_debug("AMD CPPC preferred core is unsupported!\n");
> +             cpudata->hw_prefcore = false;
> +             return;
> +     }
> +
> +     if (!amd_pstate_prefcore)
> +             return;

Move the feature state check earlier, before the call to amd_pstate_get_highest_perf().
If the feature is not enabled, there is no need to read the highest perf.


> +
> +     /*
> +      * The priorities can be set regardless of whether or not
> +      * sched_set_itmt_support(true) has been called and it is valid to
> +      * update them at any time after it has been called.
> +      */
> +     sched_set_itmt_core_prio(prio, cpudata->cpu);
> +
> +     schedule_work(&sched_prefcore_work);
> +}
> +
>  static int amd_pstate_cpu_init(struct cpufreq_policy *policy)  {
>       int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
> @@ -697,6 +770,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy
> *policy)
>
>       cpudata->cpu = policy->cpu;
>
> +     amd_pstate_init_prefcore(cpudata);
> +
>       ret = amd_pstate_init_perf(cpudata);
>       if (ret)
>               goto free_cpudata1;
> @@ -845,6 +920,17 @@ static ssize_t show_amd_pstate_highest_perf(struct
> cpufreq_policy *policy,
>       return sysfs_emit(buf, "%u\n", perf);
>  }
>
> +static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
> +                                        char *buf)
> +{
> +     bool hw_prefcore;
> +     struct amd_cpudata *cpudata = policy->driver_data;
> +
> +     hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
> +
> +     return sysfs_emit(buf, "%s\n", hw_prefcore ? "supported" :
> +"unsupported"); }

Replace the string with the str_enabled_disabled() helper, which is defined as below in

include/linux/string_helpers.h

static inline const char *str_enabled_disabled(bool v)
{
        return v ? "enabled" : "disabled";
}

Perry.
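
Applied to the show function above, the suggestion would look roughly
like this (a sketch only; it assumes switching the wording from
"supported"/"unsupported" to "enabled"/"disabled" is acceptable, and it
needs #include <linux/string_helpers.h>):

static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
					   char *buf)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	/* str_enabled_disabled() maps the bool to "enabled"/"disabled" */
	return sysfs_emit(buf, "%s\n",
			  str_enabled_disabled(READ_ONCE(cpudata->hw_prefcore)));
}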

> +
>  static ssize_t show_energy_performance_available_preferences(
>                               struct cpufreq_policy *policy, char *buf)
> { @@ -1037,18 +1123,27 @@ static ssize_t status_store(struct device *a,
> struct device_attribute *b,
>       return ret < 0 ? ret : count;
>  }
>
> +static ssize_t prefcore_show(struct device *dev,
> +                          struct device_attribute *attr, char *buf) {
> +     return sysfs_emit(buf, "%s\n", amd_pstate_prefcore ? "enabled" :
> +"disabled"); }

Replace the string with the str_enabled_disabled() helper, which is defined as below in

include/linux/string_helpers.h
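
For prefcore_show() the same rework would be roughly (again only a
sketch of the suggestion, not the code submitted in this revision):

static ssize_t prefcore_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* amd_pstate_prefcore holds the state of the early parameter */
	return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
}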


> +
>  cpufreq_freq_attr_ro(amd_pstate_max_freq);
>  cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
>
>  cpufreq_freq_attr_ro(amd_pstate_highest_perf);
> +cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
>  cpufreq_freq_attr_rw(energy_performance_preference);
>  cpufreq_freq_attr_ro(energy_performance_available_preferences);
>  static DEVICE_ATTR_RW(status);
> +static DEVICE_ATTR_RO(prefcore);
>
>  static struct freq_attr *amd_pstate_attr[] = {
>       &amd_pstate_max_freq,
>       &amd_pstate_lowest_nonlinear_freq,
>       &amd_pstate_highest_perf,
> +     &amd_pstate_hw_prefcore,
>       NULL,
>  };
>
> @@ -1056,6 +1151,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
>       &amd_pstate_max_freq,
>       &amd_pstate_lowest_nonlinear_freq,
>       &amd_pstate_highest_perf,
> +     &amd_pstate_hw_prefcore,
>       &energy_performance_preference,
>       &energy_performance_available_preferences,
>       NULL,
> @@ -1063,6 +1159,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
>
>  static struct attribute *pstate_global_attributes[] = {
>       &dev_attr_status.attr,
> +     &dev_attr_prefcore.attr,
>       NULL
>  };
>
> @@ -1114,6 +1211,8 @@ static int amd_pstate_epp_cpu_init(struct
> cpufreq_policy *policy)
>       cpudata->cpu = policy->cpu;
>       cpudata->epp_policy = 0;
>
> +     amd_pstate_init_prefcore(cpudata);
> +
>       ret = amd_pstate_init_perf(cpudata);
>       if (ret)
>               goto free_cpudata1;
> @@ -1527,7 +1626,17 @@ static int __init amd_pstate_param(char *str)
>
>       return amd_pstate_set_driver(mode_idx);  }
> +
> +static int __init amd_prefcore_param(char *str) {
> +     if (!strcmp(str, "disable"))
> +             amd_pstate_prefcore = false;
> +
> +     return 0;
> +}
> +
>  early_param("amd_pstate", amd_pstate_param);
> +early_param("amd_prefcore", amd_prefcore_param);
>
>  MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
> MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver"); diff --git
> a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index
> 446394f84606..87e140e9e6db 100644
> --- a/include/linux/amd-pstate.h
> +++ b/include/linux/amd-pstate.h
> @@ -52,6 +52,9 @@ struct amd_aperf_mperf {
>   * @prev: Last Aperf/Mperf/tsc count value read from register
>   * @freq: current cpu frequency value
>   * @boost_supported: check whether the Processor or SBIOS supports boost
> mode
> + * @hw_prefcore: check whether HW supports preferred core featue.
> + *             Only when hw_prefcore and early prefcore param are true,
> + *             AMD P-State driver supports preferred core featue.
>   * @epp_policy: Last saved policy used to set energy-performance preference
>   * @epp_cached: Cached CPPC energy-performance preference value
>   * @policy: Cpufreq policy value
> @@ -81,6 +84,7 @@ struct amd_cpudata {
>
>       u64     freq;
>       bool    boost_supported;
> +     bool    hw_prefcore;
>
>       /* EPP feature related attributes*/
>       s16     epp_policy;
> --
> 2.34.1
Meng, Li (Jassmine) Nov. 6, 2023, 8:15 a.m. UTC | #2

Hi Perry,

> -----Original Message-----
> From: Yuan, Perry <Perry.Yuan@amd.com>
> Sent: Thursday, November 2, 2023 5:06 PM
> To: Meng, Li (Jassmine) <Li.Meng@amd.com>; Rafael J . Wysocki
> <rafael.j.wysocki@intel.com>; Huang, Ray <Ray.Huang@amd.com>
> Cc: linux-pm@vger.kernel.org; linux-kernel@vger.kernel.org;
> x86@kernel.org; linux-acpi@vger.kernel.org; Shuah Khan
> <skhan@linuxfoundation.org>; linux-kselftest@vger.kernel.org; Fontenot,
> Nathan <Nathan.Fontenot@amd.com>; Sharma, Deepak
> <Deepak.Sharma@amd.com>; Deucher, Alexander
> <Alexander.Deucher@amd.com>; Limonciello, Mario
> <Mario.Limonciello@amd.com>; Huang, Shimmer
> <Shimmer.Huang@amd.com>; Du, Xiaojian <Xiaojian.Du@amd.com>; Viresh
> Kumar <viresh.kumar@linaro.org>; Borislav Petkov <bp@alien8.de>;
> Oleksandr Natalenko <oleksandr@natalenko.name>; Karny, Wyes
> <Wyes.Karny@amd.com>
> Subject: RE: [PATCH V10 3/7] cpufreq: amd-pstate: Enable amd-pstate
> preferred core supporting.
>
> [AMD Official Use Only - General]
>
> Hi meng Li,
>
>
> > -----Original Message-----
> > From: Meng, Li (Jassmine) <Li.Meng@amd.com>
> > Sent: Monday, October 30, 2023 2:34 PM
> > To: Rafael J . Wysocki <rafael.j.wysocki@intel.com>; Huang, Ray
> > <Ray.Huang@amd.com>
> > Cc: linux-pm@vger.kernel.org; linux-kernel@vger.kernel.org;
> > x86@kernel.org; linux-acpi@vger.kernel.org; Shuah Khan
> > <skhan@linuxfoundation.org>; linux- kselftest@vger.kernel.org;
> > Fontenot, Nathan <Nathan.Fontenot@amd.com>; Sharma, Deepak
> > <Deepak.Sharma@amd.com>; Deucher, Alexander
> > <Alexander.Deucher@amd.com>; Limonciello, Mario
> > <Mario.Limonciello@amd.com>; Huang, Shimmer
> <Shimmer.Huang@amd.com>;
> > Yuan, Perry <Perry.Yuan@amd.com>; Du, Xiaojian
> <Xiaojian.Du@amd.com>;
> > Viresh Kumar <viresh.kumar@linaro.org>; Borislav Petkov
> > <bp@alien8.de>; Oleksandr Natalenko <oleksandr@natalenko.name>;
> Meng,
> > Li (Jassmine) <Li.Meng@amd.com>; Karny, Wyes <Wyes.Karny@amd.com>
> > Subject: [PATCH V10 3/7] cpufreq: amd-pstate: Enable amd-pstate
> > preferred core supporting.
> >
> > amd-pstate driver utilizes the functions and data structures provided
> > by the ITMT architecture to enable the scheduler to favor scheduling
> > on cores which can be get a higher frequency with lower voltage. We
> > call it amd-pstate preferrred core.
> >
> > Here sched_set_itmt_core_prio() is called to set priorities and
> > sched_set_itmt_support() is called to enable ITMT feature.
> > amd-pstate driver uses the highest performance value to indicate the
> > priority of CPU. The higher value has a higher priority.
> >
> > The initial core rankings are set up by amd-pstate when the system boots.
> >
> > Add a variable hw_prefcore in cpudata structure. It will check if the
> > processor and power firmware support preferred core feature.
> >
> > Add one new early parameter `disable` to allow user to disable the
> > preferred core.
> >
> > Only when hardware supports preferred core and user set `enabled` in
> > early parameter, amd pstate driver supports preferred core featue.
> >
> > Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
> > Reviewed-by: Huang Rui <ray.huang@amd.com>
> > Reviewed-by: Wyes Karny <wyes.karny@amd.com>
> > Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
> > Co-developed-by: Perry Yuan <Perry.Yuan@amd.com>
> > Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
> > Signed-off-by: Meng Li <li.meng@amd.com>
> > ---
> >  drivers/cpufreq/amd-pstate.c | 141
> > +++++++++++++++++++++++++++++++----
> >  include/linux/amd-pstate.h   |   4 +
> >  2 files changed, 129 insertions(+), 16 deletions(-)
> >
> > diff --git a/drivers/cpufreq/amd-pstate.c
> > b/drivers/cpufreq/amd-pstate.c index 9a1e194d5cf8..2033e5e70017 100644
> > --- a/drivers/cpufreq/amd-pstate.c
> > +++ b/drivers/cpufreq/amd-pstate.c
> > @@ -37,6 +37,7 @@
> >  #include <linux/uaccess.h>
> >  #include <linux/static_call.h>
> >  #include <linux/amd-pstate.h>
> > +#include <linux/topology.h>
> >
> >  #include <acpi/processor.h>
> >  #include <acpi/cppc_acpi.h>
> > @@ -49,6 +50,7 @@
> >
> >  #define AMD_PSTATE_TRANSITION_LATENCY        20000
> >  #define AMD_PSTATE_TRANSITION_DELAY  1000
> > +#define AMD_PSTATE_PREFCORE_THRESHOLD        166
> >
> >  /*
> >   * TODO: We need more time to fine tune processors with shared memory
> > solution @@ -64,6 +66,7 @@ static struct cpufreq_driver
> > amd_pstate_driver; static struct cpufreq_driver amd_pstate_epp_driver;
> > static int cppc_state = AMD_PSTATE_UNDEFINED;  static bool
> > cppc_enabled;
> > +static bool amd_pstate_prefcore = true;
> >
> >  /*
> >   * AMD Energy Preference Performance (EPP) @@ -290,23 +293,21 @@
> > static inline int amd_pstate_enable(bool enable)  static int
> > pstate_init_perf(struct amd_cpudata *cpudata)  {
> >       u64 cap1;
> > -     u32 highest_perf;
> >
> >       int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
> >                                    &cap1);
> >       if (ret)
> >               return ret;
> >
> > -     /*
> > -      * TODO: Introduce AMD specific power feature.
> > -      *
> > -      * CPPC entry doesn't indicate the highest performance in some ASICs.
> > +     /* For platforms that do not support the preferred core feature, the
> > +      * highest_pef may be configured with 166 or 255, to avoid max
> > frequency
> > +      * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1)
> > value as
> > +      * the default max perf.
> >        */
> > -     highest_perf = amd_get_highest_perf();
> > -     if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
> > -             highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
> > -
> > -     WRITE_ONCE(cpudata->highest_perf, highest_perf);
> > +     if (cpudata->hw_prefcore)
> > +             WRITE_ONCE(cpudata->highest_perf,
> > AMD_PSTATE_PREFCORE_THRESHOLD);
> > +     else
> > +             WRITE_ONCE(cpudata->highest_perf,
> > AMD_CPPC_HIGHEST_PERF(cap1));
> >
> >       WRITE_ONCE(cpudata->nominal_perf,
> AMD_CPPC_NOMINAL_PERF(cap1));
> >       WRITE_ONCE(cpudata->lowest_nonlinear_perf,
> > AMD_CPPC_LOWNONLIN_PERF(cap1)); @@ -318,17 +319,15 @@ static int
> > pstate_init_perf(struct amd_cpudata *cpudata)  static int
> > cppc_init_perf(struct amd_cpudata *cpudata)  {
> >       struct cppc_perf_caps cppc_perf;
> > -     u32 highest_perf;
> >
> >       int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
> >       if (ret)
> >               return ret;
> >
> > -     highest_perf = amd_get_highest_perf();
> > -     if (highest_perf > cppc_perf.highest_perf)
> > -             highest_perf = cppc_perf.highest_perf;
> > -
> > -     WRITE_ONCE(cpudata->highest_perf, highest_perf);
> > +     if (cpudata->hw_prefcore)
> > +             WRITE_ONCE(cpudata->highest_perf,
> > AMD_PSTATE_PREFCORE_THRESHOLD);
> > +     else
> > +             WRITE_ONCE(cpudata->highest_perf,
> > cppc_perf.highest_perf);
> >
> >       WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
> >       WRITE_ONCE(cpudata->lowest_nonlinear_perf,
> > @@ -676,6 +675,80 @@ static void amd_perf_ctl_reset(unsigned int cpu)
> >       wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);  }
> >
> > +/*
> > + * Set amd-pstate preferred core enable can't be done directly from
> > +cpufreq callbacks
> > + * due to locking, so queue the work for later.
> > + */
> > +static void amd_pstste_sched_prefcore_workfn(struct work_struct
> > +*work) {
> > +     sched_set_itmt_support();
> > +}
> > +static DECLARE_WORK(sched_prefcore_work,
> > +amd_pstste_sched_prefcore_workfn);
> > +
> > +/*
> > + * Get the highest performance register value.
> > + * @cpu: CPU from which to get highest performance.
> > + * @highest_perf: Return address.
> > + *
> > + * Return: 0 for success, -EIO otherwise.
> > + */
> > +static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf) {
> > +     int ret;
> > +
> > +     if (boot_cpu_has(X86_FEATURE_CPPC)) {
> > +             u64 cap1;
> > +
> > +             ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1,
> > &cap1);
> > +             if (ret)
> > +                     return ret;
> > +             WRITE_ONCE(*highest_perf,
> > AMD_CPPC_HIGHEST_PERF(cap1));
> > +     } else {
> > +             u64 cppc_highest_perf;
> > +
> > +             ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
> > +             if (ret)
> > +                     return ret;
> > +             WRITE_ONCE(*highest_perf, cppc_highest_perf);
> > +     }
> > +
> > +     return (ret);
> > +}
> > +
> > +#define CPPC_MAX_PERF        U8_MAX
> > +
> > +static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) {
> > +     int ret, prio;
> > +     u32 highest_perf;
> > +
> > +     ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
> > +     if (ret)
> > +             return;
> > +
> > +     cpudata->hw_prefcore = true;
> > +     /* check if CPPC preferred core feature is enabled*/
> > +     if (highest_perf < CPPC_MAX_PERF)
> > +             prio = (int)highest_perf;
> > +     else {
> > +             pr_debug("AMD CPPC preferred core is unsupported!\n");
> > +             cpudata->hw_prefcore = false;
> > +             return;
> > +     }
> > +
> > +     if (!amd_pstate_prefcore)
> > +             return;
>
> Move the feature state check to earlier before you call
> amd_pstate_get_highest_perf().
> If the feature is not enabled, you have no need to read highest perf.
>
[Meng, Li (Jassmine)] I need to set hw_prefcore based on highest_perf.
Therefore, the amd_pstate_prefcore check cannot be moved before the call to amd_pstate_get_highest_perf().
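
To illustrate the point, the flow in amd_pstate_init_prefcore() boils
down to the following (a condensed sketch of the hunk above with
comments added, not the literal patch code):

	ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
	if (ret)
		return;

	/* hw_prefcore reflects what hardware/firmware reports, independent
	 * of the amd_prefcore early parameter */
	cpudata->hw_prefcore = (highest_perf < CPPC_MAX_PERF);
	if (!cpudata->hw_prefcore)
		return;

	/* only the ITMT wiring below is gated by the early parameter */
	if (!amd_pstate_prefcore)
		return;

	sched_set_itmt_core_prio((int)highest_perf, cpudata->cpu);
	schedule_work(&sched_prefcore_work);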
>
> > +
> > +     /*
> > +      * The priorities can be set regardless of whether or not
> > +      * sched_set_itmt_support(true) has been called and it is valid to
> > +      * update them at any time after it has been called.
> > +      */
> > +     sched_set_itmt_core_prio(prio, cpudata->cpu);
> > +
> > +     schedule_work(&sched_prefcore_work);
> > +}
> > +
> >  static int amd_pstate_cpu_init(struct cpufreq_policy *policy)  {
> >       int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq,
> > ret; @@ -697,6 +770,8 @@ static int amd_pstate_cpu_init(struct
> > cpufreq_policy
> > *policy)
> >
> >       cpudata->cpu = policy->cpu;
> >
> > +     amd_pstate_init_prefcore(cpudata);
> > +
> >       ret = amd_pstate_init_perf(cpudata);
> >       if (ret)
> >               goto free_cpudata1;
> > @@ -845,6 +920,17 @@ static ssize_t
> > show_amd_pstate_highest_perf(struct
> > cpufreq_policy *policy,
> >       return sysfs_emit(buf, "%u\n", perf);  }
> >
> > +static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy
> *policy,
> > +                                        char *buf) {
> > +     bool hw_prefcore;
> > +     struct amd_cpudata *cpudata = policy->driver_data;
> > +
> > +     hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
> > +
> > +     return sysfs_emit(buf, "%s\n", hw_prefcore ? "supported" :
> > +"unsupported"); }
>
> Replace the string with str_enabled_disabled() function that is defined as
> below.
>
> include/linux/string_helpers.h
>
> static inline const char *str_enabled_disabled(bool v) {
>         return v ? "enabled" : "disabled"; }
>
> Perry.
>
> > +
> >  static ssize_t show_energy_performance_available_preferences(
> >                               struct cpufreq_policy *policy, char
> > *buf) { @@ -1037,18 +1123,27 @@ static ssize_t status_store(struct
> > device *a, struct device_attribute *b,
> >       return ret < 0 ? ret : count;
> >  }
> >
> > +static ssize_t prefcore_show(struct device *dev,
> > +                          struct device_attribute *attr, char *buf) {
> > +     return sysfs_emit(buf, "%s\n", amd_pstate_prefcore ? "enabled" :
> > +"disabled"); }
>
> Replace the string with str_enabled_disabled() function that is defined as
> below.
>
> include/linux/string_helpers.h
>
>
> > +
> >  cpufreq_freq_attr_ro(amd_pstate_max_freq);
> >  cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
> >
> >  cpufreq_freq_attr_ro(amd_pstate_highest_perf);
> > +cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
> >  cpufreq_freq_attr_rw(energy_performance_preference);
> >  cpufreq_freq_attr_ro(energy_performance_available_preferences);
> >  static DEVICE_ATTR_RW(status);
> > +static DEVICE_ATTR_RO(prefcore);
> >
> >  static struct freq_attr *amd_pstate_attr[] = {
> >       &amd_pstate_max_freq,
> >       &amd_pstate_lowest_nonlinear_freq,
> >       &amd_pstate_highest_perf,
> > +     &amd_pstate_hw_prefcore,
> >       NULL,
> >  };
> >
> > @@ -1056,6 +1151,7 @@ static struct freq_attr *amd_pstate_epp_attr[] =
> {
> >       &amd_pstate_max_freq,
> >       &amd_pstate_lowest_nonlinear_freq,
> >       &amd_pstate_highest_perf,
> > +     &amd_pstate_hw_prefcore,
> >       &energy_performance_preference,
> >       &energy_performance_available_preferences,
> >       NULL,
> > @@ -1063,6 +1159,7 @@ static struct freq_attr *amd_pstate_epp_attr[] =
> > {
> >
> >  static struct attribute *pstate_global_attributes[] = {
> >       &dev_attr_status.attr,
> > +     &dev_attr_prefcore.attr,
> >       NULL
> >  };
> >
> > @@ -1114,6 +1211,8 @@ static int amd_pstate_epp_cpu_init(struct
> > cpufreq_policy *policy)
> >       cpudata->cpu = policy->cpu;
> >       cpudata->epp_policy = 0;
> >
> > +     amd_pstate_init_prefcore(cpudata);
> > +
> >       ret = amd_pstate_init_perf(cpudata);
> >       if (ret)
> >               goto free_cpudata1;
> > @@ -1527,7 +1626,17 @@ static int __init amd_pstate_param(char *str)
> >
> >       return amd_pstate_set_driver(mode_idx);  }
> > +
> > +static int __init amd_prefcore_param(char *str) {
> > +     if (!strcmp(str, "disable"))
> > +             amd_pstate_prefcore = false;
> > +
> > +     return 0;
> > +}
> > +
> >  early_param("amd_pstate", amd_pstate_param);
> > +early_param("amd_prefcore", amd_prefcore_param);
> >
> >  MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
> > MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver"); diff
> > --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index
> > 446394f84606..87e140e9e6db 100644
> > --- a/include/linux/amd-pstate.h
> > +++ b/include/linux/amd-pstate.h
> > @@ -52,6 +52,9 @@ struct amd_aperf_mperf {
> >   * @prev: Last Aperf/Mperf/tsc count value read from register
> >   * @freq: current cpu frequency value
> >   * @boost_supported: check whether the Processor or SBIOS supports
> > boost mode
> > + * @hw_prefcore: check whether HW supports preferred core featue.
> > + *             Only when hw_prefcore and early prefcore param are true,
> > + *             AMD P-State driver supports preferred core featue.
> >   * @epp_policy: Last saved policy used to set energy-performance
> preference
> >   * @epp_cached: Cached CPPC energy-performance preference value
> >   * @policy: Cpufreq policy value
> > @@ -81,6 +84,7 @@ struct amd_cpudata {
> >
> >       u64     freq;
> >       bool    boost_supported;
> > +     bool    hw_prefcore;
> >
> >       /* EPP feature related attributes*/
> >       s16     epp_policy;
> > --
> > 2.34.1
>

Patch

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9a1e194d5cf8..2033e5e70017 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -37,6 +37,7 @@ 
 #include <linux/uaccess.h>
 #include <linux/static_call.h>
 #include <linux/amd-pstate.h>
+#include <linux/topology.h>
 
 #include <acpi/processor.h>
 #include <acpi/cppc_acpi.h>
@@ -49,6 +50,7 @@ 
 
 #define AMD_PSTATE_TRANSITION_LATENCY	20000
 #define AMD_PSTATE_TRANSITION_DELAY	1000
+#define AMD_PSTATE_PREFCORE_THRESHOLD	166
 
 /*
  * TODO: We need more time to fine tune processors with shared memory solution
@@ -64,6 +66,7 @@  static struct cpufreq_driver amd_pstate_driver;
 static struct cpufreq_driver amd_pstate_epp_driver;
 static int cppc_state = AMD_PSTATE_UNDEFINED;
 static bool cppc_enabled;
+static bool amd_pstate_prefcore = true;
 
 /*
  * AMD Energy Preference Performance (EPP)
@@ -290,23 +293,21 @@  static inline int amd_pstate_enable(bool enable)
 static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;
-	u32 highest_perf;
 
 	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
 	if (ret)
 		return ret;
 
-	/*
-	 * TODO: Introduce AMD specific power feature.
-	 *
-	 * CPPC entry doesn't indicate the highest performance in some ASICs.
+	/* For platforms that do not support the preferred core feature, the
+	 * highest_pef may be configured with 166 or 255, to avoid max frequency
+	 * calculated wrongly. we take the AMD_CPPC_HIGHEST_PERF(cap1) value as
+	 * the default max perf.
 	 */
-	highest_perf = amd_get_highest_perf();
-	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
-		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
-
-	WRITE_ONCE(cpudata->highest_perf, highest_perf);
+	if (cpudata->hw_prefcore)
+		WRITE_ONCE(cpudata->highest_perf, AMD_PSTATE_PREFCORE_THRESHOLD);
+	else
+		WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
 
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
@@ -318,17 +319,15 @@  static int pstate_init_perf(struct amd_cpudata *cpudata)
 static int cppc_init_perf(struct amd_cpudata *cpudata)
 {
 	struct cppc_perf_caps cppc_perf;
-	u32 highest_perf;
 
 	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
 	if (ret)
 		return ret;
 
-	highest_perf = amd_get_highest_perf();
-	if (highest_perf > cppc_perf.highest_perf)
-		highest_perf = cppc_perf.highest_perf;
-
-	WRITE_ONCE(cpudata->highest_perf, highest_perf);
+	if (cpudata->hw_prefcore)
+		WRITE_ONCE(cpudata->highest_perf, AMD_PSTATE_PREFCORE_THRESHOLD);
+	else
+		WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
 
 	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -676,6 +675,80 @@  static void amd_perf_ctl_reset(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }
 
+/*
+ * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
+
+/*
+ * Get the highest performance register value.
+ * @cpu: CPU from which to get highest performance.
+ * @highest_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+static int amd_pstate_get_highest_perf(int cpu, u32 *highest_perf)
+{
+	int ret;
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		u64 cap1;
+
+		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+		if (ret)
+			return ret;
+		WRITE_ONCE(*highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+	} else {
+		u64 cppc_highest_perf;
+
+		ret = cppc_get_highest_perf(cpu, &cppc_highest_perf);
+		if (ret)
+			return ret;
+		WRITE_ONCE(*highest_perf, cppc_highest_perf);
+	}
+
+	return (ret);
+}
+
+#define CPPC_MAX_PERF	U8_MAX
+
+static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
+{
+	int ret, prio;
+	u32 highest_perf;
+
+	ret = amd_pstate_get_highest_perf(cpudata->cpu, &highest_perf);
+	if (ret)
+		return;
+
+	cpudata->hw_prefcore = true;
+	/* check if CPPC preferred core feature is enabled*/
+	if (highest_perf < CPPC_MAX_PERF)
+		prio = (int)highest_perf;
+	else {
+		pr_debug("AMD CPPC preferred core is unsupported!\n");
+		cpudata->hw_prefcore = false;
+		return;
+	}
+
+	if (!amd_pstate_prefcore)
+		return;
+
+	/*
+	 * The priorities can be set regardless of whether or not
+	 * sched_set_itmt_support(true) has been called and it is valid to
+	 * update them at any time after it has been called.
+	 */
+	sched_set_itmt_core_prio(prio, cpudata->cpu);
+
+	schedule_work(&sched_prefcore_work);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -697,6 +770,8 @@  static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -845,6 +920,17 @@  static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
 	return sysfs_emit(buf, "%u\n", perf);
 }
 
+static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
+					   char *buf)
+{
+	bool hw_prefcore;
+	struct amd_cpudata *cpudata = policy->driver_data;
+
+	hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
+
+	return sysfs_emit(buf, "%s\n", hw_prefcore ? "supported" : "unsupported");
+}
+
 static ssize_t show_energy_performance_available_preferences(
 				struct cpufreq_policy *policy, char *buf)
 {
@@ -1037,18 +1123,27 @@  static ssize_t status_store(struct device *a, struct device_attribute *b,
 	return ret < 0 ? ret : count;
 }
 
+static ssize_t prefcore_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%s\n", amd_pstate_prefcore ? "enabled" : "disabled");
+}
+
 cpufreq_freq_attr_ro(amd_pstate_max_freq);
 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
 
 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
 cpufreq_freq_attr_rw(energy_performance_preference);
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
 static DEVICE_ATTR_RW(status);
+static DEVICE_ATTR_RO(prefcore);
 
 static struct freq_attr *amd_pstate_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_hw_prefcore,
 	NULL,
 };
 
@@ -1056,6 +1151,7 @@  static struct freq_attr *amd_pstate_epp_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_hw_prefcore,
 	&energy_performance_preference,
 	&energy_performance_available_preferences,
 	NULL,
@@ -1063,6 +1159,7 @@  static struct freq_attr *amd_pstate_epp_attr[] = {
 
 static struct attribute *pstate_global_attributes[] = {
 	&dev_attr_status.attr,
+	&dev_attr_prefcore.attr,
 	NULL
 };
 
@@ -1114,6 +1211,8 @@  static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	cpudata->cpu = policy->cpu;
 	cpudata->epp_policy = 0;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1527,7 +1626,17 @@  static int __init amd_pstate_param(char *str)
 
 	return amd_pstate_set_driver(mode_idx);
 }
+
+static int __init amd_prefcore_param(char *str)
+{
+	if (!strcmp(str, "disable"))
+		amd_pstate_prefcore = false;
+
+	return 0;
+}
+
 early_param("amd_pstate", amd_pstate_param);
+early_param("amd_prefcore", amd_prefcore_param);
 
 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 446394f84606..87e140e9e6db 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -52,6 +52,9 @@  struct amd_aperf_mperf {
  * @prev: Last Aperf/Mperf/tsc count value read from register
  * @freq: current cpu frequency value
  * @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @hw_prefcore: check whether HW supports preferred core featue.
+ * 		  Only when hw_prefcore and early prefcore param are true,
+ * 		  AMD P-State driver supports preferred core featue.
  * @epp_policy: Last saved policy used to set energy-performance preference
  * @epp_cached: Cached CPPC energy-performance preference value
  * @policy: Cpufreq policy value
@@ -81,6 +84,7 @@  struct amd_cpudata {
 
 	u64	freq;
 	bool	boost_supported;
+	bool	hw_prefcore;
 
 	/* EPP feature related attributes*/
 	s16	epp_policy;