===================================================================
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/energy_model.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
@@ -938,6 +939,12 @@ static struct freq_attr *hwp_cpufreq_att
NULL,
};
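+/* CPU types on a hybrid platform, used to index the EM perf domain data. */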
+enum hybrid_cpu_type {
+ HYBRID_PCORE = 0,
+ HYBRID_ECORE,
+ HYBRID_NR_TYPES
+};
+
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
/*
* Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -945,6 +952,86 @@ static struct cpudata *hybrid_max_perf_c
*/
static DEFINE_MUTEX(hybrid_capacity_lock);
+#ifdef CONFIG_ENERGY_MODEL
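+/*
+ * EM perf domain data for one hybrid CPU type: the CPUs of that type, the
+ * device the perf domain is registered for, and the EM cost callback.
+ */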
+struct hybrid_em_perf_domain {
+ cpumask_t cpumask;
+ struct device *dev;
+ struct em_data_callback cb;
+};
+
+static int hybrid_pcore_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ /*
+ * The number used here needs to be higher than the analogous
+ * one in hybrid_ecore_cost() below. The units and the actual
+ * values don't matter.
+ */
+ *cost = 2;
+ return 0;
+}
+
+static int hybrid_ecore_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ *cost = 1;
+ return 0;
+}
+
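+/* EM perf domain data, one entry per hybrid CPU type. */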
+static struct hybrid_em_perf_domain perf_domains[HYBRID_NR_TYPES] = {
+ [HYBRID_PCORE] = { .cb.get_cost = hybrid_pcore_cost, },
+ [HYBRID_ECORE] = { .cb.get_cost = hybrid_ecore_cost, }
+};
+
+static bool hybrid_register_perf_domain(struct hybrid_em_perf_domain *pd)
+{
+ /*
+ * Registering EM perf domains without asymmetric CPU capacity
+ * support enabled is wasteful, so don't do that.
+ */
+ if (!hybrid_max_perf_cpu)
+ return false;
+
+ pd->dev = get_cpu_device(cpumask_first(&pd->cpumask));
+ if (!pd->dev)
+ return false;
+
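+	/*
+	 * Register a perf domain with a single perf state and abstract
+	 * (non-microwatt) cost values: only the relative cost of the
+	 * CPU types matters here.
+	 */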
+ if (em_dev_register_perf_domain(pd->dev, 1, &pd->cb, &pd->cpumask, false)) {
+ pd->dev = NULL;
+ return false;
+ }
+
+ return true;
+}
+
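+/* Try to register an EM perf domain for every hybrid CPU type. */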
+static void hybrid_register_all_perf_domains(void)
+{
+ enum hybrid_cpu_type type;
+
+ for (type = HYBRID_PCORE; type < HYBRID_NR_TYPES; type++)
+ hybrid_register_perf_domain(&perf_domains[type]);
+}
+
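+/* Add @cpu to the EM perf domain corresponding to its hybrid CPU type. */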
+static void hybrid_add_to_perf_domain(int cpu, enum hybrid_cpu_type type)
+{
+ struct hybrid_em_perf_domain *pd = &perf_domains[type];
+
+ guard(mutex)(&hybrid_capacity_lock);
+
+ if (cpumask_test_cpu(cpu, &pd->cpumask))
+ return;
+
+ cpumask_set_cpu(cpu, &pd->cpumask);
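+	/*
+	 * If the perf domain is already registered, extend it to cover the
+	 * new CPU.  Otherwise, attempt to register it now (registration may
+	 * have been skipped earlier because hybrid_max_perf_cpu was not set)
+	 * and rebuild the perf domains so the new one is taken into account.
+	 */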
+ if (pd->dev)
+ em_dev_expand_perf_domain(pd->dev, cpu);
+ else if (hybrid_register_perf_domain(pd))
+ em_rebuild_perf_domains();
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_add_to_perf_domain(int cpu, enum hybrid_cpu_type type) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
{
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
@@ -1034,11 +1121,14 @@ static void __hybrid_refresh_cpu_capacit
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_refresh_cpu_capacity_scaling(void)
+static void hybrid_refresh_cpu_capacity_scaling(bool register_perf_domains)
{
guard(mutex)(&hybrid_capacity_lock);
__hybrid_refresh_cpu_capacity_scaling();
+
+ if (register_perf_domains)
+ hybrid_register_all_perf_domains();
}
static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1049,7 +1139,7 @@ static void hybrid_init_cpu_capacity_sca
* operation mode.
*/
if (refresh) {
- hybrid_refresh_cpu_capacity_scaling();
+ hybrid_refresh_cpu_capacity_scaling(false);
return;
}
@@ -1059,10 +1149,14 @@ static void hybrid_init_cpu_capacity_sca
* do not do that when SMT is in use.
*/
if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
- hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_refresh_cpu_capacity_scaling(true);
/*
* Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
+ * packing and enable asym capacity and EAS.
*/
sched_clear_itmt_support();
}
@@ -2215,12 +2309,16 @@ static int hwp_get_cpu_scaling(int cpu)
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
/* P-cores have a smaller perf level-to-frequency scaling factor. */
- if (cpu_type == 0x40)
+ if (cpu_type == 0x40) {
+ hybrid_add_to_perf_domain(cpu, HYBRID_PCORE);
return hybrid_scaling_factor;
+ }
/* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ if (cpu_type == 0x20) {
+ hybrid_add_to_perf_domain(cpu, HYBRID_ECORE);
return core_get_scaling();
+ }
/*
* If reached here, this system is either non-hybrid (like Tiger