
[12/15] cpufreq/amd-pstate: Always write EPP value when updating perf

Message ID 20241205222847.7889-13-mario.limonciello@amd.com (mailing list archive)
State Superseded, archived
Series amd-pstate 6.14 cleanups and improvements

Commit Message

Mario Limonciello Dec. 5, 2024, 10:28 p.m. UTC
For MSR systems the EPP value lives in the same register as the perf
targets, so dividing them into two separate MSR writes is wasteful.

In msr_update_perf(), update both the EPP and perf values in one write to
MSR_AMD_CPPC_REQ, and cache the new values only if the write succeeds.

To accomplish this, plumb the EPP value through the update_perf() call
path and modify all of its callers to check the return value.
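
For illustration, the combined request is composed roughly like this (a
sketch only, not part of the patch; the AMD_PSTATE_*_MASK names are the
FIELD_PREP masks introduced earlier in this series):

	u64 value = READ_ONCE(cpudata->cppc_req_cached);

	/* clear all four request fields, EPP included */
	value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
		   AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);

	/* EPP occupies bits 31:24 of MSR_AMD_CPPC_REQ, next to the targets */
	value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
	value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
	value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
	value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);

	/* a single MSR write now updates the perf targets and EPP together */
	wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);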

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
 drivers/cpufreq/amd-pstate.c | 80 +++++++++++++++++++++++-------------
 1 file changed, 51 insertions(+), 29 deletions(-)

Patch

diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 20de3a9fd992d..8598f50e18b58 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -222,25 +222,44 @@  static s16 shmem_get_epp(struct amd_cpudata *cpudata)
 }
 
 static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			       u32 des_perf, u32 max_perf, bool fast_switch)
+			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
+	u64 value;
+
+	value = READ_ONCE(cpudata->cppc_req_cached);
+
+	value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
+		   AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);
+	value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
+	value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
+	value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
+	value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);
+
 	if (fast_switch) {
-		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+		wrmsrl(MSR_AMD_CPPC_REQ, value);
 		return 0;
+	} else {
+		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+		if (ret)
+			return ret;
 	}
 
-	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
-			     READ_ONCE(cpudata->cppc_req_cached));
+	WRITE_ONCE(cpudata->cppc_req_cached, value);
+	WRITE_ONCE(cpudata->epp_cached, epp);
+
+	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
 static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
 					  u32 min_perf, u32 des_perf,
-					  u32 max_perf, bool fast_switch)
+					  u32 max_perf, u32 epp,
+					  bool fast_switch)
 {
 	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
-						   max_perf, fast_switch);
+						   max_perf, epp, fast_switch);
 }
 
 static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -459,12 +470,19 @@  static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata,
-			     u32 min_perf, u32 des_perf,
-			     u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
 	struct cppc_perf_ctrls perf_ctrls;
 
+	if (cppc_state == AMD_PSTATE_ACTIVE) {
+		int ret = shmem_set_epp(cpudata, epp);
+
+		if (ret)
+			return ret;
+		WRITE_ONCE(cpudata->epp_cached, epp);
+	}
+
 	perf_ctrls.max_perf = max_perf;
 	perf_ctrls.min_perf = min_perf;
 	perf_ctrls.desired_perf = des_perf;
@@ -545,10 +563,9 @@  static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf,
-			       max_perf, fast_switch);
+	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
 
 cpufreq_policy_put:
 	cpufreq_cpu_put(policy);
 }
 
@@ -1547,6 +1565,7 @@  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value;
+	u32 epp;
 
 	amd_pstate_update_min_max_limit(policy);
 
@@ -1559,23 +1578,19 @@  static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 	value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-		WRITE_ONCE(cpudata->epp_cached, 0);
-	value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, cpudata->epp_cached);
-
-	WRITE_ONCE(cpudata->cppc_req_cached, value);
+		epp = 0;
+	else
+		epp = READ_ONCE(cpudata->epp_cached);
 
 	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
-					  cpudata->epp_cached,
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
 					  cpudata->min_limit_perf,
 					  cpudata->max_limit_perf,
 					  cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
-			       cpudata->max_limit_perf, false);
-
-	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
+	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+				      cpudata->max_limit_perf, epp, false);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1606,7 +1621,7 @@  static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 {
 	u64 max_perf;
 	int ret;
@@ -1624,17 +1639,19 @@  static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 					  max_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
-	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
 }
 
 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	int ret;
 
 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
 
-	amd_pstate_epp_reenable(cpudata);
+	ret = amd_pstate_epp_reenable(cpudata);
+	if (ret)
+		return ret;
 	cpudata->suspended = false;
 
 	return 0;
@@ -1658,10 +1675,8 @@  static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
 					  min_perf, min_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
-	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
-
-	return 0;
+	return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)