
[5/5] platform/x86/intel/ifs: Add an entry rendezvous for SAF

Message ID 20240125082254.424859-6-ashok.raj@intel.com
State Accepted, archived
Series Miscellaneous fixes and improvements to Infield Scan

Commit Message

Ashok Raj Jan. 25, 2024, 8:22 a.m. UTC
The activation of SAF includes a parameter that makes the microcode wait for
both threads to join. It's preferable to perform an entry rendezvous before
the activation so that the two threads issue the `wrmsr` close to each
other. In some cases one of the threads has been observed to arrive just a
bit late. An entry rendezvous reduces the likelihood of such cases.

Add an entry rendezvous so that the activation on both threads happens
close together.

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
---
 drivers/platform/x86/intel/ifs/runtest.c | 48 +++++++++++++-----------
 1 file changed, 26 insertions(+), 22 deletions(-)
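
For illustration only, a minimal userspace sketch of the entry-rendezvous
pattern this patch adds (not the driver code): each sibling increments a
shared counter and spins until all siblings have arrived or a timeout
expires, so both threads reach the activation step nearly simultaneously.
The 100 ns poll unit and the one-second timeout mirror the patch; the
pthread/nanosleep scaffolding and the names wait_for_sibling(), thread_fn()
and NTHREADS are hypothetical stand-ins for the kernel's atomic_t, ndelay()
and stop_core_cpuslocked() machinery.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define SPINUNIT_NS 100            /* poll interval, as in the patch */
#define TIMEOUT_NS  1000000000LL   /* NSEC_PER_SEC */
#define NTHREADS    2              /* two SMT siblings of one core */

static atomic_int cpus_in;

/* Spin until every sibling has checked in, or the timeout expires. */
static void wait_for_sibling(long long timeout)
{
	atomic_fetch_add(&cpus_in, 1);
	while (atomic_load(&cpus_in) < NTHREADS) {
		if (timeout < SPINUNIT_NS)
			return;        /* give up; caller proceeds anyway */
		struct timespec ts = { 0, SPINUNIT_NS };
		nanosleep(&ts, NULL);  /* userspace stand-in for ndelay() */
		timeout -= SPINUNIT_NS;
	}
}

static void *thread_fn(void *arg)
{
	wait_for_sibling(TIMEOUT_NS);
	/* Both threads get here close together; in the driver this is
	 * where the activation WRMSR is issued. */
	printf("thread %ld past rendezvous\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	atomic_store(&cpus_in, 0);     /* reset before each run, like the
					* atomic_set() added in ifs_test_core() */
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, thread_fn, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

The reset-then-wait ordering matters in the driver as well: the counter is
cleared before stop_core_cpuslocked() dispatches doscan() on the siblings,
so a stale count from a previous test run cannot let one thread skip the
wait.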

Patch

diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index e3307dd8e3c4..95b4b71fab53 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -140,6 +140,29 @@  static bool can_restart(union ifs_status status)
 	return false;
 }
 
+#define SPINUNIT 100 /* 100 nsec */
+static atomic_t array_cpus_in;
+static atomic_t scan_cpus_in;
+
+/*
+ * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
+ */
+static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
+{
+	int cpu = smp_processor_id();
+	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
+	int all_cpus = cpumask_weight(smt_mask);
+
+	atomic_inc(t);
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT)
+			return;
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+		touch_nmi_watchdog();
+	}
+}
+
 /*
  * Execute the scan. Called "simultaneously" on all threads of a core
  * at high priority using the stop_cpus mechanism.
@@ -165,6 +188,8 @@  static int doscan(void *data)
 	/* Only the first logical CPU on a core reports result */
 	first = cpumask_first(cpu_smt_mask(cpu));
 
+	wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC);
+
 	/*
 	 * This WRMSR will wait for other HT threads to also write
 	 * to this MSR (at most for activate.delay cycles). Then it
@@ -230,6 +255,7 @@  static void ifs_test_core(int cpu, struct device *dev)
 		}
 
 		params.activate = &activate;
+		atomic_set(&scan_cpus_in, 0);
 		stop_core_cpuslocked(cpu, doscan, &params);
 
 		status = params.status;
@@ -270,28 +296,6 @@  static void ifs_test_core(int cpu, struct device *dev)
 	}
 }
 
-#define SPINUNIT 100 /* 100 nsec */
-static atomic_t array_cpus_in;
-
-/*
- * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus()
- */
-static void wait_for_sibling_cpu(atomic_t *t, long long timeout)
-{
-	int cpu = smp_processor_id();
-	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
-	int all_cpus = cpumask_weight(smt_mask);
-
-	atomic_inc(t);
-	while (atomic_read(t) < all_cpus) {
-		if (timeout < SPINUNIT)
-			return;
-		ndelay(SPINUNIT);
-		timeout -= SPINUNIT;
-		touch_nmi_watchdog();
-	}
-}
-
 static int do_array_test(void *data)
 {
 	union ifs_array *command = data;