[14/20] x86/mce/amd: Unify AMD DFR handler with MCA Polling

Message ID: 20231118193248.1296798-15-yazen.ghannam@amd.com
State: Handled Elsewhere
Series: MCA Updates

Commit Message

Yazen Ghannam Nov. 18, 2023, 7:32 p.m. UTC
AMD systems optionally support a Deferred error interrupt. The interrupt
should be used as another signal to trigger MCA polling, similar to how
other MCA interrupts are handled.

Deferred errors do not require any special handling related to the
interrupt itself, e.g. resetting or rearming it.

However, Scalable MCA systems include a pair of registers, MCA_DESTAT
and MCA_DEADDR, that should be checked for valid errors. This check
should be done whenever MCA registers are polled. Currently, the
Deferred error interrupt does this check, but the MCA polling function
does not.
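
To illustrate, the polling-side check amounts to something like the
following standalone sketch. This is not kernel code: read_destat(),
bank_has_pending_dfr(), and the smca flag are made-up stand-ins, with the
MSR read stubbed out; only the MCI_STATUS_VAL bit position matches the
kernel's definition. The real helper this patch adds is
smca_destat_is_valid() in the core.c hunk below.

  #include <stdbool.h>
  #include <stdint.h>

  #define MCI_STATUS_VAL	(1ULL << 63)

  static bool smca = true;	/* Assume a Scalable MCA system. */

  /* Stand-in for an rdmsrl() of MSR_AMD64_SMCA_MCx_DESTAT(bank). */
  static uint64_t read_destat(unsigned int bank)
  {
  	(void)bank;
  	return 0;
  }

  /*
   * A bank whose MCA_STATUS reads as invalid may still hold a valid
   * Deferred error in MCA_DESTAT on SMCA systems, so polling must
   * check both.
   */
  static bool bank_has_pending_dfr(unsigned int bank, uint64_t mca_status)
  {
  	if (mca_status & MCI_STATUS_VAL)
  		return false;	/* The MCA_STATUS path will log it. */

  	if (!smca)
  		return false;	/* No MCA_DESTAT on non-SMCA systems. */

  	return read_destat(bank) & MCI_STATUS_VAL;
  }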

Call the MCA polling function when handling the Deferred error
interrupt. This keeps all "polling" cases in a common function.

Add a "SMCA DFR handler" for Deferred errors to the AMD vendor-specific
error handler callback. This will do the same status check, register
clearing, and logging that the interrupt handler has done. And it
extends the common polling flow to find AMD Deferred errors.
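
Continuing the sketch above (same stub and macro, plus
MCI_STATUS_DEFERRED at the bit position the kernel defines), the order of
operations could be modeled as follows. handle_dfr() and clear_destat()
are made-up names; the patch's real function is handle_smca_dfr_error(),
shown in full in the amd.c hunk below.

  #define MCI_STATUS_DEFERRED	(1ULL << 44)

  /* Stand-in for wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0). */
  static void clear_destat(unsigned int bank)
  {
  	(void)bank;
  }

  static void handle_dfr(unsigned int bank, uint64_t mca_status)
  {
  	uint64_t destat;

  	if (!smca)
  		return;		/* Non-SMCA: nothing extra to do here. */

  	/* Already logged from MCA_STATUS: only clear MCA_DESTAT. */
  	if (mca_status & MCI_STATUS_DEFERRED)
  		goto clear;

  	/* Otherwise, log from MCA_DESTAT if it holds a valid error. */
  	destat = read_destat(bank);
  	if (!(destat & MCI_STATUS_VAL))
  		return;

  	/* ... log destat and the MCA_DEADDR value here ... */
  clear:
  	clear_destat(bank);
  }

  int main(void)
  {
  	/* MCA_STATUS invalid: check MCA_DESTAT (stub holds nothing). */
  	handle_dfr(0, 0);
  	/* Deferred bit set in MCA_STATUS: clear MCA_DESTAT only. */
  	handle_dfr(0, MCI_STATUS_VAL | MCI_STATUS_DEFERRED);
  	return 0;
  }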

Give the AMD MCA interrupt handler a common name now that both interrupt
sources are handled by a unified function.

Remove old code whose functionality is already covered in the common MCA
code.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
---
 arch/x86/kernel/cpu/mce/amd.c  | 122 +++++++++------------------------
 arch/x86/kernel/cpu/mce/core.c |  16 ++++-
 2 files changed, 48 insertions(+), 90 deletions(-)

Patch

diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 8735a8b9b7cc..b45ee297cde2 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -325,8 +325,7 @@  static DEFINE_PER_CPU(u64, bank_map);
 /* Map of banks that have more than MCA_MISC0 available. */
 static DEFINE_PER_CPU(u64, smca_misc_banks_map);
 
-static void amd_threshold_interrupt(void);
-static void amd_deferred_error_interrupt(void);
+static void amd_mca_interrupt(void);
 
 static void default_deferred_error_interrupt(void)
 {
@@ -595,7 +594,7 @@  static void enable_deferred_error_interrupt(u64 mca_intr_cfg)
 	if (setup_APIC_eilvt(dfr_offset, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0))
 		return;
 
-	deferred_error_int_vector = amd_deferred_error_interrupt;
+	deferred_error_int_vector = amd_mca_interrupt;
 
 	if (mce_flags.smca)
 		return;
@@ -874,33 +873,6 @@  bool amd_mce_usable_address(struct mce *m)
 	return false;
 }
 
-static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
-{
-	struct mce m;
-
-	mce_setup(&m);
-
-	m.status = status;
-	m.misc   = misc;
-	m.bank   = bank;
-	m.tsc	 = rdtsc();
-
-	if (m.status & MCI_STATUS_ADDRV) {
-		m.addr = addr;
-
-		smca_extract_err_addr(&m);
-	}
-
-	if (mce_flags.smca) {
-		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);
-
-		if (m.status & MCI_STATUS_SYNDV)
-			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
-	}
-
-	mce_log(&m);
-}
-
 DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
 {
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
@@ -910,75 +882,46 @@  DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
 	apic_eoi();
 }
 
-/*
- * Returns true if the logged error is deferred. False, otherwise.
- */
-static inline bool
-_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
-{
-	u64 status, addr = 0;
-
-	rdmsrl(msr_stat, status);
-	if (!(status & MCI_STATUS_VAL))
-		return false;
-
-	if (status & MCI_STATUS_ADDRV)
-		rdmsrl(msr_addr, addr);
-
-	__log_error(bank, status, addr, misc);
-
-	wrmsrl(msr_stat, 0);
-
-	return status & MCI_STATUS_DEFERRED;
-}
-
-static bool _log_error_deferred(unsigned int bank, u32 misc)
-{
-	if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS),
-			     mca_msr_reg(bank, MCA_ADDR), misc))
-		return false;
-
-	/*
-	 * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers.
-	 * Return true here to avoid accessing these registers.
-	 */
-	if (!mce_flags.smca)
-		return true;
-
-	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
-	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
-	return true;
-}
-
 /*
  * We have three scenarios for checking for Deferred errors:
  *
  * 1) Non-SMCA systems check MCA_STATUS and log error if found.
+ *    This is already handled in machine_check_poll().
  * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
  *    clear MCA_DESTAT.
  * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
  *    log it.
  */
-static void log_error_deferred(unsigned int bank)
+static void handle_smca_dfr_error(struct mce *m)
 {
-	if (_log_error_deferred(bank, 0))
+	struct mce m_dfr;
+	u64 mca_destat;
+
+	/* Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. */
+	if (!mce_flags.smca)
 		return;
 
-	/*
-	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
-	 * for a valid error.
-	 */
-	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
-			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
-}
+	/* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */
+	if (m->status & MCI_STATUS_DEFERRED)
+		goto out;
 
-/* APIC interrupt handler for deferred errors */
-static void amd_deferred_error_interrupt(void)
-{
-	unsigned int bank;
+	/* MCA_STATUS didn't have a deferred error, so check MCA_DESTAT for one. */
+	mca_destat = mce_rdmsrl(MSR_AMD64_SMCA_MCx_DESTAT(m->bank));
 
-	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank)
-		log_error_deferred(bank);
+	if (!(mca_destat & MCI_STATUS_VAL))
+		return;
+
+	/* Reuse the same data collected from machine_check_poll(). */
+	memcpy(&m_dfr, m, sizeof(m_dfr));
+
+	/* Save the MCA_DE{STAT,ADDR} values. */
+	m_dfr.status = mca_destat;
+	m_dfr.addr = mce_rdmsrl(MSR_AMD64_SMCA_MCx_DEADDR(m_dfr.bank));
+
+	mce_log(&m_dfr);
+
+out:
+	wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(m->bank), 0);
 }
 
 static void reset_block(struct threshold_block *block)
@@ -1028,10 +971,10 @@  static void reset_thr_blocks(unsigned int bank)
 }
 
 /*
- * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
- * goes off when error_count reaches threshold_limit.
+ * The same procedure should be used when checking MCA banks in non-urgent
+ * situations, e.g. polling and interrupts.
  */
-static void amd_threshold_interrupt(void)
+static void amd_mca_interrupt(void)
 {
 	/* Check all banks for now. This could be optimized in the future. */
 	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
@@ -1040,6 +983,7 @@  static void amd_threshold_interrupt(void)
 void amd_handle_error(struct mce *m)
 {
 	reset_thr_blocks(m->bank);
+	handle_smca_dfr_error(m);
 }
 
 /*
@@ -1514,6 +1458,6 @@  int mce_threshold_create_device(unsigned int cpu)
 	this_cpu_write(threshold_banks, bp);
 
 	if (thresholding_irq_en)
-		mce_threshold_vector = amd_threshold_interrupt;
+		mce_threshold_vector = amd_mca_interrupt;
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 040dc226c6a5..a81c0df217e2 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -663,6 +663,14 @@  static void vendor_handle_error(struct mce *m)
 
 DEFINE_PER_CPU(unsigned, mce_poll_count);
 
+static bool smca_destat_is_valid(unsigned int bank)
+{
+	if (!mce_flags.smca)
+		return false;
+
+	return mce_rdmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank)) & MCI_STATUS_VAL;
+}
+
 /*
  * Poll for corrected events or events that happened before reset.
  * Those are just logged through /dev/mcelog.
@@ -704,8 +712,14 @@  bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
 
 		/* If this entry is not valid, ignore it */
-		if (!(m.status & MCI_STATUS_VAL))
+		if (!(m.status & MCI_STATUS_VAL)) {
+			if (smca_destat_is_valid(i)) {
+				mce_read_aux(&m, i);
+				goto clear_it;
+			}
+
 			continue;
+		}
 
 		/*
 		 * If we are logging everything (at CPU online) or this