@@ -45,7 +45,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
* cmci_discover_lock protects against parallel discovery attempts
* which could race against each other.
*/
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+DEFINE_RAW_SPINLOCK(cmci_discover_lock);

/*
* On systems that do support CMCI but it's disabled, polling for MCEs can
@@ -61,7 +61,7 @@ static DEFINE_SPINLOCK(cmci_poll_lock);
* MCi_CTL2 threshold for each bank when there is no storm.
* Default value for each bank may have been set by BIOS.
*/
-static u16 cmci_threshold[MAX_NR_BANKS];
+u16 cmci_threshold[MAX_NR_BANKS];

/*
* High threshold to limit CMCI rate during storms. Max supported is
@@ -73,7 +73,6 @@ static u16 cmci_threshold[MAX_NR_BANKS];
* to corrected errors, so keeping CMCI enabled means that uncorrected
* errors will still be processed in a timely fashion.
*/
-#define CMCI_STORM_THRESHOLD 32749

static int cmci_supported(int *banks)
{
@@ -7,7 +7,8 @@

#include <linux/device.h>
+#include <linux/spinlock.h>
#include <asm/mce.h>

enum severity_level {
MCE_NO_SEVERITY,
MCE_DEFERRED_SEVERITY,
@@ -334,11 +334,18 @@ static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
}

extern void (*mc_poll_banks)(void);
+
+#define CMCI_STORM_THRESHOLD 32749
+extern raw_spinlock_t cmci_discover_lock;
+extern u16 cmci_threshold[MAX_NR_BANKS];
+
#ifdef CONFIG_X86_MCE_ZHAOXIN
void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c);
void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c);
+void mce_zhaoxin_handle_storm(int bank, bool on);
#else
static inline void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c) { }
+static inline void mce_zhaoxin_handle_storm(int bank, bool on) { }
#endif
#endif /* __X86_MCE_INTERNAL_H__ */
@@ -63,6 +63,10 @@ static void mce_handle_storm(unsigned int bank, bool on)
case X86_VENDOR_INTEL:
mce_intel_handle_storm(bank, on);
break;
+ case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
+ mce_zhaoxin_handle_storm(bank, on);
+ break;
}
}
@@ -33,3 +33,26 @@ void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
{
intel_clear_lmce();
}
+
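+/*
+ * During a CMCI storm, disable CMCI for this bank and park its
+ * threshold at CMCI_STORM_THRESHOLD. When the storm subsides,
+ * restore the bank's original threshold and re-enable CMCI.
+ */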
+void mce_zhaoxin_handle_storm(int bank, bool on)
+{
+ unsigned long flags;
+ u64 val;
+
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ if (on) {
+ val &= ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK);
+ val |= CMCI_STORM_THRESHOLD;
+ } else {
+ val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
+ val |= (MCI_CTL2_CMCI_EN | cmci_threshold[bank]);
+ }
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}