--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -496,6 +496,7 @@ bool mce_usable_address(struct mce *m)
case X86_VENDOR_INTEL:
case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
return intel_mce_usable_address(m);
default:
@@ -513,6 +514,7 @@ bool mce_is_memory_error(struct mce *m)
case X86_VENDOR_INTEL:
case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
*
@@ -1247,7 +1249,8 @@ static noinstr bool mce_check_crashing_cpu(void)
mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
- if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
if (mcgstatus & MCG_STATUS_LMCES)
return false;
}
@@ -1521,7 +1524,8 @@ noinstr void do_machine_check(struct pt_regs *regs)
- * on Intel, Zhaoxin only.
+ * on Intel, Zhaoxin and Centaur only.
*/
if (m.cpuvendor == X86_VENDOR_INTEL ||
- m.cpuvendor == X86_VENDOR_ZHAOXIN)
+ m.cpuvendor == X86_VENDOR_ZHAOXIN ||
+ m.cpuvendor == X86_VENDOR_CENTAUR)
lmce = m.mcgstatus & MCG_STATUS_LMCES;
/*
@@ -1970,6 +1974,18 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
}
}
+ if (c->x86_vendor == X86_VENDOR_CENTAUR) {
+ /*
+ * All newer Centaur CPUs support MCE broadcasting. Enable
+ * synchronization with a one second timeout.
+ */
+ if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
+ c->x86 > 6) {
+ if (cfg->monarch_timeout < 0)
+ cfg->monarch_timeout = USEC_PER_SEC;
+ }
+ }
+
if (cfg->monarch_timeout < 0)
cfg->monarch_timeout = 0;
if (cfg->bootlog != 0)
@@ -2012,21 +2028,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
}
}
-static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
-{
- struct mca_config *cfg = &mca_cfg;
-
- /*
- * All newer Centaur CPUs support MCE broadcasting. Enable
- * synchronization with a one second timeout.
- */
- if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
- c->x86 > 6) {
- if (cfg->monarch_timeout < 0)
- cfg->monarch_timeout = USEC_PER_SEC;
- }
-}
-
static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
{
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
@@ -2072,9 +2073,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
break;
case X86_VENDOR_CENTAUR:
- mce_centaur_feature_init(c);
- break;
-
case X86_VENDOR_ZHAOXIN:
mce_zhaoxin_feature_init(c);
break;
@@ -2092,6 +2090,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
break;
case X86_VENDOR_ZHAOXIN:
+ case X86_VENDOR_CENTAUR:
mce_zhaoxin_feature_clear(c);
break;
@@ -2401,7 +2400,8 @@ static void vendor_disable_error_reporting(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
+ boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
return;
mce_disable_error_reporting();
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -88,7 +88,8 @@ static int cmci_supported(int *banks)
* makes sure none of the backdoors are entered otherwise.
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
- boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
+ boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
return 0;
if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)