diff --git a/xen/arch/x86/cpu/mcheck/Makefile b/xen/arch/x86/cpu/mcheck/Makefile
--- a/xen/arch/x86/cpu/mcheck/Makefile
+++ b/xen/arch/x86/cpu/mcheck/Makefile
@@ -2,6 +2,7 @@ obj-y += amd_nonfatal.o
obj-y += mce_amd.o
obj-y += mcaction.o
obj-y += barrier.o
+obj-y += intel_nonfatal.o
obj-y += mctelem.o
obj-y += mce.o
obj-y += mce-apei.o
diff --git a/xen/arch/x86/cpu/mcheck/intel_nonfatal.c b/xen/arch/x86/cpu/mcheck/intel_nonfatal.c
new file mode 100644
--- /dev/null
+++ b/xen/arch/x86/cpu/mcheck/intel_nonfatal.c
@@ -0,0 +1,83 @@
+/*
+ * Non Fatal Machine Check Exception Reporting
+ *
+ * (C) Copyright 2002 Dave Jones. <davej@codemonkey.org.uk>
+ *
+ * This file contains routines to poll for non-fatal MCEs (every 8s by
+ * default, adapting between 2s and 16s depending on the error rate)
+ *
+ */
+
+#include <xen/event.h>
+
+#include "mce.h"
+#include "vmce.h"
+
+static struct timer mce_timer;
+
+#define MCE_PERIOD MILLISECS(8000)
+#define MCE_PERIOD_MIN MILLISECS(2000)
+#define MCE_PERIOD_MAX MILLISECS(16000)
+
+static uint64_t period = MCE_PERIOD;
+static int adjust = 0;
+static int variable_period = 1;
+
+static void cf_check mce_checkregs(void *info)
+{
+ mctelem_cookie_t mctc;
+ struct mca_summary bs;
+ static uint64_t dumpcount = 0;
+
+ mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
+ &bs, NULL);
+
+ if (bs.errcnt && mctc != NULL) {
+ adjust++;
+
+ /* If Dom0 enabled the VIRQ_MCA event, then notify it.
+ * Otherwise, if dom0 has had plenty of time to register
+ * the virq handler but still hasn't then dump telemetry
+ * to the Xen console. The call count may be incremented
+ * on multiple cpus at once and is indicative only - just
+ * a simple-minded attempt to avoid spamming the console
+ * for corrected errors in early startup.
+ */
+
+ if (dom0_vmce_enabled()) {
+ mctelem_commit(mctc);
+ send_global_virq(VIRQ_MCA);
+ } else if (++dumpcount >= 10) {
+ x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
+ mctelem_dismiss(mctc);
+ } else {
+ mctelem_dismiss(mctc);
+ }
+ } else if (mctc != NULL) {
+ mctelem_dismiss(mctc);
+ }
+}
+
+static void cf_check mce_work_fn(void *data)
+{
+ on_each_cpu(mce_checkregs, NULL, 1);
+
+ if (variable_period) {
+ if (adjust)
+ period /= (adjust + 1);
+ else
+ period *= 2;
+ if (period > MCE_PERIOD_MAX)
+ period = MCE_PERIOD_MAX;
+ if (period < MCE_PERIOD_MIN)
+ period = MCE_PERIOD_MIN;
+ }
+
+ set_timer(&mce_timer, NOW() + period);
+ adjust = 0;
+}
+
+void __init intel_nonfatal_mcheck_init(struct cpuinfo_x86 *unused)
+{
+ init_timer(&mce_timer, mce_work_fn, NULL, 0);
+ set_timer(&mce_timer, NOW() + MCE_PERIOD);
+}
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -47,6 +47,7 @@ enum mcheck_type amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp);
enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool bsp);
void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
+void intel_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
extern unsigned int firstbank;
extern unsigned int ppin_msr;
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -7,84 +7,7 @@
*
*/
-#include <xen/init.h>
-#include <xen/types.h>
-#include <xen/kernel.h>
-#include <xen/smp.h>
-#include <xen/timer.h>
-#include <xen/errno.h>
-#include <xen/event.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/msr.h>
-
#include "mce.h"
-#include "vmce.h"
-
-static struct timer mce_timer;
-
-#define MCE_PERIOD MILLISECS(8000)
-#define MCE_PERIOD_MIN MILLISECS(2000)
-#define MCE_PERIOD_MAX MILLISECS(16000)
-
-static uint64_t period = MCE_PERIOD;
-static int adjust = 0;
-static int variable_period = 1;
-
-static void cf_check mce_checkregs(void *info)
-{
- mctelem_cookie_t mctc;
- struct mca_summary bs;
- static uint64_t dumpcount = 0;
-
- mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),
- &bs, NULL);
-
- if (bs.errcnt && mctc != NULL) {
- adjust++;
-
- /* If Dom0 enabled the VIRQ_MCA event, then notify it.
- * Otherwise, if dom0 has had plenty of time to register
- * the virq handler but still hasn't then dump telemetry
- * to the Xen console. The call count may be incremented
- * on multiple cpus at once and is indicative only - just
- * a simple-minded attempt to avoid spamming the console
- * for corrected errors in early startup.
- */
-
- if (dom0_vmce_enabled()) {
- mctelem_commit(mctc);
- send_global_virq(VIRQ_MCA);
- } else if (++dumpcount >= 10) {
- x86_mcinfo_dump((struct mc_info *)mctelem_dataptr(mctc));
- mctelem_dismiss(mctc);
- } else {
- mctelem_dismiss(mctc);
- }
- } else if (mctc != NULL) {
- mctelem_dismiss(mctc);
- }
-}
-
-static void cf_check mce_work_fn(void *data)
-{
- on_each_cpu(mce_checkregs, NULL, 1);
-
- if (variable_period) {
- if (adjust)
- period /= (adjust + 1);
- else
- period *= 2;
- if (period > MCE_PERIOD_MAX)
- period = MCE_PERIOD_MAX;
- if (period < MCE_PERIOD_MIN)
- period = MCE_PERIOD_MIN;
- }
-
- set_timer(&mce_timer, NOW() + period);
- adjust = 0;
-}
static int __init cf_check init_nonfatal_mce_checker(void)
{
@@ -106,13 +29,10 @@ static int __init cf_check init_nonfatal_mce_checker(void)
/* Assume we are on K8 or newer AMD or Hygon CPU here */
amd_nonfatal_mcheck_init(c);
break;
-
case X86_VENDOR_INTEL:
- init_timer(&mce_timer, mce_work_fn, NULL, 0);
- set_timer(&mce_timer, NOW() + MCE_PERIOD);
+ intel_nonfatal_mcheck_init(c);
break;
}
-
printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
return 0;
}
Separate the Intel non-fatal MCE initialization code from the generic MCE
code, the same way it is already done for the AMD code. This will later
allow making the Intel/AMD MCE code optional in the build. Also clean up
unused includes.

No functional change intended.

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
 xen/arch/x86/cpu/mcheck/Makefile         |  1 +
 xen/arch/x86/cpu/mcheck/intel_nonfatal.c | 83 ++++++++++++++++++++++++
 xen/arch/x86/cpu/mcheck/mce.h            |  1 +
 xen/arch/x86/cpu/mcheck/non-fatal.c      | 82 +----------------------
 4 files changed, 86 insertions(+), 81 deletions(-)
 create mode 100644 xen/arch/x86/cpu/mcheck/intel_nonfatal.c
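
For context only, a minimal sketch of where this split could lead, and not
something this patch does: assuming per-vendor Kconfig options along the
lines of the existing CONFIG_AMD / CONFIG_INTEL are used to gate the
vendor-specific objects, the Makefile rules could later become conditional:

  # Hypothetical follow-up, not part of this patch: build the vendor
  # pollers only when the corresponding vendor support is enabled.
  obj-$(CONFIG_AMD)   += amd_nonfatal.o
  obj-$(CONFIG_INTEL) += intel_nonfatal.o

Nothing in this patch depends on such options; the sketch only illustrates
why the Intel polling init is being moved into its own object file, mirroring
the existing amd_nonfatal.o split.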