@@ -800,6 +800,12 @@ static __always_inline bool system_uses_irq_prio_masking(void)
return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

+static __always_inline bool system_uses_nmi(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_NMI) &&
+ alternative_has_cap_unlikely(ARM64_USES_NMI);
+}
+
static inline bool system_supports_mte(void)
{
return alternative_has_cap_unlikely(ARM64_MTE);
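
For context, not part of the diff: a caller elsewhere in the kernel could use the new helper to gate NMI-specific paths. A minimal sketch, assuming an _allint_set() counterpart to the _allint_clear() used by nmi_enable() later in this series:

    /* Sketch only: mask architected NMIs around a critical region.
     * _allint_set() is assumed to live in <asm/nmi.h> alongside the
     * _allint_clear() this series uses below. */
    static inline void example_nmi_mask(void)
    {
            if (system_uses_nmi())
                    _allint_set();  /* set PSTATE.ALLINT */
    }
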
@@ -85,6 +85,7 @@
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
+#include <asm/nmi.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -291,6 +292,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_NMI_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
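
The new ftr_id_aa64pfr1[] entry registers ID_AA64PFR1_EL1.NMI with the feature sanitisation machinery: hidden from userspace, strict across CPUs, lower-value-safe. A hedged sketch of how later code can read the system-wide sanitised field with the standard cpufeature helpers (not part of this diff):

    u64 pfr1 = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
    unsigned int nmi = cpuid_feature_extract_unsigned_field(pfr1,
                                    ID_AA64PFR1_EL1_NMI_SHIFT);

    /* nmi >= ID_AA64PFR1_EL1_NMI_IMP: FEAT_NMI present on all CPUs */
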
@@ -1076,9 +1078,11 @@ static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

-#ifdef CONFIG_ARM64_PSEUDO_NMI
+#if IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) || IS_ENABLED(CONFIG_ARM64_NMI)
static bool enable_pseudo_nmi;
+#endif

+#ifdef CONFIG_ARM64_PSEUDO_NMI
static int __init early_enable_pseudo_nmi(char *p)
{
return kstrtobool(p, &enable_pseudo_nmi);
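
For reference, the parser above is unchanged by this hunk; it is registered as an early parameter in the same file, so pseudo NMIs are still requested explicitly on the kernel command line:

    early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);

Booting with irqchip.gicv3_pseudo_nmi=1 sets enable_pseudo_nmi, which the new use_nmi() match function below consults.
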
@@ -2263,6 +2267,41 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
}
#endif

+#ifdef CONFIG_ARM64_NMI
+static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ if (!has_cpuid_feature(entry, scope))
+ return false;
+
+ /*
+ * Having both real and pseudo NMIs enabled simultaneously is
+ * likely to cause confusion. Since pseudo NMIs must be
+ * enabled with an explicit command line option, if the user
+ * has set that option on a system with real NMIs for some
+ * reason assume they know what they're doing.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
+ pr_info("Pseudo NMI enabled, not using architected NMI\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void nmi_enable(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * Enable use of NMIs controlled by ALLINT, SPINTMASK should
+ * be clear by default but make it explicit that we are using
+ * this mode. Ensure that ALLINT is clear first in order to
+ * avoid leaving things masked.
+ */
+ _allint_clear();
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPINTMASK, SCTLR_EL1_NMI);
+ isb();
+}
+#endif
+
#ifdef CONFIG_ARM64_BTI
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
{
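
To make the enable sequence concrete: sysreg_clear_set() is a read-modify-write helper, so the SCTLR_EL1 update in nmi_enable() is roughly equivalent to the sketch below (the real helper also skips the write when the value is unchanged):

    u64 val = read_sysreg(sctlr_el1);

    val &= ~SCTLR_EL1_SPINTMASK;    /* SPINTMASK clear, per the comment */
    val |= SCTLR_EL1_NMI;           /* enable the architected NMI mode */
    write_sysreg(val, sctlr_el1);
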
@@ -2861,6 +2900,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_nv1,
ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
},
+#ifdef CONFIG_ARM64_NMI
+ {
+ .desc = "Non-maskable Interrupts present",
+ .capability = ARM64_HAS_NMI,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, NMI, IMP)
+ },
+ {
+ .desc = "Non-maskable Interrupts enabled",
+ .capability = ARM64_USES_NMI,
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+ .matches = use_nmi,
+ .cpu_enable = nmi_enable,
+ ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, NMI, IMP)
+ },
+#endif
{},
};
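
The two capabilities deliberately separate detection from use: ARM64_HAS_NMI matches whenever the boot CPU implements FEAT_NMI, while ARM64_USES_NMI additionally requires use_nmi() to agree, i.e. pseudo NMIs were not requested. A hypothetical consumer could tell the two apart (sketch, not in this diff):

    if (cpus_have_cap(ARM64_HAS_NMI) && !system_uses_nmi())
            pr_debug("FEAT_NMI implemented but not in use\n");
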
@@ -43,6 +43,7 @@ HAS_LPA2
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
+HAS_NMI
HAS_PAN
HAS_RAS_EXTN
HAS_S1PIE
@@ -71,6 +72,7 @@ SPECTRE_BHB
SSBS
SVE
UNMAP_KERNEL_AT_EL0
+USES_NMI
WORKAROUND_834220
WORKAROUND_843419
WORKAROUND_845719
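
Both hunks above edit arch/arm64/tools/cpucaps, which is kept sorted; at build time gen-cpucaps.awk turns each name into an ARM64_<name> constant in the generated asm/cpucaps.h, which is what the C hunks reference. Illustrative output only; the actual numbers depend on each name's position in the list:

    #define ARM64_HAS_NMI       <n>
    #define ARM64_USES_NMI      <m>
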