@@ -252,5 +252,10 @@ static inline void gic_arch_enable_irqs(void)
WARN_ON_ONCE(true);
}
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return false;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
@@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void)
asm volatile ("msr daifclr, #3" : : : "memory");
}
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
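The helper pair above (the arm stub earlier in this patch and the arm64 version here) gives shared GICv3 driver code an #ifdef-free predicate: on 32-bit arm it constant-folds to false, while on arm64 it reflects the new cpucap. A minimal caller sketch; report_pmr_sync_mode() is a hypothetical name, and the real in-tree user is the pr_info() at the end of this patch:

  #include <linux/printk.h>
  #include <asm/arch_gicv3.h>

  static void report_pmr_sync_mode(void)
  {
  	if (gic_has_relaxed_pmr_sync())
  		pr_info("PMR writes need no barrier\n");	/* arm64, PMHE == 0 */
  	else
  		pr_info("PMR writes need DSB SY\n");		/* arm, or PMHE == 1 */
  }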
@@ -11,6 +11,8 @@
#include <linux/kasan-checks.h>
+#include <asm/alternative-macros.h>
+
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
@@ -41,10 +43,11 @@
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
- extern struct static_key_false gic_pmr_sync; \
- \
- if (static_branch_unlikely(&gic_pmr_sync)) \
- dsb(sy); \
+ asm volatile( \
+ ALTERNATIVE_CB("dsb sy", \
+ ARM64_HAS_GIC_PRIO_RELAXED_SYNC, \
+ alt_cb_patch_nops) \
+ ); \
} while(0)
#else
#define pmr_sync() do {} while (0)
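With ALTERNATIVE_CB the site emits "dsb sy" by default; during boot, once ARM64_HAS_GIC_PRIO_RELAXED_SYNC is detected, the alt_cb_patch_nops callback rewrites it to a NOP, so the barrier costs nothing where it isn't needed. A rough C model of the post-patch behaviour (pmr_sync_model() is an invented name; the real mechanism patches the instruction in place and has no runtime conditional):

  #include <asm/barrier.h>
  #include <asm/cpufeature.h>

  static inline void pmr_sync_model(void)
  {
  	if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC))
  		dsb(sy);	/* PMHE in use: publish the PMR write to the redistributor */
  	/* else: relaxed, no barrier required after writing PMR */
  }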
@@ -2056,6 +2056,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
return enable_pseudo_nmi;
}
+
+static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ /*
+ * If we're not using priority masking then we won't be poking PMR_EL1,
+ * so there's no need to relax synchronization of writes to it. In
+ * addition, ICC_CTLR_EL1 might not be accessible, so we must avoid
+ * reading it.
+ *
+ * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU
+ * feature, so will be detected earlier.
+ */
+ BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING);
+ if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING))
+ return false;
+
+ /*
+ * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a
+ * hint for interrupt distribution, a DSB is not necessary when
+ * unmasking IRQs via PMR, and we can relax the barrier to a NOP.
+ *
+ * Linux itself doesn't use 1:N distribution, so has no need to
+ * set PMHE. The only reason to have it set is if EL3 requires it
+ * (and we can't change it).
+ */
+ return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0;
+}
#endif
#ifdef CONFIG_ARM64_BTI
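For reference, PMHE is bit 6 of ICC_CTLR_EL1, which is why the entry.S hunk below can drop its "tbz x21, #6" test. A sketch of the same check via a raw sysreg read (illustrative only; the code above uses gic_read_ctlr() and ICC_CTLR_EL1_PMHE_MASK):

  #include <linux/bits.h>
  #include <linux/types.h>
  #include <asm/sysreg.h>

  static bool icc_pmhe_clear_sketch(void)
  {
  	/* ICC_CTLR_EL1.PMHE == 0: PMR is not used as a distribution hint */
  	return !(read_sysreg_s(SYS_ICC_CTLR_EL1) & BIT(6));
  }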
@@ -2546,6 +2574,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = can_use_gic_priorities,
},
+ {
+ /*
+ * Depends on ARM64_HAS_GIC_PRIO_MASKING
+ */
+ .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_gic_prio_relaxed_sync,
+ },
#endif
#ifdef CONFIG_ARM64_E0PD
{
@@ -311,13 +311,16 @@ alternative_else_nop_endif
.endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
- /* Save pmr */
-alternative_if ARM64_HAS_GIC_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_save\@
+alternative_else_nop_endif
+
mrs_s x20, SYS_ICC_PMR_EL1
str x20, [sp, #S_PMR_SAVE]
mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
msr_s SYS_ICC_PMR_EL1, x20
-alternative_else_nop_endif
+
+.Lskip_pmr_save\@:
#endif
/*
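The save path changes shape here: instead of one alternative region covering the whole sequence, a single patched branch jumps over plain code when priority masking is unavailable. The restore path below needs this shape so that a second, independently patched condition (RELAXED_SYNC) can live inside the region; the save path is converted to match. A C-level model with invented names, where pmr_save stands in for the S_PMR_SAVE slot of struct pt_regs:

  #include <asm/cpufeature.h>
  #include <asm/ptrace.h>
  #include <asm/sysreg.h>

  static void kernel_entry_pmr_model(struct pt_regs *regs)
  {
  	if (!system_uses_irq_prio_masking())	/* the patched 'b .Lskip_pmr_save' */
  		return;

  	regs->pmr_save = read_sysreg_s(SYS_ICC_PMR_EL1);
  	write_sysreg_s(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET, SYS_ICC_PMR_EL1);
  }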
@@ -336,15 +339,19 @@ alternative_else_nop_endif
.endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
- /* Restore pmr */
-alternative_if ARM64_HAS_GIC_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_restore\@
+alternative_else_nop_endif
+
ldr x20, [sp, #S_PMR_SAVE]
msr_s SYS_ICC_PMR_EL1, x20
- mrs_s x21, SYS_ICC_CTLR_EL1
- tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
- dsb sy // Ensure priority change is seen by redistributor
-.L__skip_pmr_sync\@:
+
+ /* Ensure priority change is seen by redistributor */
+alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
+ dsb sy
alternative_else_nop_endif
+
+.Lskip_pmr_restore\@:
#endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
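The matching model of the new restore sequence: the DSB is now guarded by its own alternative instead of a per-exception-return ICC_CTLR_EL1 read, so the old mrs_s/tbz pair disappears entirely. Again a sketch with invented names:

  #include <asm/barrier.h>
  #include <asm/cpufeature.h>
  #include <asm/ptrace.h>
  #include <asm/sysreg.h>

  static void kernel_exit_pmr_model(struct pt_regs *regs)
  {
  	if (!system_uses_irq_prio_masking())	/* the patched 'b .Lskip_pmr_restore' */
  		return;

  	write_sysreg_s(regs->pmr_save, SYS_ICC_PMR_EL1);

  	/* patched to NOP when ARM64_HAS_GIC_PRIO_RELAXED_SYNC is set */
  	if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC))
  		dsb(sy);
  }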
@@ -67,9 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
-/* Static key checked in pmr_sync(). */
#ifdef CONFIG_ARM64_PSEUDO_NMI
-KVM_NVHE_ALIAS(gic_pmr_sync);
/* Static key checked in GIC_PRIO_IRQOFF. */
KVM_NVHE_ALIAS(gic_nonsecure_priorities);
#endif
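With the static key gone, pmr_sync() in nVHE hyp code no longer references a kernel symbol at runtime: the dsb sy/nop choice is baked into the hyp text when alternatives are applied, so no KVM_NVHE_ALIAS is required for it.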
@@ -30,6 +30,7 @@ HAS_GENERIC_AUTH_ARCH_QARMA5
HAS_GENERIC_AUTH_IMP_DEF
HAS_GIC_CPUIF_SYSREGS
HAS_GIC_PRIO_MASKING
+HAS_GIC_PRIO_RELAXED_SYNC
HAS_LDAPR
HAS_LSE_ATOMICS
HAS_NO_FPSIMD
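Entries in this file are turned into numbered ARM64_* constants by the cpucaps generator, and the file is kept sorted, so HAS_GIC_PRIO_RELAXED_SYNC lands after HAS_GIC_PRIO_MASKING; that ordering is what the BUILD_BUG_ON in has_gic_prio_relaxed_sync() asserts. Illustrative shape of the generated header (the numbers here are invented):

  #define ARM64_HAS_GIC_PRIO_MASKING		30
  #define ARM64_HAS_GIC_PRIO_RELAXED_SYNC	31	/* must stay the higher index */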
@@ -89,15 +89,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
-/*
- * Global static key controlling whether an update to PMR allowing more
- * interrupts requires to be propagated to the redistributor (DSB SY).
- * And this needs to be exported for modules to be able to enable
- * interrupts...
- */
-DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
-EXPORT_SYMBOL(gic_pmr_sync);
-
DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);
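The removal above is the heart of the change: a static key flipped at GIC probe time (and exported so modules could unmask IRQs) becomes a boot-time cpucap, and the per-unmask branch becomes a patched instruction. For comparison, the shape of the check being deleted, with a stand-in for the old gic_pmr_sync key:

  #include <linux/jump_label.h>
  #include <asm/barrier.h>

  static DEFINE_STATIC_KEY_FALSE(pmr_sync_key_sketch);

  static inline void old_pmr_sync_sketch(void)
  {
  	if (static_branch_unlikely(&pmr_sync_key_sketch))
  		dsb(sy);
  }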
@@ -1768,16 +1759,8 @@ static void gic_enable_nmi_support(void)
for (i = 0; i < gic_data.ppi_nr; i++)
refcount_set(&ppi_nmi_refs[i], 0);
- /*
- * Linux itself doesn't use 1:N distribution, so has no need to
- * set PMHE. The only reason to have it set is if EL3 requires it
- * (and we can't change it).
- */
- if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
- static_branch_enable(&gic_pmr_sync);
-
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
- static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
+ gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
/*
* How priority values are used by the GIC depends on two things:
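Given the format string above, the boot report will read either "Pseudo-NMIs enabled using relaxed ICC_PMR_EL1 synchronisation" or "... using forced ...", modulo the driver's pr_fmt prefix. Note the polarity flip in the arguments: the old static key was true in the "forced" case, while gic_has_relaxed_pmr_sync() is true in the "relaxed" case.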