@@ -56,29 +56,33 @@ do { \
#endif /* CONFIG_PREEMPT */
-#ifdef CONFIG_PREEMPT_COUNT
-
-#define preempt_disable() \
+#define preempt_disable_strict() \
do { \
inc_preempt_count(); \
barrier(); \
} while (0)
-#define sched_preempt_enable_no_resched() \
+#define __atomic_end() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-
-#define preempt_enable() \
+#define preempt_enable_strict() \
do { \
- preempt_enable_no_resched(); \
+ __atomic_end(); \
barrier(); \
preempt_check_resched(); \
} while (0)
+#ifdef CONFIG_PREEMPT_COUNT
+
+#define preempt_disable() preempt_disable_strict()
+#define preempt_enable() preempt_enable_strict()
+
+#define sched_preempt_enable_no_resched() __atomic_end()
+#define preempt_enable_no_resched() __atomic_end()
+
/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
do { preempt_count() += (val); } while (0)
Add preempt_disable_strict and preempt_enable_strict macros that can be
used to demarcate atomic sections for which we would like to enforce --
even on non-PREEMPT builds with CONFIG_DEBUG_ATOMIC_SLEEP disabled --
that sleeping is not allowed. The rationale is that in some cases, the
risk of data corruption is high while the likelihood of immediate
detection is low, e.g., when using the NEON unit in kernel mode on
arm64.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 include/linux/preempt.h | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)