
[19/47] headers: Add tasklet_disable_in_atomic()

Message ID: 20211019214320.2035704-20-hauke@hauke-m.de
State: New, archived
Series: backports: Update to kernel 5.15-rc6

Commit Message

Hauke Mehrtens Oct. 19, 2021, 9:42 p.m. UTC
The tasklet_disable_in_atomic() function was added in kernel 5.13 and is
now used by ath9k. The implementation, together with the
tasklet_unlock_spin_wait() helper it relies on, was copied from the
upstream kernel.
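
For context, a minimal sketch of the kind of call site this enables.
This is hypothetical driver code, not taken from ath9k or this series;
struct example_priv and all names in it are made up:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;		/* never taken by the tasklet */
	struct tasklet_struct rx_tasklet;
	bool rx_stopped;
};

/*
 * Disable a tasklet while holding a BH-disabling spinlock, i.e. from
 * atomic context, where the regular tasklet_disable() (sleeping since
 * the 5.13 tasklet rework) must not be called.  If the tasklet ever
 * took priv->lock itself, the spin-wait would deadlock.
 */
static void example_stop_rx(struct example_priv *priv)
{
	spin_lock_bh(&priv->lock);
	/* Spin-waits for a run in progress on another CPU to finish,
	 * then leaves the tasklet disabled. */
	tasklet_disable_in_atomic(&priv->rx_tasklet);
	priv->rx_stopped = true;
	spin_unlock_bh(&priv->lock);
}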

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 backport/backport-include/linux/interrupt.h | 23 ++++++++++++++++
 backport/compat/Makefile                    |  1 +
 backport/compat/backport-5.13.c             | 30 +++++++++++++++++++++
 3 files changed, 54 insertions(+)
 create mode 100644 backport/compat/backport-5.13.c

Patch

diff --git a/backport/backport-include/linux/interrupt.h b/backport/backport-include/linux/interrupt.h
index f334a963..41d50d7c 100644
--- a/backport/backport-include/linux/interrupt.h
+++ b/backport/backport-include/linux/interrupt.h
@@ -50,4 +50,27 @@ tasklet_setup(struct tasklet_struct *t,
 
 #endif
 
+#if LINUX_VERSION_IS_LESS(5,13,0)
+
+#define tasklet_unlock_spin_wait LINUX_BACKPORT(tasklet_unlock_spin_wait)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+
+#else
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+#endif
+
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+#define tasklet_disable_in_atomic LINUX_BACKPORT(tasklet_disable_in_atomic)
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_spin_wait(t);
+	smp_mb();
+}
+#endif
+
 #endif /* _BP_LINUX_INTERRUPT_H */
diff --git a/backport/compat/Makefile b/backport/compat/Makefile
index e927a0c8..2761e5f5 100644
--- a/backport/compat/Makefile
+++ b/backport/compat/Makefile
@@ -19,6 +19,7 @@ compat-$(CPTCFG_KERNEL_5_3) += backport-5.3.o
 compat-$(CPTCFG_KERNEL_5_5) += backport-5.5.o
 compat-$(CPTCFG_KERNEL_5_10) += backport-5.10.o
 compat-$(CPTCFG_KERNEL_5_11) += backport-5.11.o
+compat-$(CPTCFG_KERNEL_5_13) += backport-5.13.o
 
 compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/verify.o
 compat-$(CPTCFG_BPAUTO_BUILD_SYSTEM_DATA_VERIFICATION) += verification/pkcs7.asn1.o
diff --git a/backport/compat/backport-5.13.c b/backport/compat/backport-5.13.c
new file mode 100644
index 00000000..c10b3321
--- /dev/null
+++ b/backport/compat/backport-5.13.c
@@ -0,0 +1,30 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			/*
+			 * Prevent a live lock when current preempted soft
+			 * interrupt processing or prevents ksoftirqd from
+			 * running. If the tasklet runs on a different CPU
+			 * then this has no effect other than doing the BH
+			 * disable/enable dance for nothing.
+			 */
+			local_bh_disable();
+			local_bh_enable();
+		} else {
+			cpu_relax();
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(tasklet_unlock_spin_wait);
+#endif
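
As a quick functional check of the backport, an out-of-tree smoke test
along these lines could be loaded on a pre-5.13 target kernel. This is
a hypothetical sketch, not part of the series; all bptest_* names are
made up. Timer callbacks run in softirq (atomic) context, which is
exactly where the sleeping tasklet_disable() of >= 5.13 kernels would
be illegal:

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct tasklet_struct bptest_tasklet;
static struct timer_list bptest_timer;

static void bptest_tasklet_fn(struct tasklet_struct *t)
{
	pr_info("bptest: tasklet ran\n");
}

static void bptest_timer_fn(struct timer_list *t)
{
	/*
	 * Atomic context: only spin-waits for a run in progress on
	 * another CPU; the local CPU cannot be executing the tasklet
	 * while this softirq callback runs.
	 */
	tasklet_disable_in_atomic(&bptest_tasklet);
	pr_info("bptest: tasklet disabled from atomic context\n");
	tasklet_enable(&bptest_tasklet);
}

static int __init bptest_init(void)
{
	tasklet_setup(&bptest_tasklet, bptest_tasklet_fn);
	tasklet_schedule(&bptest_tasklet);

	timer_setup(&bptest_timer, bptest_timer_fn, 0);
	mod_timer(&bptest_timer, jiffies + HZ);
	return 0;
}

static void __exit bptest_exit(void)
{
	del_timer_sync(&bptest_timer);
	tasklet_kill(&bptest_tasklet);
}

module_init(bptest_init);
module_exit(bptest_exit);
/* GPL license is required to link against EXPORT_SYMBOL_GPL above. */
MODULE_LICENSE("GPL");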