[v7,1/5] kthread: simplify kthread_use_mm refcounting

Message ID 20230203071837.1136453-2-npiggin@gmail.com (mailing list archive)
State New
Series shoot lazy tlbs (lazy tlb refcount scalability improvement)

Commit Message

Nicholas Piggin Feb. 3, 2023, 7:18 a.m. UTC
Remove the special case that avoids refcounting when the mm to be used is
the same as the kernel thread's active (lazy tlb) mm. kthread_use_mm()
should not be such a performance-critical path that this matters much.
This simplifies a later change to lazy tlb mm refcounting.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 kernel/kthread.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)
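
For context, a minimal usage sketch (not part of this patch) of the API being changed: a kernel thread temporarily adopts a user mm around some uaccess work, typically once per work item rather than on a hot path. The worker function, its name, and the way the mm is handed to it are hypothetical; the caller is assumed to already hold an mm_users reference on the mm.

#include <linux/kthread.h>
#include <linux/mm_types.h>
#include <linux/uaccess.h>

/* Hypothetical worker: borrows a user mm for the duration of some work. */
static int my_mm_worker(void *data)
{
	struct mm_struct *mm = data;	/* hypothetical hand-off from the creator */

	kthread_use_mm(mm);	/* tsk->mm = tsk->active_mm = mm; takes a reference */
	/* ... operate on user mappings, e.g. copy_from_user()/copy_to_user() ... */
	kthread_unuse_mm(mm);	/* switch back to a lazy (kernel) mm */

	return 0;
}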

Patch

diff --git a/kernel/kthread.c b/kernel/kthread.c
index f97fd01a2932..7424a1839e9a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1410,14 +1410,13 @@ void kthread_use_mm(struct mm_struct *mm)
 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
 	WARN_ON_ONCE(tsk->mm);
 
+	mmgrab(mm);
+
 	task_lock(tsk);
 	/* Hold off tlb flush IPIs while switching mm's */
 	local_irq_disable();
 	active_mm = tsk->active_mm;
-	if (active_mm != mm) {
-		mmgrab(mm);
-		tsk->active_mm = mm;
-	}
+	tsk->active_mm = mm;
 	tsk->mm = mm;
 	membarrier_update_current_mm(mm);
 	switch_mm_irqs_off(active_mm, mm, tsk);
@@ -1434,12 +1433,9 @@ void kthread_use_mm(struct mm_struct *mm)
 	 * memory barrier after storing to tsk->mm, before accessing
 	 * user-space memory. A full memory barrier for membarrier
 	 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
-	 * mmdrop(), or explicitly with smp_mb().
+	 * mmdrop().
 	 */
-	if (active_mm != mm)
-		mmdrop(active_mm);
-	else
-		smp_mb();
+	mmdrop(active_mm);
 }
 EXPORT_SYMBOL_GPL(kthread_use_mm);
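
For reference, a rough sketch of how kthread_use_mm() reads with this patch applied, assembled only from the hunks above. The local variable declarations and the elided section marked with "..." are not shown in this diff and are reconstructed assumptions, not quoted code.

void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;		/* declarations assumed from usage */
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	mmgrab(mm);			/* reference now taken unconditionally */

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;		/* even when active_mm == mm */
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);

	/* ... unlock/IRQ-enable and the membarrier comment elided ... */

	mmdrop(active_mm);		/* always drops the old reference; per the
					 * comment above it also supplies the full
					 * barrier membarrier needs, so the
					 * smp_mb() branch is gone */
}
EXPORT_SYMBOL_GPL(kthread_use_mm);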