
[3/4] mm/swap: Access struct pagevec remotely

Message ID 20190424111208.24459-4-bigeasy@linutronix.de (mailing list archive)
State New, archived
Series mm/swap: Add locking for pagevec

Commit Message

Sebastian Andrzej Siewior April 24, 2019, 11:12 a.m. UTC
From: Thomas Gleixner <tglx@linutronix.de>

When the newly introduced static key is enabled, struct pagevec is
locked during access, which makes it possible to access it from a
remote CPU. The advantage is that the work can be done from the
"requesting" CPU without scheduling a worker on the remote CPU and
waiting for it to complete the work.

No functional change because the static key is not enabled.
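
[Editor's note: the following is a minimal user-space C sketch of the idea,
not part of the patch. struct fake_pagevec, NR_FAKE_CPUS, drain_cpu() and
the pthread mutex are made-up stand-ins for the per-CPU pagevec, the CPU
count, lru_add_drain_cpu() and the newly introduced pagevec lock, and the
use_pvec_lock boolean models the static key. With the lock in place, the
CPU that requests the drain can empty every other CPU's pagevec directly
instead of queueing a work item on each CPU and waiting for it.]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_FAKE_CPUS	4

	/* Stand-in for a per-CPU pagevec plus the new per-pagevec lock. */
	struct fake_pagevec {
		pthread_mutex_t lock;	/* models the pagevec spinlock */
		unsigned int nr;	/* pages cached on this "CPU" */
	};

	static struct fake_pagevec lru_add_pvec[NR_FAKE_CPUS];

	/* Models the static key: true means pagevecs are lock-protected. */
	static const bool use_pvec_lock = true;

	/* Drain one CPU's pagevec; with the lock this is safe from any CPU. */
	static void drain_cpu(int cpu)
	{
		struct fake_pagevec *pvec = &lru_add_pvec[cpu];

		pthread_mutex_lock(&pvec->lock);
		if (pvec->nr) {
			printf("draining %u pages of cpu %d remotely\n",
			       pvec->nr, cpu);
			pvec->nr = 0;	/* the real code moves pages to the LRU */
		}
		pthread_mutex_unlock(&pvec->lock);
	}

	static void fake_lru_add_drain_all(void)
	{
		if (use_pvec_lock) {
			/* The requesting CPU drains every pagevec itself. */
			for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
				drain_cpu(cpu);
		} else {
			/*
			 * Unlocked mode would have to ask every CPU to drain
			 * its own pagevec (queue_work_on() + flush_work() in
			 * the kernel) and wait for the workers to finish.
			 */
		}
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
			pthread_mutex_init(&lru_add_pvec[cpu].lock, NULL);

		lru_add_pvec[2].nr = 3;	/* pretend CPU 2 has cached pages */
		fake_lru_add_drain_all();
		return 0;
	}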

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/swap.c | 75 +++++++++++++++++++++++++++++++++----------------------
 1 file changed, 45 insertions(+), 30 deletions(-)

Patch

diff --git a/mm/swap.c b/mm/swap.c
index 136c80480dbde..ea623255cd305 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -774,7 +774,8 @@  static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-	lru_add_drain();
+	if (static_branch_unlikely(&use_pvec_lock))
+		lru_add_drain();
 }
 
 /*
@@ -786,38 +787,52 @@  static void lru_add_drain_per_cpu(struct work_struct *dummy)
  */
 void lru_add_drain_all(void)
 {
-	static DEFINE_MUTEX(lock);
-	static struct cpumask has_work;
-	int cpu;
+	if (static_branch_likely(&use_pvec_lock)) {
+		int cpu;
 
-	/*
-	 * Make sure nobody triggers this path before mm_percpu_wq is fully
-	 * initialized.
-	 */
-	if (WARN_ON(!mm_percpu_wq))
-		return;
-
-	mutex_lock(&lock);
-	cpumask_clear(&has_work);
-
-	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
-		if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
+		for_each_online_cpu(cpu) {
+			if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
+			    need_activate_page_drain(cpu)) {
+				lru_add_drain_cpu(cpu);
+			}
 		}
+	} else {
+		static DEFINE_MUTEX(lock);
+		static struct cpumask has_work;
+		int cpu;
+
+		/*
+		 * Make sure nobody triggers this path before mm_percpu_wq
+		 * is fully initialized.
+		 */
+		if (WARN_ON(!mm_percpu_wq))
+			return;
+
+		mutex_lock(&lock);
+		cpumask_clear(&has_work);
+
+		for_each_online_cpu(cpu) {
+			struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+			if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
+			    need_activate_page_drain(cpu)) {
+				INIT_WORK(work, lru_add_drain_per_cpu);
+				queue_work_on(cpu, mm_percpu_wq, work);
+				cpumask_set_cpu(cpu, &has_work);
+			}
+		}
+
+		for_each_cpu(cpu, &has_work)
+			flush_work(&per_cpu(lru_add_drain_work, cpu));
+
+		mutex_unlock(&lock);
 	}
-
-	for_each_cpu(cpu, &has_work)
-		flush_work(&per_cpu(lru_add_drain_work, cpu));
-
-	mutex_unlock(&lock);
 }
 #else
 void lru_add_drain_all(void)