
[10/22] mm/swap: Pull the CPU conditional out of __lru_add_drain_all()

Message ID: 20220617175020.717127-11-willy@infradead.org
State: New
Series: Convert the swap code to be more folio-based

Commit Message

Matthew Wilcox June 17, 2022, 5:50 p.m. UTC
The function is too long, so pull this complicated conditional out into
cpu_needs_drain().  This ends up shrinking the text by 14 bytes,
by allowing GCC to cache the result of calling per_cpu() instead of
relocating each lookup individually.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/swap.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)
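
Why this shrinks the text (an illustrative aside, not part of the patch): per_cpu() hides the per-CPU address computation from the compiler (it goes through RELOC_HIDE() on most configurations), so each per_cpu(cpu_fbatches.field, cpu) emits its own address calculation and relocation. Taking &per_cpu(cpu_fbatches, cpu) once hands GCC an ordinary pointer, and every later access is a plain field offset. A minimal sketch of the same transformation, with made-up names (my_counters and its fields are hypothetical, not from mm/swap.c):

#include <linux/percpu.h>
#include <linux/types.h>

struct my_counters {
	int a, b, c;
};

static DEFINE_PER_CPU(struct my_counters, my_counters);

/* Before: each per_cpu() use emits its own per-CPU address computation. */
static bool busy_slow(unsigned int cpu)
{
	return per_cpu(my_counters.a, cpu) ||
	       per_cpu(my_counters.b, cpu) ||
	       per_cpu(my_counters.c, cpu);
}

/* After: compute the per-CPU base once; fields are plain offsets from it. */
static bool busy_fast(unsigned int cpu)
{
	struct my_counters *c = &per_cpu(my_counters, cpu);

	return c->a || c->b || c->c;
}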

Patch

diff --git a/mm/swap.c b/mm/swap.c
index e65a195c184b..7966aa6bdd3f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -765,6 +765,21 @@  static void lru_add_drain_per_cpu(struct work_struct *dummy)
 	lru_add_and_bh_lrus_drain();
 }
 
+static bool cpu_needs_drain(unsigned int cpu)
+{
+	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+
+	/* Check these in order of likelihood that they're not zero */
+	return folio_batch_count(&fbatches->lru_add) ||
+		data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+		folio_batch_count(&fbatches->lru_deactivate_file) ||
+		folio_batch_count(&fbatches->lru_deactivate) ||
+		folio_batch_count(&fbatches->lru_lazyfree) ||
+		folio_batch_count(&fbatches->activate) ||
+		need_mlock_page_drain(cpu) ||
+		has_bh_in_lru(cpu, NULL);
+}
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -849,14 +864,7 @@  static inline void __lru_add_drain_all(bool force_all_cpus)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (folio_batch_count(&per_cpu(cpu_fbatches.lru_add, cpu)) ||
-		    data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
-		    folio_batch_count(&per_cpu(cpu_fbatches.lru_deactivate_file, cpu)) ||
-		    folio_batch_count(&per_cpu(cpu_fbatches.lru_deactivate, cpu)) ||
-		    folio_batch_count(&per_cpu(cpu_fbatches.lru_lazyfree, cpu)) ||
-		    folio_batch_count(&per_cpu(cpu_fbatches.activate, cpu)) ||
-		    need_mlock_page_drain(cpu) ||
-		    has_bh_in_lru(cpu, NULL)) {
+		if (cpu_needs_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			__cpumask_set_cpu(cpu, &has_work);
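
The hunk above ends mid-loop; the rest of __lru_add_drain_all() waits for every CPU it queued work on. A condensed sketch of the full check/queue/flush pattern (simplified from mm/swap.c: the mutex that serializes callers and the lru_drain_gen bookkeeping of the real function are omitted here):

static void lru_drain_all_sketch(void)
{
	static struct cpumask has_work;
	unsigned int cpu;

	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	/* Wait for each CPU that had draining queued. */
	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));
}

Note that the ordering inside cpu_needs_drain() matters because || short-circuits: with the batch most likely to be non-empty (lru_add) checked first, most CPUs that need draining are identified after a single folio_batch_count() call.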