
[RFC,56/86] xarray: use cond_resched_xas*()

Message ID 20231107215742.363031-57-ankur.a.arora@oracle.com
State New
Series Make the kernel preemptible

Commit Message

Ankur Arora Nov. 7, 2023, 9:57 p.m. UTC
Replace the open-coded xarray pattern -- pause the iteration, release
the lock, allow rescheduling to happen, reacquire the lock -- with the
appropriate cond_resched_xas*() helper.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 fs/dax.c            | 15 +++------------
 mm/filemap.c        |  5 +----
 mm/khugepaged.c     |  5 +----
 mm/memfd.c          | 10 ++--------
 mm/page-writeback.c |  5 +----
 mm/shmem.c          | 10 ++--------
 6 files changed, 10 insertions(+), 40 deletions(-)
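For reference, the helpers used here are introduced earlier in the
series. A minimal sketch of what they plausibly look like, reconstructed
purely from the open-coded sequences this patch removes (the exact
definitions live in the companion patch adding cond_resched_xas*()):

/*
 * Sketch only: reconstructed from the sequences replaced below,
 * not the authoritative definitions from the earlier patch.
 */
static inline void cond_resched_xas_lock_irq(struct xa_state *xas)
{
	/* Pause the iteration, drop the lock, reschedule, relock. */
	xas_pause(xas);
	xas_unlock_irq(xas);
	cond_resched();
	xas_lock_irq(xas);
}

static inline void cond_resched_xas_rcu(struct xa_state *xas)
{
	/* Pause and leave the RCU read-side section only if needed. */
	if (need_resched()) {
		xas_pause(xas);
		cond_resched_rcu();
	}
}

Note the asymmetry, visible in the callers below: the _lock_irq variant
unconditionally drops the lock and reschedules, with callers
rate-limiting it via XA_CHECK_SCHED, while the _rcu variant performs the
need_resched() check itself.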

Patch

diff --git a/fs/dax.c b/fs/dax.c
index 8fafecbe42b1..93cf6e8d8990 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -726,10 +726,7 @@  struct page *dax_layout_busy_page_range(struct address_space *mapping,
 		if (++scanned % XA_CHECK_SCHED)
 			continue;
 
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
-		cond_resched();
-		xas_lock_irq(&xas);
+		cond_resched_xas_lock_irq(&xas);
 	}
 	xas_unlock_irq(&xas);
 	return page;
@@ -784,10 +781,7 @@  static int __dax_clear_dirty_range(struct address_space *mapping,
 		if (++scanned % XA_CHECK_SCHED)
 			continue;
 
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
-		cond_resched();
-		xas_lock_irq(&xas);
+		cond_resched_xas_lock_irq(&xas);
 	}
 	xas_unlock_irq(&xas);
 
@@ -1052,10 +1046,7 @@  int dax_writeback_mapping_range(struct address_space *mapping,
 		if (++scanned % XA_CHECK_SCHED)
 			continue;
 
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
-		cond_resched();
-		xas_lock_irq(&xas);
+		cond_resched_xas_lock_irq(&xas);
 	}
 	xas_unlock_irq(&xas);
 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
diff --git a/mm/filemap.c b/mm/filemap.c
index f0a15ce1bd1b..dc4dcc5eaf5e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4210,10 +4210,7 @@  static void filemap_cachestat(struct address_space *mapping,
 			cs->nr_writeback += nr_pages;
 
 resched:
-		if (need_resched()) {
-			xas_pause(&xas);
-			cond_resched_rcu();
-		}
+		cond_resched_xas_rcu(&xas);
 	}
 	rcu_read_unlock();
 }
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 88433cc25d8a..4025225ef434 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2290,10 +2290,7 @@  static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 
 		present++;
 
-		if (need_resched()) {
-			xas_pause(&xas);
-			cond_resched_rcu();
-		}
+		cond_resched_xas_rcu(&xas);
 	}
 	rcu_read_unlock();
 
diff --git a/mm/memfd.c b/mm/memfd.c
index 2dba2cb6f0d0..5c92f7317dbe 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -55,10 +55,7 @@  static void memfd_tag_pins(struct xa_state *xas)
 			continue;
 		latency = 0;
 
-		xas_pause(xas);
-		xas_unlock_irq(xas);
-		cond_resched();
-		xas_lock_irq(xas);
+		cond_resched_xas_lock_irq(xas);
 	}
 	xas_unlock_irq(xas);
 }
@@ -123,10 +120,7 @@  static int memfd_wait_for_pins(struct address_space *mapping)
 				continue;
 			latency = 0;
 
-			xas_pause(&xas);
-			xas_unlock_irq(&xas);
-			cond_resched();
-			xas_lock_irq(&xas);
+			cond_resched_xas_lock_irq(&xas);
 		}
 		xas_unlock_irq(&xas);
 	}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b8d3d7040a50..61a190b9d83c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2351,10 +2351,7 @@  void tag_pages_for_writeback(struct address_space *mapping,
 		if (++tagged % XA_CHECK_SCHED)
 			continue;
 
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
-		cond_resched();
-		xas_lock_irq(&xas);
+		cond_resched_xas_lock_irq(&xas);
 	}
 	xas_unlock_irq(&xas);
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 69595d341882..112172031b2c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -882,10 +882,7 @@  unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 			swapped++;
 		if (xas.xa_index == max)
 			break;
-		if (need_resched()) {
-			xas_pause(&xas);
-			cond_resched_rcu();
-		}
+		cond_resched_xas_rcu(&xas);
 	}
 
 	rcu_read_unlock();
@@ -1299,10 +1296,7 @@  static int shmem_find_swap_entries(struct address_space *mapping,
 		if (!folio_batch_add(fbatch, folio))
 			break;
 
-		if (need_resched()) {
-			xas_pause(&xas);
-			cond_resched_rcu();
-		}
+		cond_resched_xas_rcu(&xas);
 	}
 	rcu_read_unlock();