
mm: refactor folio_undo_large_rmappable()

Message ID: 20240515064506.72253-1-wangkefeng.wang@huawei.com
State: New
Series: mm: refactor folio_undo_large_rmappable()

Commit Message

Kefeng Wang May 15, 2024, 6:45 a.m. UTC
All folio_undo_large_rmappable() callers will check folio_test_large()
which already checked by folio_order(), so only add the check
folio_test_large_rmappable() into the function to avoid repeated calls.

In addtion, move all the checks into headfile to save a function call
for non-large-rmappable or empty deferred_list folio.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/huge_memory.c | 13 +------------
 mm/internal.h    | 17 ++++++++++++++++-
 mm/memcontrol.c  |  3 +--
 mm/page_alloc.c  |  3 +--
 mm/swap.c        |  8 ++------
 mm/vmscan.c      |  8 ++------
 6 files changed, 23 insertions(+), 29 deletions(-)
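
As a minimal, self-contained sketch of the pattern this patch applies (the
widget names below are hypothetical, not kernel code): the cheap predicate
checks move into an inline helper in the header, so callers with nothing to
undo take a local branch instead of a function call, and only the rare slow
path stays out of line.

#include <stdbool.h>

struct widget {
	bool needs_teardown;
	/* ... other fields ... */
};

/* Out-of-line slow path, defined once in a .c file. */
void __widget_teardown(struct widget *w);

/*
 * Inline fast path in the header: a caller whose widget needs no
 * teardown takes a predictable local branch and never makes a call.
 * This mirrors the folio_undo_large_rmappable() /
 * __folio_undo_large_rmappable() split in the patch below.
 */
static inline void widget_teardown(struct widget *w)
{
	if (!w->needs_teardown)
		return;
	__widget_teardown(w);
}

The payoff shows in the callers touched by the diffstat above: page freeing
and reclaim paths, where most folios have nothing on the deferred list.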

Comments

Lance Yang May 15, 2024, 6:53 a.m. UTC | #1
Hi Kefeng,

> In addtion, move all the checks into headfile to save a function call
> for non-large-rmappable or empty deferred_list folio.

s/addtion/addition

And, IMO, 'headfile' looks a bit weird :)
s/headfile/header file

Thanks,
Lance
Kefeng Wang May 16, 2024, 4:43 a.m. UTC | #2
On 2024/5/15 14:53, Lance Yang wrote:
> Hi Kefeng,
> 
>> In addtion, move all the checks into headfile to save a function call
>> for non-large-rmappable or empty deferred_list folio.
> 
> s/addtion/addition
> 
> And, IMO, 'headfile' looks a bit weird :)
> s/headfile/header file

Thanks, will fix them.
> 
> Thanks,
> Lance
Vishal Moola May 16, 2024, 3:54 p.m. UTC | #3
On Wed, May 15, 2024 at 02:45:06PM +0800, Kefeng Wang wrote:
> All folio_undo_large_rmappable() callers will check folio_test_large()
> which already checked by folio_order(), so only add the check

This commit message is a little misleading. The folio_order() check in
folio_undo_large_rmappable() is primarily for catching order-1 folios,
not to check folio_test_large(). You can read more about it in
commit: 8897277acfef7f70fdecc.

Aside from that, I like the patch. It should make this code easier to
maintain.
Kefeng Wang May 17, 2024, 5:18 a.m. UTC | #4
On 2024/5/16 23:54, Vishal Moola wrote:
> On Wed, May 15, 2024 at 02:45:06PM +0800, Kefeng Wang wrote:
>> All folio_undo_large_rmappable() callers will check folio_test_large()
>> which already checked by folio_order(), so only add the check
> 
> This commit message is a little misleading. The folio_order() check in
> folio_undo_large_rmappable() is primarily for catching order-1 folios,
> not to check folio_test_large(). You can read more about it in
> commit: 8897277acfef7f70fdecc.

folio_order() <= 1 covers both the small folio and order = 1 cases; maybe
I could update the message to make that clearer.

> 
> Aside from that, I like the patch. It should make this code easier to
> maintain.

Thanks.
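
To make the order-1 discussion above concrete, a hedged sketch of the
underlying constraint (the helper name is hypothetical, and the layout
detail is simplified from the struct folio definition of that era, not the
exact kernel code):

/*
 * Why the order <= 1 early return is a correctness check and not just
 * an optimisation: _deferred_list overlays the folio's third page
 * (page index 2), so an order-0 folio (one page) or an order-1 folio
 * (two pages) has no such field at all, and touching it would read
 * into an unrelated page.  See commit 8897277acfef ("mm: support
 * order-1 folios in the page cache"), referenced above.
 */
static inline bool folio_may_have_deferred_list(const struct folio *folio)
{
	/* Hypothetical helper name; the kernel open-codes this check. */
	return folio_order(folio) > 1;
}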

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9efb6fefc391..2e5c5690449a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3257,22 +3257,11 @@  int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 	return ret;
 }
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	if (folio_order(folio) <= 1)
-		return;
-
-	/*
-	 * At this point, there is no one trying to add the folio to
-	 * deferred_list. If folio is not in deferred_list, it's safe
-	 * to check without acquiring the split_queue_lock.
-	 */
-	if (data_race(list_empty(&folio->_deferred_list)))
-		return;
-
 	ds_queue = get_deferred_split_queue(folio);
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e..447171d171ce 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -605,7 +605,22 @@  static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	__folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index feb6651ee1e8..cdf6b595e40e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7875,8 +7875,7 @@  void mem_cgroup_migrate(struct folio *old, struct folio *new)
 	 * In addition, the old folio is about to be freed after migration, so
 	 * removing from the split queue a bit earlier seems reasonable.
 	 */
-	if (folio_test_large(old) && folio_test_large_rmappable(old))
-		folio_undo_large_rmappable(old);
+	folio_undo_large_rmappable(old);
 	old->memcg_data = 0;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cd584aace6bf..b1e3eb5787de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2645,8 +2645,7 @@  void free_unref_folios(struct folio_batch *folios)
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int order = folio_order(folio);
 
-		if (order > 0 && folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (!free_pages_prepare(&folio->page, order))
 			continue;
 		/*
diff --git a/mm/swap.c b/mm/swap.c
index 67786cb77130..dc205bdfbbd4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -123,8 +123,7 @@  void __folio_put(struct folio *folio)
 	}
 
 	page_cache_release(folio);
-	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
-		folio_undo_large_rmappable(folio);
+	folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, folio_order(folio));
 }
@@ -1002,10 +1001,7 @@  void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			free_huge_folio(folio);
 			continue;
 		}
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
-
+		folio_undo_large_rmappable(folio);
 		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6981a71c8ef0..615d2422d0e4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1454,9 +1454,7 @@  static unsigned int shrink_folio_list(struct list_head *folio_list,
 		 */
 		nr_reclaimed += nr_pages;
 
-		if (folio_test_large(folio) &&
-		    folio_test_large_rmappable(folio))
-			folio_undo_large_rmappable(folio);
+		folio_undo_large_rmappable(folio);
 		if (folio_batch_add(&free_folios, folio) == 0) {
 			mem_cgroup_uncharge_folios(&free_folios);
 			try_to_unmap_flush();
@@ -1863,9 +1861,7 @@  static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
 
-			if (folio_test_large(folio) &&
-			    folio_test_large_rmappable(folio))
-				folio_undo_large_rmappable(folio);
+			folio_undo_large_rmappable(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
 				mem_cgroup_uncharge_folios(&free_folios);