--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3257,22 +3257,11 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
return ret;
}
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
{
struct deferred_split *ds_queue;
unsigned long flags;
 
- if (folio_order(folio) <= 1)
- return;
-
- /*
- * At this point, there is no one trying to add the folio to
- * deferred_list. If folio is not in deferred_list, it's safe
- * to check without acquiring the split_queue_lock.
- */
- if (data_race(list_empty(&folio->_deferred_list)))
- return;
-
ds_queue = get_deferred_split_queue(folio);
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (!list_empty(&folio->_deferred_list)) {
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -605,7 +605,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
#endif
}
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+ if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+ return;
+
+ /*
+ * At this point, there is no one trying to add the folio to
+ * deferred_list. If folio is not in deferred_list, it's safe
+ * to check without acquiring the split_queue_lock.
+ */
+ if (data_race(list_empty(&folio->_deferred_list)))
+ return;
+
+ __folio_undo_large_rmappable(folio);
+}
 
static inline struct folio *page_rmappable_folio(struct page *page)
{
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7875,8 +7875,7 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
* In addition, the old folio is about to be freed after migration, so
* removing from the split queue a bit earlier seems reasonable.
*/
- if (folio_test_large(old) && folio_test_large_rmappable(old))
- folio_undo_large_rmappable(old);
+ folio_undo_large_rmappable(old);
old->memcg_data = 0;
}
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2645,8 +2645,7 @@ void free_unref_folios(struct folio_batch *folios)
unsigned long pfn = folio_pfn(folio);
unsigned int order = folio_order(folio);
 
- if (order > 0 && folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
+ folio_undo_large_rmappable(folio);
if (!free_pages_prepare(&folio->page, order))
continue;
/*
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -123,8 +123,7 @@ void __folio_put(struct folio *folio)
}
 
page_cache_release(folio);
- if (folio_test_large(folio) && folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
+ folio_undo_large_rmappable(folio);
mem_cgroup_uncharge(folio);
free_unref_page(&folio->page, folio_order(folio));
}
@@ -1002,10 +1001,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
free_huge_folio(folio);
continue;
}
- if (folio_test_large(folio) &&
- folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
-
+ folio_undo_large_rmappable(folio);
__page_cache_release(folio, &lruvec, &flags);
 
if (j != i)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1454,9 +1454,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
*/
nr_reclaimed += nr_pages;
 
- if (folio_test_large(folio) &&
- folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
+ folio_undo_large_rmappable(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
mem_cgroup_uncharge_folios(&free_folios);
try_to_unmap_flush();
@@ -1863,9 +1861,7 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
if (unlikely(folio_put_testzero(folio))) {
__folio_clear_lru_flags(folio);
 
- if (folio_test_large(folio) &&
- folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
+ folio_undo_large_rmappable(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
spin_unlock_irq(&lruvec->lru_lock);
mem_cgroup_uncharge_folios(&free_folios);
All folio_undo_large_rmappable() callers check folio_test_large(), which is already covered by the folio_order() check inside the function, so only the folio_test_large_rmappable() check needs to be added there to avoid repeating it at every call site. In addition, move all of the cheap checks into the header file as an inline wrapper, which saves a function call for folios that are not large-rmappable or whose deferred_list is empty.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/huge_memory.c | 13 +------------
 mm/internal.h    | 17 ++++++++++++++++-
 mm/memcontrol.c  |  3 +--
 mm/page_alloc.c  |  3 +--
 mm/swap.c        |  8 ++------
 mm/vmscan.c      |  8 ++------
 6 files changed, 23 insertions(+), 29 deletions(-)
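For readers less familiar with mm internals, here is a minimal, self-contained sketch of the pattern this patch applies: the cheap, common-case checks live in an inline header wrapper so most callers never pay for an out-of-line call, and only folios that might actually be queued reach the real (locked) routine. The names below (struct obj, obj_dequeue, __obj_dequeue) are invented for illustration; this is ordinary userspace C, not kernel code.

/* Illustrative sketch only -- invented names, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct obj {
	bool large;
	bool rmappable;
	bool on_queue;	/* unlocked hint; the slow path re-checks under its lock */
};

/* Out-of-line slow path: in the real code this role is played by
 * __folio_undo_large_rmappable(), which takes split_queue_lock and
 * re-checks list_empty() before unlinking. */
static void __obj_dequeue(struct obj *o)
{
	printf("slow path: lock, re-check, unlink\n");
	o->on_queue = false;
}

/* Inline fast path: callers no longer repeat these checks themselves. */
static inline void obj_dequeue(struct obj *o)
{
	if (!o->large || !o->rmappable)
		return;		/* cannot be on the deferred queue */
	if (!o->on_queue)
		return;		/* lockless peek; nothing to do */
	__obj_dequeue(o);
}

int main(void)
{
	struct obj base_page = { .large = false };
	struct obj queued = { .large = true, .rmappable = true, .on_queue = true };

	obj_dequeue(&base_page);	/* filtered out inline, no call overhead */
	obj_dequeue(&queued);		/* reaches the slow path */
	return 0;
}

As in the real helper, the lockless peek is only safe because nothing can still be adding the folio to the deferred list at this point (the comment kept in the inline wrapper spells this out), and the authoritative !list_empty() check is repeated under split_queue_lock in the out-of-line path.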