
[v1,05/14] mm/swap.c: export activate_page()

Message ID 20210313075747.3781593-6-yuzhao@google.com (mailing list archive)
State New, archived
Series Multigenerational LRU

Commit Message

Yu Zhao March 13, 2021, 7:57 a.m. UTC
Export activate_page(), which is a merger of the existing
activate_page() and __lru_cache_activate_page(), so it can be used to
activate pages that are either already on the LRU or still queued in
lru_pvecs.lru_add.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 include/linux/swap.h |  1 +
 mm/swap.c            | 28 +++++++++++++++-------------
 2 files changed, 16 insertions(+), 13 deletions(-)
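
For context, a minimal sketch of how a caller outside mm/swap.c might use
the newly exported activate_page(); example_mark_hot() is a hypothetical
helper for illustration only and is not part of this series:

	#include <linux/mm.h>
	#include <linux/swap.h>
	#include <linux/page-flags.h>

	/* Hypothetical caller: promote a page known to be hot. */
	static void example_mark_hot(struct page *page)
	{
		/*
		 * activate_page() handles both cases itself: pages already
		 * on the LRU and pages still queued in lru_pvecs.lru_add.
		 */
		if (!PageUnevictable(page) && !PageActive(page))
			activate_page(page);
	}

The point of the merge is that callers no longer need to distinguish the
two states; the PageLRU() check moves inside activate_page().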

Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4cc6ec3bf0ab..de2bbbf181ba 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -344,6 +344,7 @@  extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
+extern void activate_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
diff --git a/mm/swap.c b/mm/swap.c
index 31b844d4ed94..f20ed56ebbbf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -334,7 +334,7 @@  static bool need_activate_page_drain(int cpu)
 	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
-static void activate_page(struct page *page)
+static void activate_page_on_lru(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -354,7 +354,7 @@  static inline void activate_page_drain(int cpu)
 {
 }
 
-static void activate_page(struct page *page)
+static void activate_page_on_lru(struct page *page)
 {
 	struct lruvec *lruvec;
 
@@ -368,11 +368,22 @@  static void activate_page(struct page *page)
 }
 #endif
 
-static void __lru_cache_activate_page(struct page *page)
+/*
+ * If the page is on the LRU, queue it for activation via
+ * lru_pvecs.activate_page. Otherwise, assume the page is on a
+ * pagevec, mark it active and it'll be moved to the active
+ * LRU on the next drain.
+ */
+void activate_page(struct page *page)
 {
 	struct pagevec *pvec;
 	int i;
 
+	if (PageLRU(page)) {
+		activate_page_on_lru(page);
+		return;
+	}
+
 	local_lock(&lru_pvecs.lock);
 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
 
@@ -421,16 +432,7 @@  void mark_page_accessed(struct page *page)
 		 * evictable page accessed has no effect.
 		 */
 	} else if (!PageActive(page)) {
-		/*
-		 * If the page is on the LRU, queue it for activation via
-		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
-		 * pagevec, mark it active and it'll be moved to the active
-		 * LRU on the next drain.
-		 */
-		if (PageLRU(page))
-			activate_page(page);
-		else
-			__lru_cache_activate_page(page);
+		activate_page(page);
 		ClearPageReferenced(page);
 		workingset_activation(page);
 	}