[32/75] mm: Turn putback_lru_page() into folio_putback_lru()

Message ID 20220204195852.1751729-33-willy@infradead.org
State New
Series MM folio patches for 5.18

Commit Message

Matthew Wilcox (Oracle) Feb. 4, 2022, 7:58 p.m. UTC
Turn putback_lru_page() into a thin wrapper around the new
folio_putback_lru().  Removes a couple of compound_head() calls.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/folio-compat.c |  5 +++++
 mm/internal.h     |  3 ++-
 mm/vmscan.c       | 16 ++++++++--------
 3 files changed, 15 insertions(+), 9 deletions(-)
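
The compound_head() savings come from resolving the head page once, in the
compat wrapper, rather than inside each page-based helper that
putback_lru_page() used to call.  A minimal sketch of the resulting shape,
mirroring the hunks below (illustrative only; the real definitions live in
mm/folio-compat.c and mm/vmscan.c, with declarations in mm/internal.h):

#include <linux/mm.h>
#include <linux/swap.h>		/* folio_add_lru() */

/* The folio version does the real work; no head-page lookups are needed. */
void folio_putback_lru(struct folio *folio)
{
	folio_add_lru(folio);	/* back onto the appropriate LRU list */
	folio_put(folio);	/* drop the reference taken at isolation */
}

/*
 * Compat wrapper: the single page_folio() here replaces the head-page
 * lookups that lru_cache_add() and put_page() performed internally.
 */
void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}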

Comments

Christoph Hellwig Feb. 7, 2022, 7:50 a.m. UTC | #1
Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index a4a7725f4486..46fa179e32fb 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -159,3 +159,8 @@  int isolate_lru_page(struct page *page)
 		return -EBUSY;
 	return folio_isolate_lru((struct folio *)page);
 }
+
+void putback_lru_page(struct page *page)
+{
+	folio_putback_lru(page_folio(page));
+}
diff --git a/mm/internal.h b/mm/internal.h
index 8b0249909b06..b7a2195c12b1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -159,7 +159,8 @@  extern unsigned long highest_memmap_pfn;
  */
 int isolate_lru_page(struct page *page);
 int folio_isolate_lru(struct folio *folio);
-extern void putback_lru_page(struct page *page);
+void putback_lru_page(struct page *page);
+void folio_putback_lru(struct folio *folio);
 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 08dcb1897f58..9f11960b1db8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1355,18 +1355,18 @@  int remove_mapping(struct address_space *mapping, struct page *page)
 }
 
 /**
- * putback_lru_page - put previously isolated page onto appropriate LRU list
- * @page: page to be put back to appropriate lru list
+ * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
+ * @folio: Folio to be returned to an LRU list.
  *
- * Add previously isolated @page to appropriate LRU list.
- * Page may still be unevictable for other reasons.
+ * Add previously isolated @folio to appropriate LRU list.
+ * The folio may still be unevictable for other reasons.
  *
- * lru_lock must not be held, interrupts must be enabled.
+ * Context: lru_lock must not be held, interrupts must be enabled.
  */
-void putback_lru_page(struct page *page)
+void folio_putback_lru(struct folio *folio)
 {
-	lru_cache_add(page);
-	put_page(page);		/* drop ref from isolate */
+	folio_add_lru(folio);
+	folio_put(folio);		/* drop ref from isolate */
 }
 
 enum page_references {
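
The new kerneldoc also pins down the calling contract: the folio must have
been isolated from its LRU list beforehand (which leaves the caller holding a
reference), lru_lock must not be held, and interrupts must be enabled.  A
hedged caller-side sketch of a folio-native path pairing isolation with
putback; the helper name is hypothetical and, since folio_isolate_lru() and
folio_putback_lru() are declared in mm/internal.h, it assumes the code lives
under mm/:

#include <linux/mm.h>
#include "internal.h"	/* folio_isolate_lru(), folio_putback_lru() */

/* Hypothetical helper, not part of this series. */
static void example_bounce_folio(struct folio *folio)
{
	/*
	 * Take the folio off its LRU list.  Like isolate_lru_page(), this
	 * returns 0 on success and -EBUSY if the folio was not on an LRU
	 * list; the caller must already hold a reference to the folio.
	 */
	if (folio_isolate_lru(folio))
		return;

	/* ... operate on the isolated folio here ... */

	/*
	 * Put it back on the appropriate LRU list and drop the reference
	 * taken at isolation.  Per the kerneldoc above, lru_lock must not
	 * be held and interrupts must be enabled.
	 */
	folio_putback_lru(folio);
}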