diff mbox series

[4/5] mm/vmscan: Convert shrink_active_list() to use a folio

Message ID 20220617154248.700416-5-willy@infradead.org (mailing list archive)
State New
Headers show
Series Convert much of vmscan to folios | expand

Commit Message

Matthew Wilcox June 17, 2022, 3:42 p.m. UTC
Remove a few hidden calls to compound_head, saving 411 bytes of text.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/vmscan.c | 61 +++++++++++++++++++++++++----------------------------
 1 file changed, 29 insertions(+), 32 deletions(-)

Comments

Christoph Hellwig June 19, 2022, 6:40 a.m. UTC | #1
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Matthew Wilcox July 11, 2022, 8:53 p.m. UTC | #2
On Fri, Jun 17, 2022 at 04:42:47PM +0100, Matthew Wilcox (Oracle) wrote:
> @@ -2478,23 +2477,21 @@ static void shrink_active_list(unsigned long nr_to_scan,
>  
>  	while (!list_empty(&l_hold)) {
>  		struct folio *folio;
> -		struct page *page;
>  
>  		cond_resched();
>  		folio = lru_to_folio(&l_hold);
>  		list_del(&folio->lru);
> -		page = &folio->page;
>  
> -		if (unlikely(!page_evictable(page))) {
> -			putback_lru_page(page);
> +		if (unlikely(!folio_evictable(folio))) {
> +			folio_putback_lru(folio);
>  			continue;
>  		}
>  
>  		if (unlikely(buffer_heads_over_limit)) {
> -			if (page_has_private(page) && trylock_page(page)) {
> -				if (page_has_private(page))
> -					try_to_release_page(page, 0);
> -				unlock_page(page);
> +			if (folio_get_private(folio) && folio_trylock(folio)) {
> +				if (folio_get_private(folio))
> +					filemap_release_folio(folio, 0);
> +				folio_unlock(folio);
>  			}
>  		}
>  

Hi Andrew.  Hugh points out that the above is not an equivalent
transformation for pages which are in the swapcache: a swapcache folio
stores its swp_entry in folio->private, so folio_get_private() is
non-NULL even when there are no buffer heads to release, whereas
folio_test_private() checks the PG_private flag, preserving the intent
of the original page_has_private() test.  Can you apply
this fix, or would you like a full patch?

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0070c7fb600a..7e34f4c8d956 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2544,8 +2544,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		}
 
 		if (unlikely(buffer_heads_over_limit)) {
-			if (folio_get_private(folio) && folio_trylock(folio)) {
-				if (folio_get_private(folio))
+			if (folio_test_private(folio) && folio_trylock(folio)) {
+				if (folio_test_private(folio))
 					filemap_release_folio(folio, 0);
 				folio_unlock(folio);
 			}
Andrew Morton July 13, 2022, 6:13 p.m. UTC | #3
On Mon, 11 Jul 2022 21:53:04 +0100 Matthew Wilcox <willy@infradead.org> wrote:

> On Fri, Jun 17, 2022 at 04:42:47PM +0100, Matthew Wilcox (Oracle) wrote:
> > @@ -2478,23 +2477,21 @@ static void shrink_active_list(unsigned long nr_to_scan,
> >  
> >  	while (!list_empty(&l_hold)) {
> >  		struct folio *folio;
> > -		struct page *page;
> >  
> >  		cond_resched();
> >  		folio = lru_to_folio(&l_hold);
> >  		list_del(&folio->lru);
> > -		page = &folio->page;
> >  
> > -		if (unlikely(!page_evictable(page))) {
> > -			putback_lru_page(page);
> > +		if (unlikely(!folio_evictable(folio))) {
> > +			folio_putback_lru(folio);
> >  			continue;
> >  		}
> >  
> >  		if (unlikely(buffer_heads_over_limit)) {
> > -			if (page_has_private(page) && trylock_page(page)) {
> > -				if (page_has_private(page))
> > -					try_to_release_page(page, 0);
> > -				unlock_page(page);
> > +			if (folio_get_private(folio) && folio_trylock(folio)) {
> > +				if (folio_get_private(folio))
> > +					filemap_release_folio(folio, 0);
> > +				folio_unlock(folio);
> >  			}
> >  		}
> >  
> 
> Hi Andrew.  Hugh points out that the above is not an equivalent
> transformation for pages which are in the swapcache.  Can you apply
> this fix, or would you like a full patch?
> 
> ...

The original is in mm-stable and rebasing that is bad (sigh) so I'll
add this as a standalone patch.  So yes, please send along the real
thing.
diff mbox series

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index f8ec446041c3..0a0e013a3457 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,8 +26,7 @@ 
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/buffer_head.h>	/* for try_to_release_page(),
-					buffer_heads_over_limit */
+#include <linux/buffer_head.h>	/* for buffer_heads_over_limit */
 #include <linux/mm_inline.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
@@ -2429,21 +2428,21 @@  shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 }
 
 /*
- * shrink_active_list() moves pages from the active LRU to the inactive LRU.
+ * shrink_active_list() moves folios from the active LRU to the inactive LRU.
  *
- * We move them the other way if the page is referenced by one or more
+ * We move them the other way if the folio is referenced by one or more
  * processes.
  *
- * If the pages are mostly unmapped, the processing is fast and it is
+ * If the folios are mostly unmapped, the processing is fast and it is
  * appropriate to hold lru_lock across the whole operation.  But if
- * the pages are mapped, the processing is slow (folio_referenced()), so
- * we should drop lru_lock around each page.  It's impossible to balance
- * this, so instead we remove the pages from the LRU while processing them.
- * It is safe to rely on PG_active against the non-LRU pages in here because
- * nobody will play with that bit on a non-LRU page.
+ * the folios are mapped, the processing is slow (folio_referenced()), so
+ * we should drop lru_lock around each folio.  It's impossible to balance
+ * this, so instead we remove the folios from the LRU while processing them.
+ * It is safe to rely on the active flag against the non-LRU folios in here
+ * because nobody will play with that bit on a non-LRU folio.
  *
- * The downside is that we have to touch page->_refcount against each page.
- * But we had to alter page->flags anyway.
+ * The downside is that we have to touch folio->_refcount against each folio.
+ * But we had to alter folio->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_to_scan,
 			       struct lruvec *lruvec,
@@ -2453,7 +2452,7 @@  static void shrink_active_list(unsigned long nr_to_scan,
 	unsigned long nr_taken;
 	unsigned long nr_scanned;
 	unsigned long vm_flags;
-	LIST_HEAD(l_hold);	/* The pages which were snipped off */
+	LIST_HEAD(l_hold);	/* The folios which were snipped off */
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
 	unsigned nr_deactivate, nr_activate;
@@ -2478,23 +2477,21 @@  static void shrink_active_list(unsigned long nr_to_scan,
 
 	while (!list_empty(&l_hold)) {
 		struct folio *folio;
-		struct page *page;
 
 		cond_resched();
 		folio = lru_to_folio(&l_hold);
 		list_del(&folio->lru);
-		page = &folio->page;
 
-		if (unlikely(!page_evictable(page))) {
-			putback_lru_page(page);
+		if (unlikely(!folio_evictable(folio))) {
+			folio_putback_lru(folio);
 			continue;
 		}
 
 		if (unlikely(buffer_heads_over_limit)) {
-			if (page_has_private(page) && trylock_page(page)) {
-				if (page_has_private(page))
-					try_to_release_page(page, 0);
-				unlock_page(page);
+			if (folio_get_private(folio) && folio_trylock(folio)) {
+				if (folio_get_private(folio))
+					filemap_release_folio(folio, 0);
+				folio_unlock(folio);
 			}
 		}
 
@@ -2502,34 +2499,34 @@  static void shrink_active_list(unsigned long nr_to_scan,
 		if (folio_referenced(folio, 0, sc->target_mem_cgroup,
 				     &vm_flags) != 0) {
 			/*
-			 * Identify referenced, file-backed active pages and
+			 * Identify referenced, file-backed active folios and
 			 * give them one more trip around the active list. So
 			 * that executable code get better chances to stay in
-			 * memory under moderate memory pressure.  Anon pages
+			 * memory under moderate memory pressure.  Anon folios
 			 * are not likely to be evicted by use-once streaming
-			 * IO, plus JVM can create lots of anon VM_EXEC pages,
+			 * IO, plus JVM can create lots of anon VM_EXEC folios,
 			 * so we ignore them here.
 			 */
-			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-				nr_rotated += thp_nr_pages(page);
-				list_add(&page->lru, &l_active);
+			if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) {
+				nr_rotated += folio_nr_pages(folio);
+				list_add(&folio->lru, &l_active);
 				continue;
 			}
 		}
 
-		ClearPageActive(page);	/* we are de-activating */
-		SetPageWorkingset(page);
-		list_add(&page->lru, &l_inactive);
+		folio_clear_active(folio);	/* we are de-activating */
+		folio_set_workingset(folio);
+		list_add(&folio->lru, &l_inactive);
 	}
 
 	/*
-	 * Move pages back to the lru list.
+	 * Move folios back to the lru list.
 	 */
 	spin_lock_irq(&lruvec->lru_lock);
 
 	nr_activate = move_pages_to_lru(lruvec, &l_active);
 	nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
-	/* Keep all free pages in l_active list */
+	/* Keep all free folios in l_active list */
 	list_splice(&l_inactive, &l_active);
 
 	__count_vm_events(PGDEACTIVATE, nr_deactivate);